Bug 1002266 - Access MediaQueues from MediaDecoderStateMachine through accessors. r=kinetik

Chris Pearce 2014-04-28 13:12:50 +12:00
Parent 4d91442251
Commit e212cfc560
2 changed files with 60 additions and 57 deletions
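The change itself is mechanical but repeated at every call site: instead of dereferencing mReader each time a sample queue is needed, MediaDecoderStateMachine gains inline AudioQueue()/VideoQueue() accessors (added in the header diff below) that forward to the reader, so knowledge of where the queues live sits in one place. A minimal sketch of the pattern, assuming simplified placeholder types (SimpleQueue, Reader, StateMachine stand in for MediaQueue<T>, MediaDecoderReader and MediaDecoderStateMachine; this is an illustration, not the real API):

    // Sketch of the accessor pattern applied by this patch, with stand-in types.
    #include <cstddef>
    #include <cstdio>
    #include <queue>

    template <typename T>
    class SimpleQueue {                     // stands in for MediaQueue<T>
    public:
      std::size_t GetSize() const { return mQueue.size(); }
      void Push(const T& aItem) { mQueue.push(aItem); }
    private:
      std::queue<T> mQueue;
    };

    class Reader {                          // stands in for MediaDecoderReader
    public:
      SimpleQueue<int>& AudioQueue() { return mAudioQueue; }
    private:
      SimpleQueue<int> mAudioQueue;
    };

    class StateMachine {                    // stands in for MediaDecoderStateMachine
    public:
      explicit StateMachine(Reader* aReader) : mReader(aReader) {}

      bool HaveDecodedAudio() {
        // After the patch, call sites go through the accessor...
        return AudioQueue().GetSize() > 0;
        // ...rather than spelling out mReader->AudioQueue().GetSize() > 0.
      }

    private:
      // The one-line forwarder this commit adds (protected in the real header).
      SimpleQueue<int>& AudioQueue() { return mReader->AudioQueue(); }
      Reader* mReader;
    };

    int main() {
      Reader reader;
      reader.AudioQueue().Push(42);         // pretend the reader decoded a sample
      StateMachine stateMachine(&reader);
      std::printf("have audio: %s\n", stateMachine.HaveDecodedAudio() ? "yes" : "no");
      return 0;
    }

If the queues later move off the reader, only the two forwarding accessors need to change, not every call site in the state machine.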

View file

@@ -258,7 +258,7 @@ MediaDecoderStateMachine::~MediaDecoderStateMachine()
 #endif
 }
 
-bool MediaDecoderStateMachine::HasFutureAudio() const {
+bool MediaDecoderStateMachine::HasFutureAudio() {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   // We've got audio ready to play if:
@@ -267,20 +267,20 @@ bool MediaDecoderStateMachine::HasFutureAudio() const {
   // we've completely decoded all audio (but not finished playing it yet
   // as per 1).
   return !mAudioCompleted &&
-         (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || mReader->AudioQueue().IsFinished());
+         (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished());
 }
 
-bool MediaDecoderStateMachine::HaveNextFrameData() const {
+bool MediaDecoderStateMachine::HaveNextFrameData() {
   AssertCurrentThreadInMonitor();
   return (!HasAudio() || HasFutureAudio()) &&
-         (!HasVideo() || mReader->VideoQueue().GetSize() > 0);
+         (!HasVideo() || VideoQueue().GetSize() > 0);
 }
 
 int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() {
   NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(),
                "Should be on decode thread or state machine thread");
   AssertCurrentThreadInMonitor();
-  int64_t audioDecoded = mReader->AudioQueue().Duration();
+  int64_t audioDecoded = AudioQueue().Duration();
   if (mAudioEndTime != -1) {
     audioDecoded += mAudioEndTime - GetMediaTime();
   }
@@ -381,8 +381,8 @@ void MediaDecoderStateMachine::SendStreamData()
   int64_t minLastAudioPacketTime = INT64_MAX;
   bool finished =
-      (!mInfo.HasAudio() || mReader->AudioQueue().IsFinished()) &&
-      (!mInfo.HasVideo() || mReader->VideoQueue().IsFinished());
+      (!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
+      (!mInfo.HasVideo() || VideoQueue().IsFinished());
   if (mDecoder->IsSameOriginMedia()) {
     SourceMediaStream* mediaStream = stream->mStream;
     StreamTime endPosition = 0;
@@ -407,7 +407,7 @@ void MediaDecoderStateMachine::SendStreamData()
       nsAutoTArray<AudioData*,10> audio;
       // It's OK to hold references to the AudioData because while audio
       // is captured, only the decoder thread pops from the queue (see below).
-      mReader->AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
+      AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
       AudioSegment output;
       for (uint32_t i = 0; i < audio.Length(); ++i) {
         SendStreamAudio(audio[i], stream, &output);
@@ -415,7 +415,7 @@ void MediaDecoderStateMachine::SendStreamData()
       if (output.GetDuration() > 0) {
         mediaStream->AppendToTrack(TRACK_AUDIO, &output);
       }
-      if (mReader->AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
+      if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
         mediaStream->EndTrack(TRACK_AUDIO);
         stream->mHaveSentFinishAudio = true;
       }
@@ -428,7 +428,7 @@ void MediaDecoderStateMachine::SendStreamData()
       nsAutoTArray<VideoData*,10> video;
       // It's OK to hold references to the VideoData only the decoder thread
       // pops from the queue.
-      mReader->VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
+      VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
       VideoSegment output;
       for (uint32_t i = 0; i < video.Length(); ++i) {
         VideoData* v = video[i];
@@ -459,7 +459,7 @@ void MediaDecoderStateMachine::SendStreamData()
       if (output.GetDuration() > 0) {
         mediaStream->AppendToTrack(TRACK_VIDEO, &output);
       }
-      if (mReader->VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
+      if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
         mediaStream->EndTrack(TRACK_VIDEO);
         stream->mHaveSentFinishVideo = true;
       }
@@ -480,7 +480,7 @@ void MediaDecoderStateMachine::SendStreamData()
   if (mAudioCaptured) {
     // Discard audio packets that are no longer needed.
     while (true) {
-      const AudioData* a = mReader->AudioQueue().PeekFront();
+      const AudioData* a = AudioQueue().PeekFront();
       // Packet times are not 100% reliable so this may discard packets that
       // actually contain data for mCurrentFrameTime. This means if someone might
       // create a new output stream and we actually don't have the audio for the
@@ -490,7 +490,7 @@ void MediaDecoderStateMachine::SendStreamData()
       if (!a || a->GetEndTime() >= minLastAudioPacketTime)
        break;
       mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
-      delete mReader->AudioQueue().PopFront();
+      delete AudioQueue().PopFront();
     }
 
     if (finished) {
@@ -515,7 +515,7 @@ bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
 {
   AssertCurrentThreadInMonitor();
 
-  if (mReader->AudioQueue().GetSize() == 0 ||
+  if (AudioQueue().GetSize() == 0 ||
       GetDecodedAudioDuration() < aAmpleAudioUSecs) {
     return false;
   }
@@ -539,7 +539,7 @@ bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   AssertCurrentThreadInMonitor();
 
-  if (static_cast<uint32_t>(mReader->VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
+  if (static_cast<uint32_t>(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
     return false;
   }
@@ -582,7 +582,7 @@ MediaDecoderStateMachine::DecodeVideo()
     // only just started up the decode loop, so wait until we've decoded
     // some frames before enabling the keyframe skip logic on video.
     if (mIsVideoPrerolling &&
-        (static_cast<uint32_t>(mReader->VideoQueue().GetSize())
+        (static_cast<uint32_t>(VideoQueue().GetSize())
          >= mVideoPrerollFrames * mPlaybackRate))
     {
       mIsVideoPrerolling = false;
@@ -603,7 +603,7 @@ MediaDecoderStateMachine::DecodeVideo()
          // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
          // we are still in the safe range without underrunning video frames
          GetClock() > mVideoFrameEndTime &&
-         (static_cast<uint32_t>(mReader->VideoQueue().GetSize())
+         (static_cast<uint32_t>(VideoQueue().GetSize())
           < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
         !HasLowUndecodedData())
     {
@@ -624,7 +624,7 @@ MediaDecoderStateMachine::DecodeVideo()
   }
 
   if (!mIsVideoDecoding) {
     // Playback ended for this stream, close the sample queue.
-    mReader->VideoQueue().Finish();
+    VideoQueue().Finish();
     CheckIfDecodeComplete();
   }
@@ -686,7 +686,7 @@ MediaDecoderStateMachine::DecodeAudio()
   }
 
   if (!mIsAudioDecoding) {
     // Playback ended for this stream, close the sample queue.
-    mReader->AudioQueue().Finish();
+    AudioQueue().Finish();
     CheckIfDecodeComplete();
   }
@@ -715,8 +715,8 @@ MediaDecoderStateMachine::CheckIfDecodeComplete()
     // since we don't want to abort the shutdown or seek processes.
     return;
   }
-  MOZ_ASSERT(!mReader->AudioQueue().IsFinished() || !mIsAudioDecoding);
-  MOZ_ASSERT(!mReader->VideoQueue().IsFinished() || !mIsVideoDecoding);
+  MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
+  MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
   if (!mIsVideoDecoding && !mIsAudioDecoding) {
     // We've finished decoding all active streams,
     // so move to COMPLETED state.
@@ -822,8 +822,8 @@ void MediaDecoderStateMachine::AudioLoop()
            !mStopAudioThread &&
            (!IsPlaying() ||
             mState == DECODER_STATE_BUFFERING ||
-            (mReader->AudioQueue().GetSize() == 0 &&
-             !mReader->AudioQueue().AtEndOfStream())))
+            (AudioQueue().GetSize() == 0 &&
+             !AudioQueue().AtEndOfStream())))
     {
       if (!IsPlaying() && !mAudioStream->IsPaused()) {
         mAudioStream->Pause();
@@ -835,7 +835,7 @@ void MediaDecoderStateMachine::AudioLoop()
       // Also break out if audio is being captured.
       if (mState == DECODER_STATE_SHUTDOWN ||
           mStopAudioThread ||
-          mReader->AudioQueue().AtEndOfStream())
+          AudioQueue().AtEndOfStream())
       {
         break;
       }
@@ -873,11 +873,11 @@ void MediaDecoderStateMachine::AudioLoop()
           NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
         }
       }
-      NS_ASSERTION(mReader->AudioQueue().GetSize() > 0,
+      NS_ASSERTION(AudioQueue().GetSize() > 0,
                    "Should have data to play");
       // See if there's a gap in the audio. If there is, push silence into the
       // audio hardware, so we can play across the gap.
-      const AudioData* s = mReader->AudioQueue().PeekFront();
+      const AudioData* s = AudioQueue().PeekFront();
 
       // Calculate the number of frames that have been pushed onto the audio
       // hardware.
@@ -918,7 +918,7 @@ void MediaDecoderStateMachine::AudioLoop()
     }
     {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-      if (mReader->AudioQueue().AtEndOfStream() &&
+      if (AudioQueue().AtEndOfStream() &&
           mState != DECODER_STATE_SHUTDOWN &&
          !mStopAudioThread)
       {
@@ -988,7 +988,7 @@ uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset,
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsAutoPtr<AudioData> audio(mReader->AudioQueue().PopFront());
+  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
@@ -1325,8 +1325,8 @@ void MediaDecoderStateMachine::StartDecoding()
   // Reset our "stream finished decoding" flags, so we try to decode all
   // streams that we have when we start decoding.
-  mIsVideoDecoding = HasVideo() && !mReader->VideoQueue().IsFinished();
-  mIsAudioDecoding = HasAudio() && !mReader->AudioQueue().IsFinished();
+  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
+  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
 
   CheckIfDecodeComplete();
 
   if (mState == DECODER_STATE_COMPLETED) {
@@ -1519,7 +1519,7 @@ MediaDecoderStateMachine::SetReaderIdle()
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
                 GetDecodedAudioDuration(),
-                mReader->VideoQueue().Duration());
+                VideoQueue().Duration());
   }
 #endif
   MOZ_ASSERT(OnDecodeThread());
@@ -1715,7 +1715,7 @@ MediaDecoderStateMachine::StartAudioThread()
   return NS_OK;
 }
 
-int64_t MediaDecoderStateMachine::AudioDecodedUsecs() const
+int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
 {
   NS_ASSERTION(HasAudio(),
                "Should only call AudioDecodedUsecs() when we have audio");
@@ -1723,10 +1723,10 @@ int64_t MediaDecoderStateMachine::AudioDecodedUsecs() const
   // already decoded and pushed to the hardware, plus the amount of audio
   // data waiting to be pushed to the hardware.
   int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0;
-  return pushed + mReader->AudioQueue().Duration();
+  return pushed + AudioQueue().Duration();
 }
 
-bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs) const
+bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
   AssertCurrentThreadInMonitor();
   // We consider ourselves low on decoded data if we're low on audio,
@@ -1734,20 +1734,20 @@ bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs) const
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
   return ((HasAudio() &&
-           !mReader->AudioQueue().IsFinished() &&
+           !AudioQueue().IsFinished() &&
            AudioDecodedUsecs() < aAudioUsecs)
           ||
           (HasVideo() &&
-           !mReader->VideoQueue().IsFinished() &&
-           static_cast<uint32_t>(mReader->VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
+           !VideoQueue().IsFinished() &&
+           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
-bool MediaDecoderStateMachine::HasLowUndecodedData() const
+bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
-bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs) const
+bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs)
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
@@ -1886,12 +1886,12 @@ nsresult MediaDecoderStateMachine::DecodeMetadata()
   if (HasAudio()) {
     RefPtr<nsIRunnable> decodeTask(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
-    mReader->AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
   }
   if (HasVideo()) {
     RefPtr<nsIRunnable> decodeTask(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
-    mReader->VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
   }
 
   if (mState == DECODER_STATE_DECODING_METADATA) {
@@ -1989,7 +1989,7 @@ void MediaDecoderStateMachine::DecodeSeek()
       if (seekTime == mEndTime) {
         newCurrentTime = mAudioStartTime = seekTime;
       } else if (HasAudio()) {
-        AudioData* audio = mReader->AudioQueue().PeekFront();
+        AudioData* audio = AudioQueue().PeekFront();
         newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
       } else {
         newCurrentTime = video ? video->mTime : seekTime;
@@ -2128,8 +2128,8 @@ nsresult MediaDecoderStateMachine::RunStateMachine()
       // The reader's listeners hold references to the state machine,
       // creating a cycle which keeps the state machine and its shared
      // thread pools alive. So break it here.
-      mReader->AudioQueue().ClearListeners();
-      mReader->VideoQueue().ClearListeners();
+      AudioQueue().ClearListeners();
+      VideoQueue().ClearListeners();
 
       {
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
@@ -2258,7 +2258,7 @@ nsresult MediaDecoderStateMachine::RunStateMachine()
       // Play the remaining media. We want to run AdvanceFrame() at least
       // once to ensure the current playback position is advanced to the
       // end of the media, and so that we update the readyState.
-      if (mReader->VideoQueue().GetSize() > 0 ||
+      if (VideoQueue().GetSize() > 0 ||
           (HasAudio() && !mAudioCompleted) ||
           (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished()))
       {
@@ -2424,8 +2424,8 @@ void MediaDecoderStateMachine::AdvanceFrame()
 #ifdef PR_LOGGING
   int32_t droppedFrames = 0;
 #endif
-  if (mReader->VideoQueue().GetSize() > 0) {
-    VideoData* frame = mReader->VideoQueue().PeekFront();
+  if (VideoQueue().GetSize() > 0) {
+    VideoData* frame = VideoQueue().PeekFront();
     while (mRealTime || clock_time >= frame->mTime) {
       mVideoFrameEndTime = frame->GetEndTime();
       currentFrame = frame;
@@ -2435,14 +2435,14 @@ void MediaDecoderStateMachine::AdvanceFrame()
         VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1);
       }
 #endif
-      mReader->VideoQueue().PopFront();
+      VideoQueue().PopFront();
       // Notify the decode thread that the video queue's buffers may have
       // free'd up space for more frames.
       mDecoder->GetReentrantMonitor().NotifyAll();
       mDecoder->UpdatePlaybackOffset(frame->mOffset);
-      if (mReader->VideoQueue().GetSize() == 0)
+      if (VideoQueue().GetSize() == 0)
         break;
-      frame = mReader->VideoQueue().PeekFront();
+      frame = VideoQueue().PeekFront();
     }
     // Current frame has already been presented, wait until it's time to
     // present the next frame.
@@ -2463,7 +2463,7 @@ void MediaDecoderStateMachine::AdvanceFrame()
       !resource->IsSuspended()) {
     if (JustExitedQuickBuffering() || HasLowUndecodedData()) {
       if (currentFrame) {
-        mReader->VideoQueue().PushFront(currentFrame.forget());
+        VideoQueue().PushFront(currentFrame.forget());
       }
       StartBuffering();
       // Don't go straight back to the state machine loop since that might

View file

@@ -241,7 +241,7 @@ public:
   }
 
   // Should be called by main thread.
-  bool HaveNextFrameData() const;
+  bool HaveNextFrameData();
 
   // Must be called with the decode monitor held.
   bool IsBuffering() const {
@@ -397,6 +397,9 @@ protected:
   };
   WakeDecoderRunnable* GetWakeDecoderRunnable();
 
+  MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
+  MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
+
   // True if our buffers of decoded audio are not full, and we should
   // decode more.
   bool NeedToDecodeAudio();
@@ -413,24 +416,24 @@ protected:
   // Returns true if we've got less than aAudioUsecs microseconds of decoded
   // and playable data. The decoder monitor must be held.
-  bool HasLowDecodedData(int64_t aAudioUsecs) const;
+  bool HasLowDecodedData(int64_t aAudioUsecs);
 
   // Returns true if we're running low on data which is not yet decoded.
   // The decoder monitor must be held.
-  bool HasLowUndecodedData() const;
+  bool HasLowUndecodedData();
 
   // Returns true if we have less than aUsecs of undecoded data available.
-  bool HasLowUndecodedData(double aUsecs) const;
+  bool HasLowUndecodedData(double aUsecs);
 
   // Returns the number of unplayed usecs of audio we've got decoded and/or
   // pushed to the hardware waiting to play. This is how much audio we can
   // play without having to run the audio decoder. The decoder monitor
   // must be held.
-  int64_t AudioDecodedUsecs() const;
+  int64_t AudioDecodedUsecs();
 
   // Returns true when there's decoded audio waiting to play.
   // The decoder monitor must be held.
-  bool HasFutureAudio() const;
+  bool HasFutureAudio();
 
   // Returns true if we recently exited "quick buffering" mode.
   bool JustExitedQuickBuffering();