Bug 641718 - Backout 44d43f095a4f. r=backout

This commit is contained in:
Chris Pearce 2011-04-01 13:02:20 +13:00
Родитель 9c1402e650
Коммит 373a8c69e0
26 изменённых файлов: 284 добавлений и 267 удалений

Просмотреть файл

@ -1820,7 +1820,7 @@ nsresult nsHTMLMediaElement::InitializeDecoderAsClone(nsMediaDecoder* aOriginal)
double duration = aOriginal->GetDuration();
if (duration >= 0) {
decoder->SetDuration(duration);
decoder->SetDuration(PRInt64(NS_round(duration * 1000)));
decoder->SetSeekable(aOriginal->GetSeekable());
}

Просмотреть файл

@ -175,24 +175,24 @@ PRBool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult) {
return PR_TRUE;
}
// Converts from number of audio samples to microseconds, given the specified
// Converts from number of audio samples to milliseconds, given the specified
// audio rate.
PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs)
PRBool SamplesToMs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutMs)
{
PRInt64 x;
if (!MulOverflow(aSamples, USECS_PER_S, x))
if (!MulOverflow(aSamples, 1000, x))
return PR_FALSE;
aOutUsecs = x / aRate;
aOutMs = x / aRate;
return PR_TRUE;
}
// Converts from microseconds to number of audio samples, given the specified
// Converts from milliseconds to number of audio samples, given the specified
// audio rate.
PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples)
PRBool MsToSamples(PRInt64 aMs, PRUint32 aRate, PRInt64& aOutSamples)
{
PRInt64 x;
if (!MulOverflow(aUsecs, aRate, x))
if (!MulOverflow(aMs, aRate, x))
return PR_FALSE;
aOutSamples = x / USECS_PER_S;
aOutSamples = x / 1000;
return PR_TRUE;
}

Просмотреть файл

@ -126,22 +126,16 @@ PRBool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// in an integer overflow.
PRBool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// Converts from number of audio samples (aSamples) to microseconds, given
// the specified audio rate (aRate). Stores result in aOutUsecs. Returns PR_TRUE
// Converts from number of audio samples (aSamples) to milliseconds, given
// the specified audio rate (aRate). Stores result in aOutMs. Returns PR_TRUE
// if the operation succeeded, or PR_FALSE if there was an integer overflow
// while calculating the conversion.
PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs);
PRBool SamplesToMs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutMs);
// Converts from microseconds (aUsecs) to number of audio samples, given the
// Converts from milliseconds (aMs) to number of audio samples, given the
// specified audio rate (aRate). Stores the result in aOutSamples. Returns
// PR_TRUE if the operation succeeded, or PR_FALSE if there was an integer
// overflow while calculating the conversion.
PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples);
// Number of microseconds per second. 1e6.
#define USECS_PER_S 1000000
// Number of microseconds per millisecond.
#define USECS_PER_MS 1000
PRBool MsToSamples(PRInt64 aMs, PRUint32 aRate, PRInt64& aOutSamples);
#endif

Просмотреть файл

@ -39,8 +39,8 @@
#include "nsTArray.h"
#include "nsAudioAvailableEventManager.h"
#include "VideoUtils.h"
#define MILLISECONDS_PER_SECOND 1000.0f
#define MAX_PENDING_EVENTS 100
using namespace mozilla;
@ -106,7 +106,7 @@ void nsAudioAvailableEventManager::DispatchPendingEvents(PRUint64 aCurrentTime)
while (mPendingEvents.Length() > 0) {
nsAudioAvailableEventRunner* e =
(nsAudioAvailableEventRunner*)mPendingEvents[0].get();
if (e->mTime * USECS_PER_S > aCurrentTime) {
if (e->mTime * MILLISECONDS_PER_SECOND > aCurrentTime) {
break;
}
nsCOMPtr<nsIRunnable> event = mPendingEvents[0];
@ -227,7 +227,7 @@ void nsAudioAvailableEventManager::Drain(PRUint64 aEndTime)
(mSignalBufferLength - mSignalBufferPosition) * sizeof(float));
// Force this last event to go now.
float time = (aEndTime / static_cast<float>(USECS_PER_S)) -
float time = (aEndTime / MILLISECONDS_PER_SECOND) -
(mSignalBufferPosition / mSamplesPerSecond);
nsCOMPtr<nsIRunnable> lastEvent =
new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),

Просмотреть файл

@ -53,7 +53,6 @@ using namespace mozilla::dom;
#include "nsAutoPtr.h"
#include "nsAudioStream.h"
#include "nsAlgorithm.h"
#include "VideoUtils.h"
extern "C" {
#include "sydneyaudio/sydney_audio.h"
}
@ -77,6 +76,7 @@ PRLogModuleInfo* gAudioStreamLog = nsnull;
#endif
#define FAKE_BUFFER_SIZE 176400
#define MILLISECONDS_PER_SECOND 1000
class nsAudioStreamLocal : public nsAudioStream
{
@ -555,7 +555,7 @@ PRInt64 nsAudioStreamLocal::GetPosition()
{
PRInt64 sampleOffset = GetSampleOffset();
if (sampleOffset >= 0) {
return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
return ((MILLISECONDS_PER_SECOND * sampleOffset) / mRate / mChannels);
}
return -1;
}
@ -724,7 +724,7 @@ PRInt64 nsAudioStreamRemote::GetPosition()
{
PRInt64 sampleOffset = GetSampleOffset();
if (sampleOffset >= 0) {
return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
return ((MILLISECONDS_PER_SECOND * sampleOffset) / mRate / mChannels);
}
return 0;
}
@ -740,7 +740,7 @@ nsAudioStreamRemote::GetSampleOffset()
return 0;
PRInt64 time = mAudioChild->GetLastKnownSampleOffsetTime();
PRInt64 result = offset + (mRate * mChannels * (PR_IntervalNow() - time) / USECS_PER_S);
PRInt64 result = offset + (mRate * mChannels * (PR_IntervalNow() - time) / MILLISECONDS_PER_SECOND);
return result;
}

Просмотреть файл

@ -107,7 +107,7 @@ public:
// Resume audio playback
virtual void Resume() = 0;
// Return the position in microseconds of the sample being played by the
// Return the position in milliseconds of the sample being played by the
// audio hardware.
virtual PRInt64 GetPosition() = 0;

Просмотреть файл

@ -83,7 +83,7 @@ double nsBuiltinDecoder::GetDuration()
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
if (mDuration >= 0) {
return static_cast<double>(mDuration) / static_cast<double>(USECS_PER_S);
return static_cast<double>(mDuration) / 1000.0;
}
return std::numeric_limits<double>::quiet_NaN();
}
@ -524,7 +524,7 @@ double nsBuiltinDecoder::ComputePlaybackRate(PRPackedBool* aReliable)
PRInt64 length = mStream ? mStream->GetLength() : -1;
if (mDuration >= 0 && length >= 0) {
*aReliable = PR_TRUE;
return length * static_cast<double>(USECS_PER_S) / mDuration;
return double(length)*1000.0/mDuration;
}
return mPlaybackStatistics.GetRateAtLastStop(aReliable);
}
@ -800,15 +800,15 @@ void nsBuiltinDecoder::DurationChanged()
UpdatePlaybackRate();
if (mElement && oldDuration != mDuration) {
LOG(PR_LOG_DEBUG, ("%p duration changed to %lld", this, mDuration));
LOG(PR_LOG_DEBUG, ("%p duration changed to %lldms", this, mDuration));
mElement->DispatchEvent(NS_LITERAL_STRING("durationchange"));
}
}
void nsBuiltinDecoder::SetDuration(double aDuration)
void nsBuiltinDecoder::SetDuration(PRInt64 aDuration)
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
mDuration = static_cast<PRInt64>(NS_round(aDuration * static_cast<double>(USECS_PER_S)));
mDuration = aDuration;
MonitorAutoEnter mon(mMonitor);
if (mDecoderStateMachine) {

Просмотреть файл

@ -250,14 +250,13 @@ public:
virtual void Shutdown() = 0;
// Called from the main thread to get the duration. The decoder monitor
// must be obtained before calling this. It is in units of microseconds.
// must be obtained before calling this. It is in units of milliseconds.
virtual PRInt64 GetDuration() = 0;
// Called from the main thread to set the duration of the media resource
// if it is able to be obtained via HTTP headers. Called from the
// state machine thread to set the duration if it is obtained from the
// media metadata. The decoder monitor must be obtained before calling this.
// aDuration is in microseconds.
virtual void SetDuration(PRInt64 aDuration) = 0;
// Functions used by assertions to ensure we're calling things
@ -384,10 +383,10 @@ class nsBuiltinDecoder : public nsMediaDecoder
// Call on the main thread only.
virtual PRBool IsEnded() const;
// Set the duration of the media resource in units of seconds.
// Set the duration of the media resource in units of milliseconds.
// This is called via a channel listener if it can pick up the duration
// from a content header. Must be called from the main thread only.
virtual void SetDuration(double aDuration);
virtual void SetDuration(PRInt64 aDuration);
// Set a flag indicating whether seeking is supported
virtual void SetSeekable(PRBool aSeekable);

Просмотреть файл

@ -180,8 +180,8 @@ public:
// chunk ends.
const PRInt64 mOffset;
PRInt64 mTime; // Start time of samples in usecs.
const PRInt64 mDuration; // In usecs.
PRInt64 mTime; // Start time of samples in ms.
const PRInt64 mDuration; // In ms.
const PRUint32 mSamples;
const PRUint32 mChannels;
nsAutoArrayPtr<SoundDataValue> mAudioData;
@ -242,10 +242,10 @@ public:
// Approximate byte offset of the end of the frame in the media.
PRInt64 mOffset;
// Start time of frame in microseconds.
// Start time of frame in milliseconds.
PRInt64 mTime;
// End time of frame in microseconds;
// End time of frame in milliseconds;
PRInt64 mEndTime;
// Codec specific internal time code. For Ogg based codecs this is the
@ -388,7 +388,7 @@ template <class T> class MediaQueue : private nsDeque {
mEndOfStream = PR_TRUE;
}
// Returns the approximate number of microseconds of samples in the queue.
// Returns the approximate number of milliseconds of samples in the queue.
PRInt64 Duration() {
MonitorAutoEnter mon(mMonitor);
if (GetSize() < 2) {
@ -458,9 +458,9 @@ public:
// This will not read past aEndOffset. Returns -1 on failure.
virtual PRInt64 FindEndTime(PRInt64 aEndOffset);
// Moves the decode head to aTime microseconds. aStartTime and aEndTime
// denote the start and end times of the media in usecs, and aCurrentTime
// is the current playback position in microseconds.
// Moves the decode head to aTime milliseconds. aStartTime and aEndTime
// denote the start and end times of the media in ms, and aCurrentTime
// is the current playback position in ms.
virtual nsresult Seek(PRInt64 aTime,
PRInt64 aStartTime,
PRInt64 aEndTime,
@ -486,7 +486,7 @@ public:
protected:
// Pumps the decode until we reach frames/samples required to play at
// time aTarget (usecs).
// time aTarget (ms).
nsresult DecodeToTarget(PRInt64 aTarget);
// Reader decode function. Matches DecodeVideoFrame() and

Просмотреть файл

@ -68,17 +68,17 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
#define BUFFERING_MIN_RATE 50000
#define BUFFERING_RATE(x) ((x)< BUFFERING_MIN_RATE ? BUFFERING_MIN_RATE : (x))
// If audio queue has less than this many usecs of decoded audio, we won't risk
// If audio queue has less than this many ms of decoded audio, we won't risk
// trying to decode the video, we'll skip decoding video up to the next
// keyframe. We may increase this value for an individual decoder if we
// encounter video frames which take a long time to decode.
static const PRUint32 LOW_AUDIO_USECS = 300000;
static const PRUint32 LOW_AUDIO_MS = 300;
// If more than this many usecs of decoded audio is queued, we'll hold off
// If more than this many ms of decoded audio is queued, we'll hold off
// decoding more audio. If we increase the low audio threshold (see
// LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
// LOW_AUDIO_MS above) we'll also increase this value to ensure it's not
// less than the low audio threshold.
const PRInt64 AMPLE_AUDIO_USECS = 1000000;
const PRInt64 AMPLE_AUDIO_MS = 1000;
// Maximum number of bytes we'll allocate and write at once to the audio
// hardware when the audio stream contains missing samples and we're
@ -98,9 +98,9 @@ static const PRUint32 LOW_VIDEO_FRAMES = 1;
static const PRUint32 AMPLE_VIDEO_FRAMES = 10;
// Arbitrary "frame duration" when playing only audio.
static const int AUDIO_DURATION_USECS = 40000;
static const int AUDIO_DURATION_MS = 40;
// If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we
// If we increase our "low audio threshold" (see LOW_AUDIO_MS above), we
// use this as a factor in all our calculations. Increasing this will cause
// us to be more likely to increase our low audio threshold, and to
// increase it by more.
@ -110,16 +110,16 @@ static const int THRESHOLD_FACTOR = 2;
// ourselves to be running low on undecoded data. We determine how much
// undecoded data we have remaining using the reader's GetBuffered()
// implementation.
static const PRInt64 LOW_DATA_THRESHOLD_USECS = 5000000;
static const PRInt64 LOW_DATA_THRESHOLD_MS = 5000;
// LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise
// LOW_DATA_THRESHOLD_MS needs to be greater than AMPLE_AUDIO_MS, otherwise
// the skip-to-keyframe logic can activate when we're running low on data.
PR_STATIC_ASSERT(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS);
PR_STATIC_ASSERT(LOW_DATA_THRESHOLD_MS > AMPLE_AUDIO_MS);
// Amount of excess usecs of data to add in to the "should we buffer" calculation.
static const PRUint32 EXHAUSTED_DATA_MARGIN_USECS = 60000;
// Amount of excess ms of data to add in to the "should we buffer" calculation.
static const PRUint32 EXHAUSTED_DATA_MARGIN_MS = 60;
// If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS seconds of starting
// If we enter buffering within QUICK_BUFFER_THRESHOLD_MS seconds of starting
// decoding, we'll enter "quick buffering" mode, which exits a lot sooner than
// normal buffering mode. This exists so that if the decode-ahead exhausts the
// downloaded data while decode/playback is just starting up (for example
@ -128,24 +128,24 @@ static const PRUint32 EXHAUSTED_DATA_MARGIN_USECS = 60000;
// for buffering. We may actually be able to playback in this case, so exit
// buffering early and try to play. If it turns out we can't play, we'll fall
// back to buffering normally.
static const PRUint32 QUICK_BUFFER_THRESHOLD_USECS = 2000000;
static const PRUint32 QUICK_BUFFER_THRESHOLD_MS = 2000;
// If we're quick buffering, we'll remain in buffering mode while we have less than
// QUICK_BUFFERING_LOW_DATA_USECS of decoded data available.
static const PRUint32 QUICK_BUFFERING_LOW_DATA_USECS = 1000000;
// QUICK_BUFFERING_LOW_DATA_MS of decoded data available.
static const PRUint32 QUICK_BUFFERING_LOW_DATA_MS = 1000;
// If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit
// If QUICK_BUFFERING_LOW_DATA_MS is > AMPLE_AUDIO_MS, we won't exit
// quick buffering in a timely fashion, as the decode pauses when it
// reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach
// QUICK_BUFFERING_LOW_DATA_USECS.
PR_STATIC_ASSERT(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS);
// reaches AMPLE_AUDIO_MS decoded data, and thus we'll never reach
// QUICK_BUFFERING_LOW_DATA_MS.
PR_STATIC_ASSERT(QUICK_BUFFERING_LOW_DATA_MS <= AMPLE_AUDIO_MS);
static TimeDuration UsecsToDuration(PRInt64 aUsecs) {
return TimeDuration::FromMilliseconds(static_cast<double>(aUsecs) / USECS_PER_MS);
static TimeDuration MsToDuration(PRInt64 aMs) {
return TimeDuration::FromMilliseconds(static_cast<double>(aMs));
}
static PRInt64 DurationToUsecs(TimeDuration aDuration) {
return static_cast<PRInt64>(aDuration.ToSeconds() * USECS_PER_S);
static PRInt64 DurationToMs(TimeDuration aDuration) {
return static_cast<PRInt64>(aDuration.ToSeconds() * 1000);
}
class nsAudioMetadataEventRunner : public nsRunnable
@ -214,7 +214,7 @@ PRBool nsBuiltinDecoderStateMachine::HasFutureAudio() const {
// we've completely decoded all audio (but not finished playing it yet
// as per 1).
return !mAudioCompleted &&
(AudioDecodedUsecs() > LOW_AUDIO_USECS || mReader->mAudioQueue.IsFinished());
(AudioDecodedMs() > LOW_AUDIO_MS || mReader->mAudioQueue.IsFinished());
}
PRBool nsBuiltinDecoderStateMachine::HaveNextFrameData() const {
@ -252,19 +252,19 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
// no longer be considered to be "pumping video".
const unsigned videoPumpThreshold = AMPLE_VIDEO_FRAMES / 2;
// After the audio decode fills with more than audioPumpThreshold usecs
// After the audio decode fills with more than audioPumpThresholdMs ms
// of decoded audio, we'll start to check whether the audio or video decode
// is falling behind.
const unsigned audioPumpThreshold = LOW_AUDIO_USECS * 2;
const unsigned audioPumpThresholdMs = LOW_AUDIO_MS * 2;
// Our local low audio threshold. We may increase this if we're slow to
// decode video frames, in order to reduce the chance of audio underruns.
PRInt64 lowAudioThreshold = LOW_AUDIO_USECS;
PRInt64 lowAudioThreshold = LOW_AUDIO_MS;
// Our local ample audio threshold. If we increase lowAudioThreshold, we'll
// also increase this too appropriately (we don't want lowAudioThreshold to
// be greater than ampleAudioThreshold, else we'd stop decoding!).
PRInt64 ampleAudioThreshold = AMPLE_AUDIO_USECS;
PRInt64 ampleAudioThreshold = AMPLE_AUDIO_MS;
MediaQueue<VideoData>& videoQueue = mReader->mVideoQueue;
MediaQueue<SoundData>& audioQueue = mReader->mAudioQueue;
@ -291,7 +291,7 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
// We don't want to consider skipping to the next keyframe if we've
// only just started up the decode loop, so wait until we've decoded
// some audio data before enabling the keyframe skip logic on audio.
if (audioPump && GetDecodedAudioDuration() >= audioPumpThreshold) {
if (audioPump && GetDecodedAudioDuration() >= audioPumpThresholdMs) {
audioPump = PR_FALSE;
}
@ -330,11 +330,11 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
videoPlaying = mReader->DecodeVideoFrame(skipToNextKeyframe, currentTime);
decodeTime = TimeStamp::Now() - start;
}
if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > lowAudioThreshold &&
if (THRESHOLD_FACTOR * DurationToMs(decodeTime) > lowAudioThreshold &&
!HasLowUndecodedData())
{
lowAudioThreshold =
NS_MIN(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
NS_MIN(THRESHOLD_FACTOR * DurationToMs(decodeTime), AMPLE_AUDIO_MS);
ampleAudioThreshold = NS_MAX(THRESHOLD_FACTOR * lowAudioThreshold,
ampleAudioThreshold);
LOG(PR_LOG_DEBUG,
@ -476,7 +476,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
// Calculate the number of samples that have been pushed onto the audio
// hardware.
PRInt64 playedSamples = 0;
if (!UsecsToSamples(audioStartTime, rate, playedSamples)) {
if (!MsToSamples(audioStartTime, rate, playedSamples)) {
NS_WARNING("Int overflow converting playedSamples");
break;
}
@ -488,7 +488,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
// Calculate the timestamp of the next chunk of audio in numbers of
// samples.
PRInt64 sampleTime = 0;
if (!UsecsToSamples(s->mTime, rate, sampleTime)) {
if (!MsToSamples(s->mTime, rate, sampleTime)) {
NS_WARNING("Int overflow converting sampleTime");
break;
}
@ -511,18 +511,18 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
}
{
MonitorAutoEnter mon(mDecoder->GetMonitor());
PRInt64 playedUsecs;
if (!SamplesToUsecs(audioDuration, rate, playedUsecs)) {
NS_WARNING("Int overflow calculating playedUsecs");
PRInt64 playedMs;
if (!SamplesToMs(audioDuration, rate, playedMs)) {
NS_WARNING("Int overflow calculating playedMs");
break;
}
if (!AddOverflow(audioStartTime, playedUsecs, mAudioEndTime)) {
if (!AddOverflow(audioStartTime, playedMs, mAudioEndTime)) {
NS_WARNING("Int overflow calculating audio end time");
break;
}
PRInt64 audioAhead = mAudioEndTime - GetMediaTime();
if (audioAhead > AMPLE_AUDIO_USECS &&
if (audioAhead > AMPLE_AUDIO_MS &&
audioDuration - samplesAtLastSleep > minWriteSamples)
{
samplesAtLastSleep = audioDuration;
@ -530,7 +530,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
// significant amount ahead of the playback position. The decode
// thread will be going to sleep, so we won't get any new samples
// anyway, so sleep until we need to push to the hardware again.
Wait(AMPLE_AUDIO_USECS / 2);
Wait(AMPLE_AUDIO_MS / 2);
// Kick the decode thread; since above we only do a NotifyAll when
// we pop an audio chunk of the queue, the decoder won't wake up if
// we've got no more decoded chunks to push to the hardware. We can
@ -561,8 +561,8 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
mState != DECODER_STATE_SEEKING &&
mState != DECODER_STATE_SHUTDOWN)
{
const PRInt64 DRAIN_BLOCK_USECS = 100000;
Wait(NS_MIN(mAudioEndTime - position, DRAIN_BLOCK_USECS));
const PRInt64 DRAIN_BLOCK_MS = 100;
Wait(NS_MIN(mAudioEndTime - position, DRAIN_BLOCK_MS));
oldPosition = position;
position = GetMediaTime();
}
@ -685,7 +685,7 @@ void nsBuiltinDecoderStateMachine::StopPlayback(eStopMode aMode)
// audio thread can block in the write, and we deadlock trying to acquire
// the audio monitor upon resume playback.
if (IsPlaying()) {
mPlayDuration += DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
mPlayDuration += TimeStamp::Now() - mPlayStartTime;
mPlayStartTime = TimeStamp();
}
if (HasAudio()) {
@ -801,7 +801,7 @@ double nsBuiltinDecoderStateMachine::GetCurrentTime() const
OnDecodeThread(),
"Should be on main, decode, or state machine thread.");
return static_cast<double>(mCurrentFrameTime) / static_cast<double>(USECS_PER_S);
return static_cast<double>(mCurrentFrameTime) / 1000.0;
}
PRInt64 nsBuiltinDecoderStateMachine::GetDuration()
@ -895,7 +895,7 @@ void nsBuiltinDecoderStateMachine::Seek(double aTime)
"We shouldn't already be seeking");
NS_ASSERTION(mState >= DECODER_STATE_DECODING,
"We should have loaded metadata");
double t = aTime * static_cast<double>(USECS_PER_S);
double t = aTime * 1000.0;
if (t > PR_INT64_MAX) {
// Prevent integer overflow.
return;
@ -967,10 +967,10 @@ nsBuiltinDecoderStateMachine::StartDecodeThreads()
return NS_OK;
}
PRInt64 nsBuiltinDecoderStateMachine::AudioDecodedUsecs() const
PRInt64 nsBuiltinDecoderStateMachine::AudioDecodedMs() const
{
NS_ASSERTION(HasAudio(),
"Should only call AudioDecodedUsecs() when we have audio");
"Should only call AudioDecodedMs() when we have audio");
// The amount of audio we have decoded is the amount of audio data we've
// already decoded and pushed to the hardware, plus the amount of audio
// data waiting to be pushed to the hardware.
@ -978,7 +978,7 @@ PRInt64 nsBuiltinDecoderStateMachine::AudioDecodedUsecs() const
return pushed + mReader->mAudioQueue.Duration();
}
PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioUsecs) const
PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioMs) const
{
mDecoder->GetMonitor().AssertCurrentThreadIn();
// We consider ourselves low on decoded data if we're low on audio,
@ -987,7 +987,7 @@ PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioUsecs) cons
// we've not decoded to the end of the video stream.
return ((HasAudio() &&
!mReader->mAudioQueue.IsFinished() &&
AudioDecodedUsecs() < aAudioUsecs)
AudioDecodedMs() < aAudioMs)
||
(!HasAudio() &&
HasVideo() &&
@ -997,7 +997,7 @@ PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioUsecs) cons
PRBool nsBuiltinDecoderStateMachine::HasLowUndecodedData() const
{
return GetUndecodedData() < LOW_DATA_THRESHOLD_USECS;
return GetUndecodedData() < LOW_DATA_THRESHOLD_MS;
}
PRInt64 nsBuiltinDecoderStateMachine::GetUndecodedData() const
@ -1025,7 +1025,7 @@ PRInt64 nsBuiltinDecoderStateMachine::GetUndecodedData() const
NS_ENSURE_SUCCESS(res, 0);
if (start <= currentTime && end >= currentTime) {
return static_cast<PRInt64>((end - currentTime) * USECS_PER_S);
return static_cast<PRInt64>((end - currentTime) * 1000);
}
}
return 0;
@ -1078,7 +1078,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
!mSeekable || mEndTime != -1,
"Active seekable media should have end time");
NS_ASSERTION(!mSeekable || GetDuration() != -1, "Seekable media should have duration");
LOG(PR_LOG_DEBUG, ("%p Media goes from %lld to %lld (duration %lld) seekable=%d",
LOG(PR_LOG_DEBUG, ("%p Media goes from %lldms to %lldms (duration %lldms) seekable=%d",
mDecoder, mStartTime, mEndTime, GetDuration(), mSeekable));
if (mState == DECODER_STATE_SHUTDOWN)
@ -1179,7 +1179,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
"Seek target should lie inside the first audio block after seek");
PRInt64 startTime = (audio && audio->mTime < seekTime) ? audio->mTime : seekTime;
mAudioStartTime = startTime;
mPlayDuration = startTime - mStartTime;
mPlayDuration = MsToDuration(startTime - mStartTime);
if (HasVideo()) {
nsAutoPtr<VideoData> video(mReader->mVideoQueue.PeekFront());
if (video) {
@ -1212,12 +1212,12 @@ nsresult nsBuiltinDecoderStateMachine::Run()
nsCOMPtr<nsIRunnable> stopEvent;
if (GetMediaTime() == mEndTime) {
LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lld) to COMPLETED",
LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lldms) to COMPLETED",
mDecoder, seekTime));
stopEvent = NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::SeekingStoppedAtEnd);
mState = DECODER_STATE_COMPLETED;
} else {
LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lld) to DECODING",
LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lldms) to DECODING",
mDecoder, seekTime));
stopEvent = NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::SeekingStopped);
StartDecoding();
@ -1253,18 +1253,18 @@ nsresult nsBuiltinDecoderStateMachine::Run()
PRBool isLiveStream = mDecoder->GetCurrentStream()->GetLength() == -1;
if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
elapsed < TimeDuration::FromSeconds(BUFFERING_WAIT) &&
(mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
: (GetUndecodedData() < BUFFERING_WAIT * USECS_PER_S)) &&
(mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_MS)
: (GetUndecodedData() < BUFFERING_WAIT * 1000)) &&
!stream->IsDataCachedToEndOfStream(mDecoder->mDecoderPosition) &&
!stream->IsSuspended())
{
LOG(PR_LOG_DEBUG,
("Buffering: %.3lfs/%ds, timeout in %.3lfs %s",
GetUndecodedData() / static_cast<double>(USECS_PER_S),
GetUndecodedData() / 1000.0,
BUFFERING_WAIT,
BUFFERING_WAIT - elapsed.ToSeconds(),
(mQuickBuffering ? "(quick exit)" : "")));
Wait(USECS_PER_S);
Wait(1000);
if (mState == DECODER_STATE_SHUTDOWN)
continue;
} else {
@ -1387,7 +1387,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// audio clock. Just wait and then return, to give the audio clock time
// to tick. This should really wait for a specific signal from the audio
// thread rather than polling after a sleep. See bug 568431 comment 4.
Wait(AUDIO_DURATION_USECS);
Wait(AUDIO_DURATION_MS);
return;
}
@ -1396,18 +1396,18 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// audio, or don't have audio, use the system clock.
PRInt64 clock_time = -1;
if (!IsPlaying()) {
clock_time = mPlayDuration + mStartTime;
clock_time = DurationToMs(mPlayDuration) + mStartTime;
} else {
PRInt64 audio_time = GetAudioClock();
if (HasAudio() && !mAudioCompleted && audio_time != -1) {
clock_time = audio_time;
// Resync against the audio clock, while we're trusting the
// audio clock. This ensures no "drift", particularly on Linux.
mPlayDuration = clock_time - mStartTime;
mPlayDuration = MsToDuration(clock_time - mStartTime);
mPlayStartTime = TimeStamp::Now();
} else {
// Sound is disabled on this system. Sync to the system clock.
clock_time = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
clock_time = DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration);
// Ensure the clock can never go backwards.
NS_ASSERTION(mCurrentFrameTime <= clock_time, "Clock should go forwards");
clock_time = NS_MAX(mCurrentFrameTime, clock_time) + mStartTime;
@ -1416,7 +1416,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// Skip frames up to the frame at the playback position, and figure out
// the time remaining until it's time to display the next frame.
PRInt64 remainingTime = AUDIO_DURATION_USECS;
PRInt64 remainingTime = AUDIO_DURATION_MS;
NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
nsAutoPtr<VideoData> currentFrame;
if (mReader->mVideoQueue.GetSize() > 0) {
@ -1434,8 +1434,8 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// present the next frame.
if (frame && !currentFrame) {
PRInt64 now = IsPlaying()
? (DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration)
: mPlayDuration;
? DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration)
: DurationToMs(mPlayDuration);
remainingTime = frame->mTime - mStartTime - now;
}
}
@ -1445,7 +1445,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
nsMediaStream* stream = mDecoder->GetCurrentStream();
if (mState == DECODER_STATE_DECODING &&
mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING &&
HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_MS) &&
!stream->IsDataCachedToEndOfStream(mDecoder->mDecoderPosition) &&
!stream->IsSuspended() &&
(JustExitedQuickBuffering() || HasLowUndecodedData()))
@ -1466,8 +1466,8 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
if (currentFrame) {
// Decode one frame and display it.
TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) +
UsecsToDuration(currentFrame->mTime - mStartTime);
TimeStamp presTime = mPlayStartTime - mPlayDuration +
MsToDuration(currentFrame->mTime - mStartTime);
NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
{
nsIntSize display = mInfo.mDisplay;
@ -1480,7 +1480,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
}
}
mDecoder->GetFrameStatistics().NotifyPresentedFrame();
PRInt64 now = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
PRInt64 now = DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration);
remainingTime = currentFrame->mEndTime - mStartTime - now;
currentFrame = nsnull;
}
@ -1526,9 +1526,9 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
}
}
void nsBuiltinDecoderStateMachine::Wait(PRInt64 aUsecs) {
void nsBuiltinDecoderStateMachine::Wait(PRInt64 aMs) {
mDecoder->GetMonitor().AssertCurrentThreadIn();
TimeStamp end = TimeStamp::Now() + UsecsToDuration(aUsecs);
TimeStamp end = TimeStamp::Now() + MsToDuration(aMs);
TimeStamp now;
while ((now = TimeStamp::Now()) < end &&
mState != DECODER_STATE_SHUTDOWN &&
@ -1538,6 +1538,8 @@ void nsBuiltinDecoderStateMachine::Wait(PRInt64 aUsecs) {
if (ms == 0 || ms > PR_UINT32_MAX) {
break;
}
NS_ASSERTION(ms <= aMs && ms > 0,
"nsBuiltinDecoderStateMachine::Wait interval very wrong!");
mDecoder->GetMonitor().Wait(PR_MillisecondsToInterval(static_cast<PRUint32>(ms)));
}
}
@ -1569,7 +1571,7 @@ VideoData* nsBuiltinDecoderStateMachine::FindStartTime()
// first actual audio sample we have, we'll inject silence during playback
// to ensure the audio starts at the correct time.
mAudioStartTime = mStartTime;
LOG(PR_LOG_DEBUG, ("%p Media start time is %lld", mDecoder, mStartTime));
LOG(PR_LOG_DEBUG, ("%p Media start time is %lldms", mDecoder, mStartTime));
return v;
}
@ -1594,7 +1596,7 @@ void nsBuiltinDecoderStateMachine::FindEndTime()
mEndTime = endTime;
}
LOG(PR_LOG_DEBUG, ("%p Media end time is %lld", mDecoder, mEndTime));
LOG(PR_LOG_DEBUG, ("%p Media end time is %lldms", mDecoder, mEndTime));
}
void nsBuiltinDecoderStateMachine::UpdateReadyState() {
@ -1648,7 +1650,7 @@ PRBool nsBuiltinDecoderStateMachine::JustExitedQuickBuffering()
{
return !mDecodeStartTime.IsNull() &&
mQuickBuffering &&
(TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromSeconds(QUICK_BUFFER_THRESHOLD_USECS);
(TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromSeconds(QUICK_BUFFER_THRESHOLD_MS);
}
void nsBuiltinDecoderStateMachine::StartBuffering()
@ -1661,7 +1663,7 @@ void nsBuiltinDecoderStateMachine::StartBuffering()
// when the download speed is similar to the decode speed.
mQuickBuffering =
!JustExitedQuickBuffering() &&
decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS);
decodeDuration < TimeDuration::FromMilliseconds(QUICK_BUFFER_THRESHOLD_MS);
mBufferingStart = TimeStamp::Now();
// We need to tell the element that buffering has started.

Просмотреть файл

@ -249,23 +249,23 @@ public:
protected:
// Returns PR_TRUE if we've got less than aAudioUsecs microseconds of decoded
// and playable data. The decoder monitor must be held.
PRBool HasLowDecodedData(PRInt64 aAudioUsecs) const;
// Returns PR_TRUE if we've got less than aAudioMs ms of decoded and playable
// data. The decoder monitor must be held.
PRBool HasLowDecodedData(PRInt64 aAudioMs) const;
// Returns PR_TRUE if we're running low on data which is not yet decoded.
// The decoder monitor must be held.
PRBool HasLowUndecodedData() const;
// Returns the number of microseconds of undecoded data available for
// Returns the number of milliseconds of undecoded data available for
// decoding. The decoder monitor must be held.
PRInt64 GetUndecodedData() const;
// Returns the number of unplayed usecs of audio we've got decoded and/or
// Returns the number of unplayed ms of audio we've got decoded and/or
// pushed to the hardware waiting to play. This is how much audio we can
// play without having to run the audio decoder. The decoder monitor
// must be held.
PRInt64 AudioDecodedUsecs() const;
PRInt64 AudioDecodedMs() const;
// Returns PR_TRUE when there's decoded audio waiting to play.
// The decoder monitor must be held.
@ -274,13 +274,13 @@ protected:
// Returns PR_TRUE if we recently exited "quick buffering" mode.
PRBool JustExitedQuickBuffering();
// Waits on the decoder Monitor for aUsecs microseconds. If the decoder
// monitor is awoken by a Notify() call, we'll continue waiting, unless
// we've moved into shutdown state. This enables us to ensure that we
// wait for a specified time, and that the myriad of Notify()s we do an
// the decoder monitor don't cause the audio thread to be starved. The
// decoder monitor must be locked.
void Wait(PRInt64 aUsecs);
// Waits on the decoder Monitor for aMs. If the decoder monitor is awoken
// by a Notify() call, we'll continue waiting, unless we've moved into
// shutdown state. This enables us to ensure that we wait for a specified
// time, and that the myriad of Notify()s we do an the decoder monitor
// don't cause the audio thread to be starved. The decoder monitor must
// be locked.
void Wait(PRInt64 aMs);
// Dispatches an asynchronous event to update the media element's ready state.
void UpdateReadyState();
@ -330,8 +330,7 @@ protected:
// hardware. This ensures that the playback position advances smoothly, and
// guarantees that we don't try to allocate an impossibly large chunk of
// memory in order to play back silence. Called on the audio thread.
PRUint32 PlaySilence(PRUint32 aSamples,
PRUint32 aChannels,
PRUint32 PlaySilence(PRUint32 aSamples, PRUint32 aChannels,
PRUint64 aSampleOffset);
// Pops an audio chunk from the front of the audio queue, and pushes its
@ -389,9 +388,9 @@ protected:
return mStartTime + mCurrentFrameTime;
}
// Returns an upper bound on the number of microseconds of audio that is
// decoded and playable. This is the sum of the number of usecs of audio which
// is decoded and in the reader's audio queue, and the usecs of unplayed audio
// Returns an upper bound on the number of milliseconds of audio that is
// decoded and playable. This is the sum of the number of ms of audio which
// is decoded and in the reader's audio queue, and the ms of unplayed audio
// which has been pushed to the audio hardware for playback. Note that after
// calling this, the audio hardware may play some of the audio pushed to
// hardware, so this can only be used as a upper bound. The decoder monitor
@ -427,25 +426,25 @@ protected:
// playback position is therefore |Now() - mPlayStartTime +
// mPlayDuration|, which must be adjusted by mStartTime if used with media
// timestamps. Accessed only via the state machine thread.
PRInt64 mPlayDuration;
TimeDuration mPlayDuration;
// Time that buffering started. Used for buffering timeout and only
// accessed on the state machine thread. This is null while we're not
// buffering.
TimeStamp mBufferingStart;
// Start time of the media, in microseconds. This is the presentation
// Start time of the media, in milliseconds. This is the presentation
// time of the first sample decoded from the media, and is used to calculate
// duration and as a bounds for seeking. Accessed on state machine and
// main thread. Access controlled by decoder monitor.
PRInt64 mStartTime;
// Time of the last page in the media, in microseconds. This is the
// Time of the last page in the media, in milliseconds. This is the
// end time of the last sample in the media. Accessed on state
// machine and main thread. Access controlled by decoder monitor.
PRInt64 mEndTime;
// Position to seek to in microseconds when the seek state transition occurs.
// Position to seek to in milliseconds when the seek state transition occurs.
// The decoder monitor lock must be obtained before reading or writing
// this value. Accessed on main and state machine thread.
PRInt64 mSeekTime;
@ -460,25 +459,24 @@ protected:
// in the play state machine's destructor.
nsAutoPtr<nsBuiltinDecoderReader> mReader;
// The time of the current frame in microseconds. This is referenced from
// The time of the current frame in milliseconds. This is referenced from
// 0 which is the initial playback position. Set by the state machine
// thread, and read-only from the main thread to get the current
// time value. Synchronised via decoder monitor.
PRInt64 mCurrentFrameTime;
// The presentation time of the first audio sample that was played in
// microseconds. We can add this to the audio stream position to determine
// the current audio time. Accessed on audio and state machine thread.
// Synchronized by decoder monitor.
// The presentation time of the first audio sample that was played. We can
// add this to the audio stream position to determine the current audio time.
// Accessed on audio and state machine thread. Synchronized by decoder monitor.
PRInt64 mAudioStartTime;
// The end time of the last audio sample that's been pushed onto the audio
// hardware in microseconds. This will approximately be the end time of the
// audio stream, unless another sample is pushed to the hardware.
// hardware. This will approximately be the end time of the audio stream,
// unless another sample is pushed to the hardware.
PRInt64 mAudioEndTime;
// The presentation end time of the last video frame which has been displayed
// in microseconds. Accessed from the state machine thread.
// The presentation end time of the last video frame which has been displayed.
// Accessed from the state machine thread.
PRInt64 mVideoFrameEndTime;
// Volume of playback. 0.0 = muted. 1.0 = full volume. Read/Written

Просмотреть файл

@ -749,7 +749,7 @@ static PRInt32 GetMaxBlocks()
// to the pref are applied.
// Cache size is in KB
PRInt32 cacheSize = nsContentUtils::GetIntPref("media.cache_size", 500*1024);
PRInt64 maxBlocks = static_cast<PRInt64>(cacheSize)*1024/nsMediaCache::BLOCK_SIZE;
PRInt64 maxBlocks = PRInt64(cacheSize)*1024/nsMediaCache::BLOCK_SIZE;
maxBlocks = PR_MAX(maxBlocks, 1);
return PRInt32(PR_MIN(maxBlocks, PR_INT32_MAX));
}
@ -1041,7 +1041,7 @@ nsMediaCache::PredictNextUse(TimeStamp aNow, PRInt32 aBlock)
case PLAYED_BLOCK:
// This block should be managed in LRU mode, and we should impose
// a "replay delay" to reflect the likelihood of replay happening
NS_ASSERTION(static_cast<PRInt64>(bo->mStreamBlock)*BLOCK_SIZE <
NS_ASSERTION(PRInt64(bo->mStreamBlock)*BLOCK_SIZE <
bo->mStream->mStreamOffset,
"Played block after the current stream position?");
prediction = aNow - bo->mLastUseTime +
@ -1049,7 +1049,7 @@ nsMediaCache::PredictNextUse(TimeStamp aNow, PRInt32 aBlock)
break;
case READAHEAD_BLOCK: {
PRInt64 bytesAhead =
static_cast<PRInt64>(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset;
PRInt64(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset;
NS_ASSERTION(bytesAhead >= 0,
"Readahead block before the current stream position?");
PRInt64 millisecondsAhead =

Просмотреть файл

@ -280,10 +280,10 @@ public:
// Return the frame decode/paint related statistics.
FrameStatistics& GetFrameStatistics() { return mFrameStats; }
// Set the duration of the media resource in units of seconds.
// Set the duration of the media resource in units of milliseconds.
// This is called via a channel listener if it can pick up the duration
// from a content header. Must be called from the main thread only.
virtual void SetDuration(double aDuration) = 0;
virtual void SetDuration(PRInt64 aDuration) = 0;
// Set a flag indicating whether seeking is supported
virtual void SetSeekable(PRBool aSeekable) = 0;

Просмотреть файл

@ -224,7 +224,7 @@ nsMediaChannelStream::OnStartRequest(nsIRequest* aRequest)
if (NS_SUCCEEDED(rv)) {
double duration = durationText.ToDouble(&ec);
if (ec == NS_OK && duration >= 0) {
mDecoder->SetDuration(duration);
mDecoder->SetDuration(PRInt64(NS_round(duration*1000)));
}
}
}

Просмотреть файл

@ -107,7 +107,7 @@ public:
*aReliable = seconds >= 1.0;
if (seconds <= 0.0)
return 0.0;
return static_cast<double>(mAccumulatedBytes)/seconds;
return double(mAccumulatedBytes)/seconds;
}
double GetRate(TimeStamp aNow, PRPackedBool* aReliable) {
TimeDuration time = mAccumulatedTime;
@ -118,7 +118,7 @@ public:
*aReliable = seconds >= 3.0;
if (seconds <= 0.0)
return 0.0;
return static_cast<double>(mAccumulatedBytes)/seconds;
return double(mAccumulatedBytes)/seconds;
}
private:
PRInt64 mAccumulatedBytes;

Просмотреть файл

@ -126,6 +126,7 @@ nsTheoraState::nsTheoraState(ogg_page* aBosPage) :
nsOggCodecState(aBosPage),
mSetup(0),
mCtx(0),
mFrameDuration(0),
mPixelAspectRatio(0)
{
MOZ_COUNT_CTOR(nsTheoraState);
@ -145,9 +146,22 @@ PRBool nsTheoraState::Init() {
if (!mActive)
return PR_FALSE;
PRInt64 n = mInfo.aspect_numerator;
PRInt64 d = mInfo.aspect_denominator;
PRInt64 n = mInfo.fps_numerator;
PRInt64 d = mInfo.fps_denominator;
PRInt64 f;
if (!MulOverflow(1000, d, f)) {
return mActive = PR_FALSE;
}
f /= n;
if (f > PR_UINT32_MAX) {
return mActive = PR_FALSE;
}
mFrameDuration = static_cast<PRUint32>(f);
n = mInfo.aspect_numerator;
d = mInfo.aspect_denominator;
mPixelAspectRatio = (n == 0 || d == 0) ?
1.0f : static_cast<float>(n) / static_cast<float>(d);
@ -230,7 +244,7 @@ PRInt64 nsTheoraState::Time(th_info* aInfo, PRInt64 aGranulepos)
PRInt64 frameno = iframe + pframe - TH_VERSION_CHECK(aInfo, 3, 2, 1);
if (!AddOverflow(frameno, 1, t))
return -1;
if (!MulOverflow(t, USECS_PER_S, t))
if (!MulOverflow(t, 1000, t))
return -1;
if (!MulOverflow(t, aInfo->fps_denominator, t))
return -1;
@ -243,7 +257,7 @@ PRInt64 nsTheoraState::StartTime(PRInt64 granulepos) {
}
PRInt64 t = 0;
PRInt64 frameno = th_granule_frame(mCtx, granulepos);
if (!MulOverflow(frameno, USECS_PER_S, t))
if (!MulOverflow(frameno, 1000, t))
return -1;
if (!MulOverflow(t, mInfo.fps_denominator, t))
return -1;
@ -253,22 +267,25 @@ PRInt64 nsTheoraState::StartTime(PRInt64 granulepos) {
PRInt64
nsTheoraState::MaxKeyframeOffset()
{
// Determine the maximum time in microseconds by which a key frame could
// Determine the maximum time in milliseconds by which a key frame could
// offset for the theora bitstream. Theora granulepos encode time as:
// ((key_frame_number << granule_shift) + frame_offset).
// Therefore the maximum possible time by which any frame could be offset
// from a keyframe is the duration of (1 << granule_shift) - 1) frames.
PRInt64 frameDuration;
// Max number of frames keyframe could possibly be offset.
PRInt64 keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;
PRInt64 keyframeDiff;
// Length of frame in usecs.
PRInt64 shift = mInfo.keyframe_granule_shift;
// Max number of frames keyframe could possibly be offset.
keyframeDiff = (1 << shift) - 1;
// Length of frame in ms.
PRInt64 d = 0; // d will be 0 if multiplication overflows.
MulOverflow(USECS_PER_S, mInfo.fps_denominator, d);
MulOverflow(1000, mInfo.fps_denominator, d);
frameDuration = d / mInfo.fps_numerator;
// Total time in usecs keyframe can be offset from any given frame.
// Total time in ms keyframe can be offset from any given frame.
return frameDuration * keyframeDiff;
}
@ -373,7 +390,7 @@ PRInt64 nsVorbisState::Time(vorbis_info* aInfo, PRInt64 aGranulepos)
return -1;
}
PRInt64 t = 0;
MulOverflow(USECS_PER_S, aGranulepos, t);
MulOverflow(1000, aGranulepos, t);
return t / aInfo->rate;
}
@ -505,7 +522,7 @@ PRBool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
// Extract the start time.
n = LEInt64(p + INDEX_FIRST_NUMER_OFFSET);
PRInt64 t;
if (!MulOverflow(n, USECS_PER_S, t)) {
if (!MulOverflow(n, 1000, t)) {
return (mActive = PR_FALSE);
} else {
startTime = t / timeDenom;
@ -513,7 +530,7 @@ PRBool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
// Extract the end time.
n = LEInt64(p + INDEX_LAST_NUMER_OFFSET);
if (!MulOverflow(n, USECS_PER_S, t)) {
if (!MulOverflow(n, 1000, t)) {
return (mActive = PR_FALSE);
} else {
endTime = t / timeDenom;
@ -573,11 +590,11 @@ PRBool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
{
return (mActive = PR_FALSE);
}
PRInt64 timeUsecs = 0;
if (!MulOverflow(time, USECS_PER_S, timeUsecs))
PRInt64 timeMs = 0;
if (!MulOverflow(time, 1000, timeMs))
return mActive = PR_FALSE;
timeUsecs /= timeDenom;
keyPoints->Add(offset, timeUsecs);
timeMs /= timeDenom;
keyPoints->Add(offset, timeMs);
numKeyPointsRead++;
}
@ -696,7 +713,7 @@ PRBool nsSkeletonState::DecodeHeader(ogg_packet* aPacket)
// presentation time exists in all versions.
PRInt64 n = LEInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_NUMERATOR_OFFSET);
PRInt64 d = LEInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
mPresentationTime = d == 0 ? 0 : (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
mPresentationTime = d == 0 ? 0 : (static_cast<float>(n) / static_cast<float>(d)) * 1000;
mVersion = SKELETON_VERSION(verMajor, verMinor);
if (mVersion < SKELETON_VERSION(4,0) ||

Просмотреть файл

@ -183,7 +183,7 @@ public:
virtual PRInt64 StartTime(PRInt64 granulepos);
virtual PRBool Init();
// Returns the maximum number of microseconds which a keyframe can be offset
// Returns the maximum number of milliseconds which a keyframe can be offset
// from any given interframe.
PRInt64 MaxKeyframeOffset();
@ -195,6 +195,9 @@ public:
th_setup_info *mSetup;
th_dec_ctx* mCtx;
// Frame duration in ms.
PRUint32 mFrameDuration;
float mPixelAspectRatio;
};
@ -230,7 +233,7 @@ public:
// Offset from start of segment/link-in-the-chain in bytes.
PRInt64 mOffset;
// Presentation time in usecs.
// Presentation time in ms.
PRInt64 mTime;
PRBool IsNull() {
@ -317,10 +320,10 @@ private:
return mKeyPoints.Length();
}
// Presentation time of the first sample in this stream in usecs.
// Presentation time of the first sample in this stream in ms.
const PRInt64 mStartTime;
// End time of the last sample in this stream in usecs.
// End time of the last sample in this stream in ms.
const PRInt64 mEndTime;
private:

Просмотреть файл

@ -68,15 +68,15 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
// position, we'll just decode forwards rather than performing a bisection
// search. If we have Theora video we use the maximum keyframe interval as
// this value, rather than SEEK_DECODE_MARGIN. This makes small seeks faster.
#define SEEK_DECODE_MARGIN 2000000
#define SEEK_DECODE_MARGIN 2000
// The number of microseconds of "fuzz" we use in a bisection search over
// The number of milliseconds of "fuzz" we use in a bisection search over
// HTTP. When we're seeking with fuzz, we'll stop the search if a bisection
// lands between the seek target and SEEK_FUZZ_USECS microseconds before the
// lands between the seek target and SEEK_FUZZ_MS milliseconds before the
// seek target. This is becaue it's usually quicker to just keep downloading
// from an exisiting connection than to do another bisection inside that
// small range, which would open a new HTTP connetion.
#define SEEK_FUZZ_USECS 500000
#define SEEK_FUZZ_MS 500
enum PageSyncResult {
PAGE_SYNC_ERROR = 1,
@ -390,7 +390,8 @@ nsresult nsOggReader::DecodeVorbis(nsTArray<nsAutoPtr<SoundData> >& aChunks,
}
PRInt64 duration = mVorbisState->Time((PRInt64)samples);
PRInt64 startTime = mVorbisState->Time(mVorbisGranulepos);
PRInt64 startTime = (mVorbisGranulepos != -1) ?
mVorbisState->Time(mVorbisGranulepos) : -1;
SoundData* s = new SoundData(mPageOffset,
startTime,
duration,
@ -543,8 +544,9 @@ nsresult nsOggReader::DecodeTheora(nsTArray<nsAutoPtr<VideoData> >& aFrames,
if (ret != 0 && ret != TH_DUPFRAME) {
return NS_ERROR_FAILURE;
}
PRInt64 time = mTheoraState->StartTime(aPacket->granulepos);
PRInt64 endTime = mTheoraState->Time(aPacket->granulepos);
PRInt64 time = (aPacket->granulepos != -1)
? mTheoraState->StartTime(aPacket->granulepos) : -1;
PRInt64 endTime = time != -1 ? time + mTheoraState->mFrameDuration : -1;
if (ret == TH_DUPFRAME) {
VideoData* v = VideoData::CreateDuplicate(mPageOffset,
time,
@ -702,7 +704,7 @@ PRBool nsOggReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
"Granulepos calculation is incorrect!");
frames[i]->mTime = mTheoraState->StartTime(granulepos);
frames[i]->mEndTime = mTheoraState->Time(granulepos);
frames[i]->mEndTime = frames[i]->mTime + mTheoraState->mFrameDuration;
NS_ASSERTION(frames[i]->mEndTime >= frames[i]->mTime, "Frame must start before it ends.");
frames[i]->mTimecode = granulepos;
}
@ -1255,7 +1257,7 @@ nsresult nsOggReader::SeekInBufferedRange(PRInt64 aTarget,
aStartTime,
aEndTime,
PR_FALSE);
res = SeekBisection(keyframeTime, k, SEEK_FUZZ_USECS);
res = SeekBisection(keyframeTime, k, SEEK_FUZZ_MS);
NS_ASSERTION(mTheoraGranulepos == -1, "SeekBisection must reset Theora decode");
NS_ASSERTION(mVorbisGranulepos == -1, "SeekBisection must reset Vorbis decode");
}
@ -1281,7 +1283,7 @@ nsresult nsOggReader::SeekInUnbuffered(PRInt64 aTarget,
LOG(PR_LOG_DEBUG, ("%p Seeking in unbuffered data to %lldms using bisection search", mDecoder, aTarget));
// If we've got an active Theora bitstream, determine the maximum possible
// time in usecs which a keyframe could be before a given interframe. We
// time in ms which a keyframe could be before a given interframe. We
// subtract this from our seek target, seek to the new target, and then
// will decode forward to the original seek target. We should encounter a
// keyframe in that interval. This prevents us from needing to run two
@ -1300,7 +1302,7 @@ nsresult nsOggReader::SeekInUnbuffered(PRInt64 aTarget,
// Minimize the bisection search space using the known timestamps from the
// buffered ranges.
SeekRange k = SelectSeekRange(aRanges, seekTarget, aStartTime, aEndTime, PR_FALSE);
nsresult res = SeekBisection(seekTarget, k, SEEK_FUZZ_USECS);
nsresult res = SeekBisection(seekTarget, k, SEEK_FUZZ_MS);
NS_ASSERTION(mTheoraGranulepos == -1, "SeekBisection must reset Theora decode");
NS_ASSERTION(mVorbisGranulepos == -1, "SeekBisection must reset Vorbis decode");
return res;
@ -1784,8 +1786,9 @@ nsresult nsOggReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
// find an end time.
PRInt64 endTime = FindEndTime(startOffset, endOffset, PR_TRUE, &state);
if (endTime != -1) {
aBuffered->Add(startTime / static_cast<double>(USECS_PER_S),
(endTime - aStartTime) / static_cast<double>(USECS_PER_S));
endTime -= aStartTime;
aBuffered->Add(static_cast<double>(startTime) / 1000.0,
static_cast<double>(endTime) / 1000.0);
}
}
}

Просмотреть файл

@ -104,8 +104,8 @@ private:
// Returns PR_TRUE if we should decode up to the seek target rather than
// seeking to the target using a bisection search or index-assisted seek.
// We should do this if the seek target (aTarget, in usecs), lies not too far
// ahead of the current playback position (aCurrentTime, in usecs).
// We should do this if the seek target (aTarget, in ms), lies not too far
// ahead of the current playback position (aCurrentTime, in ms).
PRBool CanDecodeToTarget(PRInt64 aTarget,
PRInt64 aCurrentTime);
@ -152,10 +152,10 @@ private:
}
PRInt64 mOffsetStart, mOffsetEnd; // in bytes.
PRInt64 mTimeStart, mTimeEnd; // in usecs.
PRInt64 mTimeStart, mTimeEnd; // in ms.
};
// Seeks to aTarget usecs in the buffered range aRange using bisection search,
// Seeks to aTarget ms in the buffered range aRange using bisection search,
// or to the keyframe prior to aTarget if we have video. aStartTime must be
// the presentation time at the start of media, and aEndTime the time at
// end of media. aRanges must be the time/byte ranges buffered in the media
@ -166,7 +166,7 @@ private:
const nsTArray<SeekRange>& aRanges,
const SeekRange& aRange);
// Seeks to before aTarget usecs in media using bisection search. If the media
// Seeks to before aTarget ms in media using bisection search. If the media
// has video, this will seek to before the keyframe required to render the
// media at aTarget. Will use aRanges in order to narrow the bisection
// search space. aStartTime must be the presentation time at the start of
@ -208,11 +208,11 @@ private:
PRBool ReadOggPacket(nsOggCodecState* aCodecState, ogg_packet* aPacket);
// Performs a seek bisection to move the media stream's read cursor to the
// last ogg page boundary which has end time before aTarget usecs on both the
// last ogg page boundary which has end time before aTarget ms on both the
// Theora and Vorbis bitstreams. Limits its search to data inside aRange;
// i.e. it will only read inside of the aRange's start and end offsets.
// aFuzz is the number of usecs of leniency we'll allow; we'll terminate the
// seek when we land in the range (aTime - aFuzz, aTime) usecs.
// aFuzz is the number of ms of leniency we'll allow; we'll terminate the
// seek when we land in the range (aTime - aFuzz, aTime) ms.
nsresult SeekBisection(PRInt64 aTarget,
const SeekRange& aRange,
PRUint32 aFuzz);
@ -228,7 +228,7 @@ private:
nsresult GetSeekRanges(nsTArray<SeekRange>& aRanges);
// Returns the range in which you should perform a seek bisection if
// you wish to seek to aTarget usecs, given the known (buffered) byte ranges
// you wish to seek to aTarget ms, given the known (buffered) byte ranges
// in aRanges. If aExact is PR_TRUE, we only return an exact copy of a
// range in which aTarget lies, or a null range if aTarget isn't contained
// in any of the (buffered) ranges. Otherwise, when aExact is PR_FALSE,

Просмотреть файл

@ -129,7 +129,7 @@ nsresult nsRawReader::ReadMetadata(nsVideoInfo* aInfo)
if (length != -1) {
mozilla::MonitorAutoExit autoExitMonitor(mMonitor);
mozilla::MonitorAutoEnter autoMonitor(mDecoder->GetMonitor());
mDecoder->GetStateMachine()->SetDuration(USECS_PER_S *
mDecoder->GetStateMachine()->SetDuration(1000 *
(length - sizeof(nsRawVideoHeader)) /
(mFrameSize * mFrameRate));
}
@ -184,7 +184,7 @@ PRBool nsRawReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
if (!mFrameSize)
return PR_FALSE; // Metadata read failed. We should refuse to play.
PRInt64 currentFrameTime = USECS_PER_S * mCurrentFrame / mFrameRate;
PRInt64 currentFrameTime = 1000 * mCurrentFrame / mFrameRate;
PRUint32 length = mFrameSize - sizeof(nsRawPacketHeader);
nsAutoPtr<PRUint8> buffer(new PRUint8[length]);
@ -212,7 +212,7 @@ PRBool nsRawReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
break;
mCurrentFrame++;
currentFrameTime += static_cast<double>(USECS_PER_S) / mFrameRate;
currentFrameTime += 1000.0 / mFrameRate;
}
VideoData::YCbCrBuffer b;
@ -237,7 +237,7 @@ PRBool nsRawReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
mDecoder->GetImageContainer(),
-1,
currentFrameTime,
currentFrameTime + (USECS_PER_S / mFrameRate),
currentFrameTime + (1000 / mFrameRate),
b,
1, // In raw video every frame is a keyframe
-1);
@ -247,7 +247,7 @@ PRBool nsRawReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
mVideoQueue.Push(v);
mCurrentFrame++;
decoded++;
currentFrameTime += USECS_PER_S / mFrameRate;
currentFrameTime += 1000 / mFrameRate;
return PR_TRUE;
}
@ -264,7 +264,7 @@ nsresult nsRawReader::Seek(PRInt64 aTime, PRInt64 aStartTime, PRInt64 aEndTime,
PRUint32 frame = mCurrentFrame;
if (aTime >= UINT_MAX)
return NS_ERROR_FAILURE;
mCurrentFrame = aTime * mFrameRate / USECS_PER_S;
mCurrentFrame = aTime * mFrameRate / 1000;
PRUint32 offset;
if (!MulOverflow32(mCurrentFrame, mFrameSize, offset))

Просмотреть файл

@ -13,7 +13,7 @@
function on_metadataloaded() {
var v = document.getElementById('v');
var d = Math.round(v.duration*1000);
ok(d == 4000, "Checking duration: " + d);
ok(d == 3999, "Checking duration: " + d);
SimpleTest.finish();
}

Просмотреть файл

@ -171,8 +171,9 @@ nsresult nsWaveReader::ReadMetadata(nsVideoInfo* aInfo)
MonitorAutoExit exitReaderMon(mMonitor);
MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
mDecoder->GetStateMachine()->SetDuration(
static_cast<PRInt64>(BytesToTime(GetDataLength()) * USECS_PER_S));
float d = floorf(BytesToTime(GetDataLength() * 1000));
NS_ASSERTION(d <= PR_INT64_MAX, "Duration overflow");
mDecoder->GetStateMachine()->SetDuration(static_cast<PRInt64>(d));
return NS_OK;
}
@ -228,18 +229,16 @@ PRBool nsWaveReader::DecodeAudioData()
}
}
double posTime = BytesToTime(pos);
double readSizeTime = BytesToTime(readSize);
NS_ASSERTION(posTime <= PR_INT64_MAX / USECS_PER_S, "posTime overflow");
NS_ASSERTION(readSizeTime <= PR_INT64_MAX / USECS_PER_S, "readSizeTime overflow");
float posTime = BytesToTime(pos);
float readSizeTime = BytesToTime(readSize);
NS_ASSERTION(posTime <= PR_INT64_MAX / 1000, "posTime overflow");
NS_ASSERTION(readSizeTime <= PR_INT64_MAX / 1000, "readSizeTime overflow");
NS_ASSERTION(samples < PR_INT32_MAX, "samples overflow");
mAudioQueue.Push(new SoundData(pos,
static_cast<PRInt64>(posTime * USECS_PER_S),
static_cast<PRInt64>(readSizeTime * USECS_PER_S),
mAudioQueue.Push(new SoundData(pos, static_cast<PRInt64>(posTime * 1000),
static_cast<PRInt64>(readSizeTime * 1000),
static_cast<PRInt32>(samples),
sampleBuffer.forget(),
mChannels));
sampleBuffer.forget(), mChannels));
return PR_TRUE;
}
@ -259,15 +258,15 @@ nsresult nsWaveReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTim
MonitorAutoEnter mon(mMonitor);
NS_ASSERTION(mDecoder->OnStateMachineThread(),
"Should be on state machine thread.");
LOG(PR_LOG_DEBUG, ("%p About to seek to %lld", mDecoder, aTarget));
LOG(PR_LOG_DEBUG, ("%p About to seek to %lldms", mDecoder, aTarget));
if (NS_FAILED(ResetDecode())) {
return NS_ERROR_FAILURE;
}
double d = BytesToTime(GetDataLength());
NS_ASSERTION(d < PR_INT64_MAX / USECS_PER_S, "Duration overflow");
PRInt64 duration = static_cast<PRInt64>(d * USECS_PER_S);
double seekTime = NS_MIN(aTarget, duration) / static_cast<double>(USECS_PER_S);
PRInt64 position = RoundDownToSample(static_cast<PRInt64>(TimeToBytes(seekTime)));
float d = BytesToTime(GetDataLength());
NS_ASSERTION(d < PR_INT64_MAX / 1000, "Duration overflow");
PRInt64 duration = static_cast<PRInt64>(d) * 1000;
PRInt64 seekTime = NS_MIN(aTarget, duration);
PRInt64 position = RoundDownToSample(static_cast<PRInt64>(TimeToBytes(seekTime) / 1000.f));
NS_ASSERTION(PR_INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
position += mWavePCMOffset;
return mDecoder->GetCurrentStream()->Seek(nsISeekableStream::NS_SEEK_SET, position);
@ -282,8 +281,8 @@ nsresult nsWaveReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
NS_ASSERTION(startOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
NS_ASSERTION(endOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
aBuffered->Add(BytesToTime(startOffset - mWavePCMOffset),
BytesToTime(endOffset - mWavePCMOffset));
aBuffered->Add(floorf(BytesToTime(startOffset - mWavePCMOffset) * 1000.f) / 1000.0,
floorf(BytesToTime(endOffset - mWavePCMOffset) * 1000.f) / 1000.0);
startOffset = mDecoder->GetCurrentStream()->GetNextCachedData(endOffset);
}
return NS_OK;
@ -509,7 +508,7 @@ nsWaveReader::FindDataOffset()
return PR_TRUE;
}
double
float
nsWaveReader::BytesToTime(PRInt64 aBytes) const
{
NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
@ -517,7 +516,7 @@ nsWaveReader::BytesToTime(PRInt64 aBytes) const
}
PRInt64
nsWaveReader::TimeToBytes(double aTime) const
nsWaveReader::TimeToBytes(float aTime) const
{
NS_ABORT_IF_FALSE(aTime >= 0.0f, "Must be >= 0");
return RoundDownToSample(PRInt64(aTime * mSampleRate * mSampleSize));

Просмотреть файл

@ -76,13 +76,13 @@ private:
// Returns the number of seconds that aBytes represents based on the
// current audio parameters. e.g. 176400 bytes is 1 second at 16-bit
// stereo 44.1kHz. The time is rounded to the nearest microsecond.
double BytesToTime(PRInt64 aBytes) const;
// stereo 44.1kHz. The time is rounded to the nearest millisecond.
float BytesToTime(PRInt64 aBytes) const;
// Returns the number of bytes that aTime represents based on the current
// audio parameters. e.g. 1 second is 176400 bytes at 16-bit stereo
// 44.1kHz.
PRInt64 TimeToBytes(double aTime) const;
PRInt64 TimeToBytes(float aTime) const;
// Rounds aBytes down to the nearest complete sample. Assumes beginning
// of byte range is already sample aligned by caller.

Просмотреть файл

@ -44,6 +44,7 @@
using mozilla::MonitorAutoEnter;
static const double NS_PER_S = 1e9;
static const double MS_PER_S = 1e3;
static PRUint32
VIntLength(unsigned char aFirstByte, PRUint32* aMask)

Просмотреть файл

@ -65,13 +65,14 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
#define SEEK_LOG(type, msg)
#endif
static const unsigned NS_PER_USEC = 1000;
static const unsigned NS_PER_MS = 1000000;
static const double NS_PER_S = 1e9;
static const double MS_PER_S = 1e3;
// If a seek request is within SEEK_DECODE_MARGIN microseconds of the
// If a seek request is within SEEK_DECODE_MARGIN milliseconds of the
// current time, decode ahead from the current frame rather than performing
// a full seek.
static const int SEEK_DECODE_MARGIN = 250000;
static const int SEEK_DECODE_MARGIN = 250;
NS_SPECIALIZE_TEMPLATE
class nsAutoRefTraits<NesteggPacketHolder> : public nsPointerRefTraits<NesteggPacketHolder>
@ -135,7 +136,7 @@ nsWebMReader::nsWebMReader(nsBuiltinDecoder* aDecoder)
mChannels(0),
mVideoTrack(0),
mAudioTrack(0),
mAudioStartUsec(-1),
mAudioStartMs(-1),
mAudioSamples(0),
mHasVideo(PR_FALSE),
mHasAudio(PR_FALSE)
@ -183,7 +184,7 @@ nsresult nsWebMReader::Init(nsBuiltinDecoderReader* aCloneDonor)
nsresult nsWebMReader::ResetDecode()
{
mAudioSamples = 0;
mAudioStartUsec = -1;
mAudioStartMs = -1;
nsresult res = NS_OK;
if (NS_FAILED(nsBuiltinDecoderReader::ResetDecode())) {
res = NS_ERROR_FAILURE;
@ -228,7 +229,7 @@ nsresult nsWebMReader::ReadMetadata(nsVideoInfo* aInfo)
if (r == 0) {
MonitorAutoExit exitReaderMon(mMonitor);
MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_USEC);
mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_MS);
}
unsigned int ntracks = 0;
@ -434,23 +435,23 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
}
const PRUint32 rate = mVorbisDsp.vi->rate;
PRUint64 tstamp_usecs = tstamp / NS_PER_USEC;
if (mAudioStartUsec == -1) {
PRUint64 tstamp_ms = tstamp / NS_PER_MS;
if (mAudioStartMs == -1) {
// This is the first audio chunk. Assume the start time of our decode
// is the start of this chunk.
mAudioStartUsec = tstamp_usecs;
mAudioStartMs = tstamp_ms;
}
// If there's a gap between the start of this sound chunk and the end of
// the previous sound chunk, we need to increment the packet count so that
// the vorbis decode doesn't use data from before the gap to help decode
// from after the gap.
PRInt64 tstamp_samples = 0;
if (!UsecsToSamples(tstamp_usecs, rate, tstamp_samples)) {
if (!MsToSamples(tstamp_ms, rate, tstamp_samples)) {
NS_WARNING("Int overflow converting WebM timestamp to samples");
return PR_FALSE;
}
PRInt64 decoded_samples = 0;
if (!UsecsToSamples(mAudioStartUsec, rate, decoded_samples)) {
if (!MsToSamples(mAudioStartMs, rate, decoded_samples)) {
NS_WARNING("Int overflow converting WebM start time to samples");
return PR_FALSE;
}
@ -460,13 +461,13 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
}
if (tstamp_samples > decoded_samples) {
#ifdef DEBUG
PRInt64 usecs = 0;
LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld samples, in audio stream\n",
SamplesToUsecs(tstamp_samples - decoded_samples, rate, usecs) ? usecs: -1,
PRInt64 ms = 0;
LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lldms, %lld samples, in audio stream\n",
SamplesToMs(tstamp_samples - decoded_samples, rate, ms) ? ms: -1,
tstamp_samples - decoded_samples));
#endif
mPacketCount++;
mAudioStartUsec = tstamp_usecs;
mAudioStartMs = tstamp_ms;
mAudioSamples = 0;
}
@ -502,17 +503,17 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
}
PRInt64 duration = 0;
if (!SamplesToUsecs(samples, rate, duration)) {
if (!SamplesToMs(samples, rate, duration)) {
NS_WARNING("Int overflow converting WebM audio duration");
return PR_FALSE;
}
PRInt64 total_duration = 0;
if (!SamplesToUsecs(total_samples, rate, total_duration)) {
if (!SamplesToMs(total_samples, rate, total_duration)) {
NS_WARNING("Int overflow converting WebM audio total_duration");
return PR_FALSE;
}
PRInt64 time = tstamp_usecs + total_duration;
PRInt64 time = tstamp_ms + total_duration;
total_samples += samples;
SoundData* s = new SoundData(aOffset,
time,
@ -667,11 +668,11 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
if (endTime == -1) {
return PR_FALSE;
}
next_tstamp = endTime * NS_PER_USEC;
next_tstamp = endTime * NS_PER_MS;
}
}
PRInt64 tstamp_usecs = tstamp / NS_PER_USEC;
PRInt64 tstamp_ms = tstamp / NS_PER_MS;
for (PRUint32 i = 0; i < count; ++i) {
unsigned char* data;
size_t length;
@ -684,7 +685,7 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
memset(&si, 0, sizeof(si));
si.sz = sizeof(si);
vpx_codec_peek_stream_info(&vpx_codec_vp8_dx_algo, data, length, &si);
if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
if (aKeyframeSkip && (!si.is_kf || tstamp_ms < aTimeThreshold)) {
// Skipping to next keyframe...
parsed++; // Assume 1 frame per chunk.
continue;
@ -701,7 +702,7 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
// If the timestamp of the video frame is less than
// the time threshold required then it is not added
// to the video queue and won't be displayed.
if (tstamp_usecs < aTimeThreshold) {
if (tstamp_ms < aTimeThreshold) {
parsed++; // Assume 1 frame per chunk.
continue;
}
@ -732,8 +733,8 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
VideoData *v = VideoData::Create(mInfo,
mDecoder->GetImageContainer(),
holder->mOffset,
tstamp_usecs,
next_tstamp / NS_PER_USEC,
tstamp_ms,
next_tstamp / NS_PER_MS,
b,
si.is_kf,
-1);
@ -773,7 +774,7 @@ nsresult nsWebMReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTim
return NS_ERROR_FAILURE;
}
PRUint32 trackToSeek = mHasVideo ? mVideoTrack : mAudioTrack;
int r = nestegg_track_seek(mContext, trackToSeek, aTarget * NS_PER_USEC);
int r = nestegg_track_seek(mContext, trackToSeek, aTarget * NS_PER_MS);
if (r != 0) {
return NS_ERROR_FAILURE;
}
@ -802,7 +803,7 @@ nsresult nsWebMReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
nsresult res = stream->GetCachedRanges(ranges);
NS_ENSURE_SUCCESS(res, res);
PRInt64 startTimeOffsetNS = aStartTime * NS_PER_USEC;
PRInt64 startTimeOffsetNS = aStartTime * NS_PER_MS;
for (PRUint32 index = 0; index < ranges.Length(); index++) {
mBufferedState->CalculateBufferedForRange(aBuffered,
ranges[index].mStart,

Просмотреть файл

@ -192,8 +192,8 @@ private:
// Returns PR_TRUE if we should decode up to the seek target rather than
// seeking to the target using an index-assisted seek. We should do this
// if the seek target (aTarget, in usecs), lies not too far ahead of the
// current playback position (aCurrentTime, in usecs).
// if the seek target (aTarget, in ms), lies not too far ahead of the
// current playback position (aCurrentTime, in ms).
PRBool CanDecodeToTarget(PRInt64 aTarget, PRInt64 aCurrentTime);
private:
@ -221,8 +221,8 @@ private:
PRUint32 mVideoTrack;
PRUint32 mAudioTrack;
// Time in microseconds of the start of the first audio sample we've decoded.
PRInt64 mAudioStartUsec;
// Time in ms of the start of the first audio sample we've decoded.
PRInt64 mAudioStartMs;
// Number of samples we've decoded since decoding began at mAudioStartMs.
PRUint64 mAudioSamples;