Bug 1179094: Use TimeUnit in PlatformDecoderModule. r=cpearce

Jean-Yves Avenard 2015-07-01 16:50:27 +10:00
Parent f41604e20c
Commit c7b6fa505d
17 changed files with 128 additions and 92 deletions

View file

@@ -119,6 +119,14 @@ public:
return TimeUnit(INT64_MAX);
}
static TimeUnit Invalid() {
TimeUnit ret;
ret.mValue = CheckedInt64(INT64_MAX);
// Force an overflow to render the CheckedInt invalid.
ret.mValue += 1;
return ret;
}
int64_t ToMicroseconds() const {
return mValue.value();
}
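
Invalid() leans on a property of mfbt's CheckedInt64: an overflowing operation latches the value into an invalid state, and every subsequent operation keeps it invalid, so an invalid TimeUnit can flow through later arithmetic and still be caught by a single IsValid() check at the end. A minimal standalone stand-in (an illustrative sketch, not Mozilla's actual CheckedInt) showing the latch:

#include <cstdint>
#include <limits>

struct CheckedSketch {
  int64_t mValue = 0;
  bool mValid = true;

  CheckedSketch& operator+=(int64_t aRhs) {
    // Latch: once invalid, always invalid. Only positive overflow is
    // handled here, which is enough to model the Invalid() trick.
    if (!mValid ||
        (aRhs > 0 && mValue > std::numeric_limits<int64_t>::max() - aRhs)) {
      mValid = false;
    } else {
      mValue += aRhs;
    }
    return *this;
  }
};

int main() {
  CheckedSketch v;
  v.mValue = std::numeric_limits<int64_t>::max();  // like TimeUnit's INT64_MAX
  v += 1;   // force the overflow, mirroring TimeUnit::Invalid()
  v += -5;  // stays invalid: later arithmetic cannot revive it
  return v.mValid ? 0 : 1;
}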

View file

@@ -29,6 +29,10 @@ CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}
media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
return (media::TimeUnit::FromMicroseconds(aFrames) * USECS_PER_S) / aRate;
}
// Converts from microseconds to number of audio frames, given the specified
// audio rate.
CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
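
FramesToTimeUnit carries the overflow state inside the returned TimeUnit itself, so callers test IsValid() instead of CheckedInt64::isValid(). A hedged usage sketch (USECS_PER_S is 1000000, as in VideoUtils; the asserts only spell out the expected values):

// 441 frames at 44100 Hz: 441 * 1000000 / 44100 == 10000 us exactly.
media::TimeUnit t = FramesToTimeUnit(441, 44100);
MOZ_ASSERT(t.IsValid() && t.ToMicroseconds() == 10000);

// An absurd frame count overflows the intermediate multiplication;
// the result is an invalid TimeUnit rather than a silently wrapped value.
media::TimeUnit bad = FramesToTimeUnit(INT64_MAX / 2, 44100);
MOZ_ASSERT(!bad.IsValid());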

View file

@@ -127,10 +127,11 @@ media::TimeIntervals GetEstimatedBufferedTimeRanges(mozilla::MediaResource* aStr
int64_t aDurationUsecs);
// Converts from number of audio frames (aFrames) to microseconds, given
// the specified audio rate (aRate). Stores result in aOutUsecs. Returns true
// if the operation succeeded, or false if there was an integer overflow
// while calculating the conversion.
// the specified audio rate (aRate).
CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate);
// Converts from number of audio frames (aFrames) to a TimeUnit, given
// the specified audio rate (aRate).
media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate);
// Converts from microseconds (aUsecs) to number of audio frames, given the
// specified audio rate (aRate). Stores the result in aOutFrames. Returns

View file

@@ -423,7 +423,7 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
}
// Get the duration, and report it to the decoder if we have it.
Microseconds duration;
mp4_demuxer::Microseconds duration;
{
MonitorAutoLock lock(mDemuxerMonitor);
duration = mDemuxer->Duration();
@@ -561,7 +561,7 @@ MP4Reader::GetDecoderData(TrackType aTrack)
return mVideo;
}
Microseconds
mp4_demuxer::Microseconds
MP4Reader::GetNextKeyframeTime()
{
MonitorAutoLock mon(mDemuxerMonitor);
@@ -596,7 +596,7 @@ MP4Reader::ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
// if the time threshold (the current playback position) is after the next
// keyframe in the stream. This means we'll only skip frames that we have
// no hope of ever playing.
Microseconds nextKeyframe = -1;
mp4_demuxer::Microseconds nextKeyframe = -1;
if (!sDemuxSkipToNextKeyframe ||
(nextKeyframe = GetNextKeyframeTime()) == -1) {
return aSkipToNextKeyframe;
@@ -1090,7 +1090,7 @@ MP4Reader::GetBuffered()
nsresult rv = resource->GetCachedRanges(ranges);
if (NS_SUCCEEDED(rv)) {
nsTArray<Interval<Microseconds>> timeRanges;
nsTArray<Interval<mp4_demuxer::Microseconds>> timeRanges;
mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
for (size_t i = 0; i < timeRanges.Length(); i++) {
buffered += media::TimeInterval(

View file

@@ -118,7 +118,7 @@ private:
bool IsSupportedVideoMimeType(const nsACString& aMimeType);
virtual bool IsWaitingOnCDMResource() override;
Microseconds GetNextKeyframeTime();
mp4_demuxer::Microseconds GetNextKeyframeTime();
bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);
size_t SizeOfQueue(TrackType aTrack);

View file

@@ -27,7 +27,6 @@ class MediaDataDecoder;
class MediaDataDecoderCallback;
class FlushableMediaTaskQueue;
class CDMProxy;
typedef int64_t Microseconds;
// The PlatformDecoderModule interface is used by the MP4Reader to abstract
// access to the H264 and Audio (AAC/MP3) decoders provided by various platforms.

View file

@@ -13,6 +13,7 @@
#include "ImageContainer.h"
#include "MediaInfo.h"
#include "MediaTaskQueue.h"
#include "TimeUnits.h"
namespace mozilla {
@@ -51,9 +52,10 @@ public:
}
NS_IMETHOD Run() override
{
nsRefPtr<MediaData> data = mCreator->Create(mSample->mTime,
mSample->mDuration,
mSample->mOffset);
nsRefPtr<MediaData> data =
mCreator->Create(media::TimeUnit::FromMicroseconds(mSample->mTime),
media::TimeUnit::FromMicroseconds(mSample->mDuration),
mSample->mOffset);
mCallback->Output(data);
return NS_OK;
}
@@ -103,7 +105,7 @@ public:
}
already_AddRefed<MediaData>
Create(Microseconds aDTS, Microseconds aDuration, int64_t aOffsetInStream)
Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
{
// Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
// with a U and V plane that are half the size of the Y plane, i.e 8 bit,
@@ -141,11 +143,11 @@ public:
mImageContainer,
nullptr,
aOffsetInStream,
aDTS,
aDuration,
aDTS.ToMicroseconds(),
aDuration.ToMicroseconds(),
buffer,
true,
aDTS,
aDTS.ToMicroseconds(),
mPicture);
}
private:
@@ -164,13 +166,14 @@ public:
{
}
MediaData* Create(Microseconds aDTS,
Microseconds aDuration,
MediaData* Create(const media::TimeUnit& aDTS,
const media::TimeUnit& aDuration,
int64_t aOffsetInStream)
{
// Convert duration to frames. We add 1 to duration to account for
// rounding errors, so we get a consistent tone.
CheckedInt64 frames = UsecsToFrames(aDuration+1, mSampleRate);
CheckedInt64 frames =
UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
if (!frames.isValid() ||
!mChannelCount ||
!mSampleRate ||
@@ -189,8 +192,8 @@ public:
mFrameSum++;
}
return new AudioData(aOffsetInStream,
aDTS,
aDuration,
aDTS.ToMicroseconds(),
aDuration.ToMicroseconds(),
uint32_t(frames.value()),
samples,
mChannelCount,
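
The +1 added to the duration compensates for truncation: FramesToUsecs rounds down, so converting that duration back with UsecsToFrames can lose a frame on the round trip. Worked numbers at 44100 Hz, using the VideoUtils conversions:

// FramesToUsecs(22, 44100)      == 22000000 / 44100   == 498 us (498.87 truncated)
// UsecsToFrames(498, 44100)     == 21961800 / 1000000 == 21 frames -- one lost
// UsecsToFrames(498 + 1, 44100) == 22005900 / 1000000 == 22 frames -- recovered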

View file

@@ -105,7 +105,8 @@ public:
return eglImage;
}
virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat, Microseconds aDuration) override {
virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
const media::TimeUnit& aDuration) override {
if (!EnsureGLContext()) {
return NS_ERROR_FAILURE;
}
@@ -168,7 +169,7 @@ public:
mImageContainer,
offset,
presentationTimeUs,
aDuration,
aDuration.ToMicroseconds(),
img,
isSync,
presentationTimeUs,
@@ -213,7 +214,9 @@ public:
}
}
nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, Microseconds aDuration) {
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
MediaFormat::Param aFormat,
const media::TimeUnit& aDuration) {
// The output on Android is always 16-bit signed
nsresult rv;
@@ -239,7 +242,7 @@ public:
NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
aDuration,
aDuration.ToMicroseconds(),
numFrames,
audio,
numChannels,
@@ -485,7 +488,7 @@ void MediaCodecDataDecoder::DecoderLoop()
sample->mTime, 0);
HANDLE_DECODER_ERROR();
mDurations.push(sample->mDuration);
mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
sample = nullptr;
outputDone = false;
}
@@ -543,7 +546,7 @@ void MediaCodecDataDecoder::DecoderLoop()
MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");
Microseconds duration = 0;
media::TimeUnit duration;
if (!mDurations.empty()) {
duration = mDurations.front();
mDurations.pop();
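
MediaCodec's BufferInfo reports a presentation time but, as used here, no duration, so the decoder pairs inputs with outputs through the mDurations FIFO; making the queue hold media::TimeUnit keeps the value typed until the final AudioData/VideoData is built. The pairing, restated in outline:

// Input side (DecoderLoop): one duration queued per submitted sample.
mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));

// Output side: the front of the queue belongs to the next decoded buffer.
// A default-constructed TimeUnit is zero, so an unexpected underflow
// degrades to a zero-length frame instead of garbage.
media::TimeUnit duration;
if (!mDurations.empty()) {
  duration = mDurations.front();
  mDurations.pop();
}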

View file

@@ -9,6 +9,7 @@
#include "AndroidSurfaceTexture.h"
#include "MediaCodec.h"
#include "TimeUnits.h"
#include "mozilla/Monitor.h"
#include <queue>
@@ -81,12 +82,13 @@ protected:
bool mStopping;
SampleQueue mQueue;
std::queue<Microseconds> mDurations;
// Durations are stored in microseconds.
std::queue<media::TimeUnit> mDurations;
virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);
virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
virtual void Cleanup() {};
nsresult ResetInputBuffers();

View file

@@ -261,8 +261,8 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
size_t numFrames = outputData.Length() / channels;
int rate = mOutputFormat.mSampleRate;
CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
if (!duration.isValid()) {
media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
if (!duration.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
return NS_ERROR_FAILURE;
}
@@ -270,14 +270,14 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
#ifdef LOG_SAMPLE_DECODE
LOG("pushed audio at time %lfs; duration %lfs\n",
(double)aSample->mTime / USECS_PER_S,
(double)duration.value() / USECS_PER_S);
duration.ToSeconds());
#endif
nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
PodCopy(data.get(), &outputData[0], outputData.Length());
nsRefPtr<AudioData> audio = new AudioData(aSample->mOffset,
aSample->mTime,
duration.value(),
duration.ToMicroseconds(),
numFrames,
data.forget(),
channels,

View file

@@ -195,9 +195,9 @@ PlatformCallback(void* decompressionOutputRefCon,
AutoCFRelease<CFNumberRef> kfref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));
Microseconds dts;
Microseconds pts;
Microseconds duration;
int64_t dts;
int64_t pts;
int64_t duration;
int64_t byte_offset;
char is_sync_point;
@@ -208,11 +208,12 @@
CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);
nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
new AppleVDADecoder::AppleFrameRef(dts,
pts,
duration,
byte_offset,
is_sync_point == 1));
new AppleVDADecoder::AppleFrameRef(
media::TimeUnit::FromMicroseconds(dts),
media::TimeUnit::FromMicroseconds(pts),
media::TimeUnit::FromMicroseconds(duration),
byte_offset,
is_sync_point == 1));
// Forward the data back to an object method which can access
// the correct MP4Reader callback.
@@ -252,9 +253,9 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
aFrameRef->byte_offset,
aFrameRef->decode_timestamp,
aFrameRef->composition_timestamp,
aFrameRef->duration,
aFrameRef->decode_timestamp.ToMicroseconds(),
aFrameRef->composition_timestamp.ToMicroseconds(),
aFrameRef->duration.ToMicroseconds(),
aFrameRef->is_sync_point ? " keyframe" : ""
);
@@ -277,10 +278,11 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
data = VideoData::CreateFromImage(info,
mImageContainer,
aFrameRef->byte_offset,
aFrameRef->composition_timestamp,
aFrameRef->duration, image.forget(),
aFrameRef->composition_timestamp.ToMicroseconds(),
aFrameRef->duration.ToMicroseconds(),
image.forget(),
aFrameRef->is_sync_point,
aFrameRef->decode_timestamp,
aFrameRef->decode_timestamp.ToMicroseconds(),
visible);
if (!data) {

View file

@@ -13,6 +13,7 @@
#include "MP4Decoder.h"
#include "nsIThread.h"
#include "ReorderQueue.h"
#include "TimeUnits.h"
#include "VideoDecodeAcceleration/VDADecoder.h"
@@ -28,24 +29,24 @@ class AppleVDADecoder : public MediaDataDecoder {
public:
class AppleFrameRef {
public:
Microseconds decode_timestamp;
Microseconds composition_timestamp;
Microseconds duration;
media::TimeUnit decode_timestamp;
media::TimeUnit composition_timestamp;
media::TimeUnit duration;
int64_t byte_offset;
bool is_sync_point;
explicit AppleFrameRef(const MediaRawData& aSample)
: decode_timestamp(aSample.mTimecode)
, composition_timestamp(aSample.mTime)
, duration(aSample.mDuration)
: decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
, composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
, duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
, byte_offset(aSample.mOffset)
, is_sync_point(aSample.mKeyframe)
{
}
AppleFrameRef(Microseconds aDts,
Microseconds aPts,
Microseconds aDuration,
AppleFrameRef(const media::TimeUnit& aDts,
const media::TimeUnit& aPts,
const media::TimeUnit& aDuration,
int64_t aByte_offset,
bool aIs_sync_point)
: decode_timestamp(aDts)

View file

@@ -8,6 +8,7 @@
#include "FFmpegRuntimeLinker.h"
#include "FFmpegAudioDecoder.h"
#include "TimeUnits.h"
#define MAX_CHANNELS 16
@@ -97,7 +98,7 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
}
int64_t samplePosition = aSample->mOffset;
Microseconds pts = aSample->mTime;
media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
while (packet.size > 0) {
int decoded;
@@ -117,23 +118,28 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
nsAutoArrayPtr<AudioDataValue> audio(
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
CheckedInt<Microseconds> duration =
FramesToUsecs(mFrame->nb_samples, samplingRate);
if (!duration.isValid()) {
media::TimeUnit duration =
FramesToTimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
mCallback->Error();
return;
}
nsRefPtr<AudioData> data = new AudioData(samplePosition,
pts,
duration.value(),
pts.ToMicroseconds(),
duration.ToMicroseconds(),
mFrame->nb_samples,
audio.forget(),
numChannels,
samplingRate);
mCallback->Output(data);
pts += duration.value();
pts += duration;
if (!pts.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
mCallback->Error();
return;
}
}
packet.data += bytesConsumed;
packet.size -= bytesConsumed;
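
A single FFmpeg packet can decode into several audio frames, so pts advances once per decoded chunk. With TimeUnit the addition is checked: an accumulation past INT64_MAX microseconds is detected instead of silently wrapping. The loop reduced to its timing logic (a sketch; moreFrames, frameCount and samplingRate stand in for the decoder's real state):

media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
while (moreFrames) {
  media::TimeUnit duration = FramesToTimeUnit(frameCount, samplingRate);
  if (!duration.IsValid()) {
    mCallback->Error();  // frames-to-time conversion overflowed
    return;
  }
  // ... emit an AudioData stamped with pts.ToMicroseconds() ...
  pts += duration;       // checked addition
  if (!pts.IsValid()) {
    mCallback->Error();  // accumulated timestamp overflowed
    return;
  }
}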

View file

@@ -9,6 +9,7 @@
#include "VideoUtils.h"
#include "WMFUtils.h"
#include "nsTArray.h"
#include "TimeUnits.h"
#include "mozilla/Logging.h"
@@ -290,17 +291,18 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
buffer->Unlock();
CheckedInt64 timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
NS_ENSURE_TRUE(timestamp.isValid(), E_FAIL);
media::TimeUnit timestamp =
FramesToTimeUnit(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
mAudioFrameSum += numFrames;
CheckedInt64 duration = FramesToUsecs(numFrames, mAudioRate);
NS_ENSURE_TRUE(duration.isValid(), E_FAIL);
media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
aOutData = new AudioData(aStreamOffset,
timestamp.value(),
duration.value(),
timestamp.ToMicroseconds(),
duration.ToMicroseconds(),
numFrames,
audioData.forget(),
mAudioChannels,
@@ -308,7 +310,7 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
timestamp, duration, currentLength);
timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
#endif
return S_OK;

View file

@@ -70,23 +70,23 @@ MFOffsetToInt32(const MFOffset& aOffset)
return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
}
int64_t
media::TimeUnit
GetSampleDuration(IMFSample* aSample)
{
NS_ENSURE_TRUE(aSample, -1);
NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
int64_t duration = 0;
aSample->GetSampleDuration(&duration);
return HNsToUsecs(duration);
return media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
}
int64_t
media::TimeUnit
GetSampleTime(IMFSample* aSample)
{
NS_ENSURE_TRUE(aSample, -1);
NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
LONGLONG timestampHns = 0;
HRESULT hr = aSample->GetSampleTime(&timestampHns);
NS_ENSURE_TRUE(SUCCEEDED(hr), -1);
return HNsToUsecs(timestampHns);
NS_ENSURE_TRUE(SUCCEEDED(hr), media::TimeUnit::Invalid());
return media::TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
}
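
The HNs prefix is Media Foundation's 100-nanosecond tick: one microsecond is ten ticks, so HNsToUsecs amounts to a divide by ten before the value is wrapped in a TimeUnit. A worked conversion:

// 10,000,000 HNS ticks == 1,000,000 us == 1 second.
// One NTSC frame (1/29.97 s) is roughly 333,667 HNS; HNsToUsecs
// turns that into 33,366 us, which then becomes
// media::TimeUnit::FromMicroseconds(33366).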
// Gets the sub-region of the video frame that should be displayed.

View file

@@ -10,6 +10,7 @@
#include "WMF.h"
#include "nsString.h"
#include "nsRect.h"
#include "TimeUnits.h"
#include "VideoUtils.h"
// Various utilities shared by WMF backend files.
@@ -46,14 +47,14 @@ MFOffsetToInt32(const MFOffset& aOffset);
HRESULT
GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion);
// Returns the duration of a IMFSample in microseconds.
// Returns -1 on failure.
int64_t
// Returns the duration of an IMFSample as a TimeUnit.
// Returns media::TimeUnit::Invalid() on failure.
media::TimeUnit
GetSampleDuration(IMFSample* aSample);
// Returns the presentation time of a IMFSample in microseconds.
// Returns -1 on failure.
int64_t
// Returns the presentation time of an IMFSample as a TimeUnit.
// Returns media::TimeUnit::Invalid() on failure.
media::TimeUnit
GetSampleTime(IMFSample* aSample);
inline bool

View file

@@ -410,8 +410,10 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
b.mPlanes[2].mOffset = 0;
b.mPlanes[2].mSkip = 0;
Microseconds pts = GetSampleTime(aSample);
Microseconds duration = GetSampleDuration(aSample);
media::TimeUnit pts = GetSampleTime(aSample);
NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
media::TimeUnit duration = GetSampleDuration(aSample);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
nsRefPtr<layers::PlanarYCbCrImage> image =
new IMFYCbCrImage(buffer, twoDBuffer);
@@ -426,8 +428,8 @@
VideoData::CreateFromImage(mVideoInfo,
mImageContainer,
aStreamOffset,
std::max(0LL, pts),
duration,
pts.ToMicroseconds(),
duration.ToMicroseconds(),
image.forget(),
false,
-1,
@@ -458,13 +460,15 @@ WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
NS_ENSURE_TRUE(image, E_FAIL);
Microseconds pts = GetSampleTime(aSample);
Microseconds duration = GetSampleDuration(aSample);
media::TimeUnit pts = GetSampleTime(aSample);
NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
media::TimeUnit duration = GetSampleDuration(aSample);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
nsRefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
mImageContainer,
aStreamOffset,
pts,
duration,
pts.ToMicroseconds(),
duration.ToMicroseconds(),
image.forget(),
false,
-1,