Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1179094: Use TimeUnit in PlatformDecoderModule. r=cpearce
This commit is contained in:
Parent: f41604e20c
Commit: c7b6fa505d
@@ -119,6 +119,14 @@ public:
     return TimeUnit(INT64_MAX);
   }
 
+  static TimeUnit Invalid() {
+    TimeUnit ret;
+    ret.mValue = CheckedInt64(INT64_MAX);
+    // Force an overflow to render the CheckedInt invalid.
+    ret.mValue += 1;
+    return ret;
+  }
+
   int64_t ToMicroseconds() const {
     return mValue.value();
   }
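
The Invalid() trick above leans on CheckedInt semantics: once a checked integer overflows, every later operation on it stays invalid, so one IsValid() test at the end of a computation catches any earlier overflow. A minimal standalone sketch of that behavior (the Checked64 wrapper here is a stand-in, not Gecko's actual mozilla::CheckedInt64):

#include <cstdint>
#include <iostream>
#include <limits>

// Stand-in for mozilla::CheckedInt64: arithmetic that remembers overflow.
struct Checked64 {
  int64_t value = 0;
  bool valid = true;
  Checked64& operator+=(int64_t aRhs) {
    // Once invalid, stay invalid; otherwise detect positive overflow first.
    if (!valid || (aRhs > 0 && value > std::numeric_limits<int64_t>::max() - aRhs)) {
      valid = false;
    } else {
      value += aRhs;
    }
    return *this;
  }
};

struct TimeUnit {
  Checked64 mValue;
  static TimeUnit FromMicroseconds(int64_t aUs) {
    TimeUnit t;
    t.mValue.value = aUs;
    return t;
  }
  // Same trick as the hunk above: saturate at INT64_MAX, then overflow on
  // purpose so the checked value reads as invalid.
  static TimeUnit Invalid() {
    TimeUnit ret;
    ret.mValue.value = std::numeric_limits<int64_t>::max();
    ret.mValue += 1;
    return ret;
  }
  bool IsValid() const { return mValue.valid; }
};

int main() {
  std::cout << TimeUnit::FromMicroseconds(42).IsValid() << '\n';  // prints 1
  std::cout << TimeUnit::Invalid().IsValid() << '\n';             // prints 0
}
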
@@ -29,6 +29,10 @@ CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
   return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
 }
 
+media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
+  return (media::TimeUnit::FromMicroseconds(aFrames) * USECS_PER_S) / aRate;
+}
+
 // Converts from microseconds to number of audio frames, given the specified
 // audio rate.
 CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
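
FramesToTimeUnit is the checked counterpart of FramesToUsecs: the arithmetic is just frames × 1,000,000 / rate, carried out in TimeUnit so overflow is detectable. A quick worked example of the math alone (hypothetical unchecked helper, not the patched API):

#include <cstdint>
#include <iostream>

const int64_t USECS_PER_S = 1000000;

// Hypothetical unchecked form of the conversion; the real helpers go through
// CheckedInt64/TimeUnit so an overflowing multiply is caught, not wrapped.
int64_t FramesToUsecsUnchecked(int64_t aFrames, uint32_t aRate) {
  return aFrames * USECS_PER_S / aRate;
}

int main() {
  // 48000 frames at 48 kHz is exactly one second.
  std::cout << FramesToUsecsUnchecked(48000, 48000) << " us\n";  // 1000000 us
  // A 1024-frame AAC packet at 44.1 kHz is about 23.2 ms.
  std::cout << FramesToUsecsUnchecked(1024, 44100) << " us\n";   // 23219 us
}
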
@@ -127,10 +127,11 @@ media::TimeIntervals GetEstimatedBufferedTimeRanges(mozilla::MediaResource* aStream,
                                                     int64_t aDurationUsecs);
 
 // Converts from number of audio frames (aFrames) to microseconds, given
-// the specified audio rate (aRate). Stores result in aOutUsecs. Returns true
-// if the operation succeeded, or false if there was an integer overflow
-// while calulating the conversion.
+// the specified audio rate (aRate).
 CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate);
+// Converts from number of audio frames (aFrames) to TimeUnit, given
+// the specified audio rate (aRate).
+media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate);
 
 // Converts from microseconds (aUsecs) to number of audio frames, given the
 // specified audio rate (aRate). Stores the result in aOutFrames. Returns
@@ -423,7 +423,7 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
   }
 
   // Get the duration, and report it to the decoder if we have it.
-  Microseconds duration;
+  mp4_demuxer::Microseconds duration;
   {
     MonitorAutoLock lock(mDemuxerMonitor);
     duration = mDemuxer->Duration();
@@ -561,7 +561,7 @@ MP4Reader::GetDecoderData(TrackType aTrack)
   return mVideo;
 }
 
-Microseconds
+mp4_demuxer::Microseconds
 MP4Reader::GetNextKeyframeTime()
 {
   MonitorAutoLock mon(mDemuxerMonitor);
@@ -596,7 +596,7 @@ MP4Reader::ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
   // if the time threshold (the current playback position) is after the next
   // keyframe in the stream. This means we'll only skip frames that we have
   // no hope of ever playing.
-  Microseconds nextKeyframe = -1;
+  mp4_demuxer::Microseconds nextKeyframe = -1;
   if (!sDemuxSkipToNextKeyframe ||
       (nextKeyframe = GetNextKeyframeTime()) == -1) {
     return aSkipToNextKeyframe;
@@ -1090,7 +1090,7 @@ MP4Reader::GetBuffered()
   nsresult rv = resource->GetCachedRanges(ranges);
 
   if (NS_SUCCEEDED(rv)) {
-    nsTArray<Interval<Microseconds>> timeRanges;
+    nsTArray<Interval<mp4_demuxer::Microseconds>> timeRanges;
     mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
     for (size_t i = 0; i < timeRanges.Length(); i++) {
       buffered += media::TimeInterval(
@@ -118,7 +118,7 @@ private:
   bool IsSupportedVideoMimeType(const nsACString& aMimeType);
   virtual bool IsWaitingOnCDMResource() override;
 
-  Microseconds GetNextKeyframeTime();
+  mp4_demuxer::Microseconds GetNextKeyframeTime();
   bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);
 
   size_t SizeOfQueue(TrackType aTrack);
@@ -27,7 +27,6 @@ class MediaDataDecoder;
 class MediaDataDecoderCallback;
 class FlushableMediaTaskQueue;
 class CDMProxy;
-typedef int64_t Microseconds;
 
 // The PlatformDecoderModule interface is used by the MP4Reader to abstract
 // access to the H264 and Audio (AAC/MP3) decoders provided by various platforms.
@@ -13,6 +13,7 @@
 #include "ImageContainer.h"
 #include "MediaInfo.h"
 #include "MediaTaskQueue.h"
+#include "TimeUnits.h"
 
 namespace mozilla {
 
@@ -51,9 +52,10 @@ public:
   }
   NS_IMETHOD Run() override
   {
-    nsRefPtr<MediaData> data = mCreator->Create(mSample->mTime,
-                                                mSample->mDuration,
-                                                mSample->mOffset);
+    nsRefPtr<MediaData> data =
+      mCreator->Create(media::TimeUnit::FromMicroseconds(mSample->mTime),
+                       media::TimeUnit::FromMicroseconds(mSample->mDuration),
+                       mSample->mOffset);
     mCallback->Output(data);
     return NS_OK;
   }
@@ -103,7 +105,7 @@ public:
   }
 
   already_AddRefed<MediaData>
-  Create(Microseconds aDTS, Microseconds aDuration, int64_t aOffsetInStream)
+  Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
   {
     // Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
     // with a U and V plane that are half the size of the Y plane, i.e 8 bit,
@@ -141,11 +143,11 @@ public:
                                      mImageContainer,
                                      nullptr,
                                      aOffsetInStream,
-                                     aDTS,
-                                     aDuration,
+                                     aDTS.ToMicroseconds(),
+                                     aDuration.ToMicroseconds(),
                                      buffer,
                                      true,
-                                     aDTS,
+                                     aDTS.ToMicroseconds(),
                                      mPicture);
   }
 private:
@@ -164,13 +166,14 @@ public:
   {
   }
 
-  MediaData* Create(Microseconds aDTS,
-                    Microseconds aDuration,
+  MediaData* Create(const media::TimeUnit& aDTS,
+                    const media::TimeUnit& aDuration,
                     int64_t aOffsetInStream)
   {
     // Convert duration to frames. We add 1 to duration to account for
     // rounding errors, so we get a consistent tone.
-    CheckedInt64 frames = UsecsToFrames(aDuration+1, mSampleRate);
+    CheckedInt64 frames =
+      UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
     if (!frames.isValid() ||
         !mChannelCount ||
         !mSampleRate ||
@@ -189,8 +192,8 @@ public:
       mFrameSum++;
     }
     return new AudioData(aOffsetInStream,
-                         aDTS,
-                         aDuration,
+                         aDTS.ToMicroseconds(),
+                         aDuration.ToMicroseconds(),
                          uint32_t(frames.value()),
                          samples,
                          mChannelCount,
@@ -105,7 +105,8 @@ public:
     return eglImage;
   }
 
-  virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat, Microseconds aDuration) override {
+  virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
+                              const media::TimeUnit& aDuration) override {
     if (!EnsureGLContext()) {
       return NS_ERROR_FAILURE;
     }
@@ -168,7 +169,7 @@ public:
                                      mImageContainer,
                                      offset,
                                      presentationTimeUs,
-                                     aDuration,
+                                     aDuration.ToMicroseconds(),
                                      img,
                                      isSync,
                                      presentationTimeUs,
@@ -213,7 +214,9 @@ public:
     }
   }
 
-  nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, Microseconds aDuration) {
+  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
+                  MediaFormat::Param aFormat,
+                  const media::TimeUnit& aDuration) {
     // The output on Android is always 16-bit signed
 
     nsresult rv;
@@ -239,7 +242,7 @@ public:
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
     nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
-                                             aDuration,
+                                             aDuration.ToMicroseconds(),
                                              numFrames,
                                              audio,
                                              numChannels,
@@ -485,7 +488,7 @@ void MediaCodecDataDecoder::DecoderLoop()
                                sample->mTime, 0);
       HANDLE_DECODER_ERROR();
 
-      mDurations.push(sample->mDuration);
+      mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
       sample = nullptr;
       outputDone = false;
     }
@@ -543,7 +546,7 @@ void MediaCodecDataDecoder::DecoderLoop()
 
       MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");
 
-      Microseconds duration = 0;
+      media::TimeUnit duration;
      if (!mDurations.empty()) {
         duration = mDurations.front();
         mDurations.pop();
@@ -9,6 +9,7 @@
 #include "AndroidSurfaceTexture.h"
 
 #include "MediaCodec.h"
+#include "TimeUnits.h"
 #include "mozilla/Monitor.h"
 
 #include <queue>
@@ -81,12 +82,13 @@ protected:
   bool mStopping;
 
   SampleQueue mQueue;
-  std::queue<Microseconds> mDurations;
+  // Durations are stored in microseconds.
+  std::queue<media::TimeUnit> mDurations;
 
   virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);
 
-  virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
-  virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
+  virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
+  virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
   virtual void Cleanup() {};
 
   nsresult ResetInputBuffers();
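
Durations are queued here because MediaCodec decouples input from output: DecoderLoop() pushes a duration when it queues an input sample and pops one for each output buffer it drains (that framing is my reading of the surrounding code, not stated in the diff). A sketch of the FIFO bookkeeping with plain microseconds standing in for media::TimeUnit:

#include <cstdint>
#include <iostream>
#include <queue>

int main() {
  std::queue<int64_t> durations;
  // Producer side: one push per queued input sample.
  for (int64_t d : {21333, 21333, 21334}) {  // e.g. three ~21.3 ms frames
    durations.push(d);
  }
  // Consumer side: one pop per drained output buffer, in FIFO order.
  while (!durations.empty()) {
    std::cout << "output buffer duration: " << durations.front() << " us\n";
    durations.pop();
  }
}
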
@@ -261,8 +261,8 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
 
   size_t numFrames = outputData.Length() / channels;
   int rate = mOutputFormat.mSampleRate;
-  CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
-  if (!duration.isValid()) {
+  media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
+  if (!duration.IsValid()) {
     NS_WARNING("Invalid count of accumulated audio samples");
     return NS_ERROR_FAILURE;
   }
@@ -270,14 +270,14 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
 #ifdef LOG_SAMPLE_DECODE
   LOG("pushed audio at time %lfs; duration %lfs\n",
       (double)aSample->mTime / USECS_PER_S,
-      (double)duration.value() / USECS_PER_S);
+      duration.ToSeconds());
 #endif
 
   nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
   PodCopy(data.get(), &outputData[0], outputData.Length());
   nsRefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                             aSample->mTime,
-                                            duration.value(),
+                                            duration.ToMicroseconds(),
                                             numFrames,
                                             data.forget(),
                                             channels,
@@ -195,9 +195,9 @@ PlatformCallback(void* decompressionOutputRefCon,
   AutoCFRelease<CFNumberRef> kfref =
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));
 
-  Microseconds dts;
-  Microseconds pts;
-  Microseconds duration;
+  int64_t dts;
+  int64_t pts;
+  int64_t duration;
   int64_t byte_offset;
   char is_sync_point;
 
@@ -208,11 +208,12 @@ PlatformCallback(void* decompressionOutputRefCon,
   CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);
 
   nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
-    new AppleVDADecoder::AppleFrameRef(dts,
-                                       pts,
-                                       duration,
-                                       byte_offset,
-                                       is_sync_point == 1));
+    new AppleVDADecoder::AppleFrameRef(
+      media::TimeUnit::FromMicroseconds(dts),
+      media::TimeUnit::FromMicroseconds(pts),
+      media::TimeUnit::FromMicroseconds(duration),
+      byte_offset,
+      is_sync_point == 1));
 
   // Forward the data back to an object method which can access
   // the correct MP4Reader callback.
@@ -252,9 +253,9 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
 
   LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
       aFrameRef->byte_offset,
-      aFrameRef->decode_timestamp,
-      aFrameRef->composition_timestamp,
-      aFrameRef->duration,
+      aFrameRef->decode_timestamp.ToMicroseconds(),
+      aFrameRef->composition_timestamp.ToMicroseconds(),
+      aFrameRef->duration.ToMicroseconds(),
       aFrameRef->is_sync_point ? " keyframe" : ""
   );
 
@@ -277,10 +278,11 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
   data = VideoData::CreateFromImage(info,
                                     mImageContainer,
                                     aFrameRef->byte_offset,
-                                    aFrameRef->composition_timestamp,
-                                    aFrameRef->duration, image.forget(),
+                                    aFrameRef->composition_timestamp.ToMicroseconds(),
+                                    aFrameRef->duration.ToMicroseconds(),
+                                    image.forget(),
                                     aFrameRef->is_sync_point,
-                                    aFrameRef->decode_timestamp,
+                                    aFrameRef->decode_timestamp.ToMicroseconds(),
                                     visible);
 
   if (!data) {
@@ -13,6 +13,7 @@
 #include "MP4Decoder.h"
 #include "nsIThread.h"
 #include "ReorderQueue.h"
+#include "TimeUnits.h"
 
 #include "VideoDecodeAcceleration/VDADecoder.h"
 
@@ -28,24 +29,24 @@ class AppleVDADecoder : public MediaDataDecoder {
 public:
   class AppleFrameRef {
   public:
-    Microseconds decode_timestamp;
-    Microseconds composition_timestamp;
-    Microseconds duration;
+    media::TimeUnit decode_timestamp;
+    media::TimeUnit composition_timestamp;
+    media::TimeUnit duration;
     int64_t byte_offset;
     bool is_sync_point;
 
     explicit AppleFrameRef(const MediaRawData& aSample)
-      : decode_timestamp(aSample.mTimecode)
-      , composition_timestamp(aSample.mTime)
-      , duration(aSample.mDuration)
+      : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
+      , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+      , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
       , byte_offset(aSample.mOffset)
       , is_sync_point(aSample.mKeyframe)
     {
     }
 
-    AppleFrameRef(Microseconds aDts,
-                  Microseconds aPts,
-                  Microseconds aDuration,
+    AppleFrameRef(const media::TimeUnit& aDts,
+                  const media::TimeUnit& aPts,
+                  const media::TimeUnit& aDuration,
                   int64_t aByte_offset,
                   bool aIs_sync_point)
       : decode_timestamp(aDts)
@@ -8,6 +8,7 @@
 #include "FFmpegRuntimeLinker.h"
 
 #include "FFmpegAudioDecoder.h"
+#include "TimeUnits.h"
 
 #define MAX_CHANNELS 16
 
@@ -97,7 +98,7 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
   }
 
   int64_t samplePosition = aSample->mOffset;
-  Microseconds pts = aSample->mTime;
+  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
 
   while (packet.size > 0) {
     int decoded;
@@ -117,23 +118,28 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
       nsAutoArrayPtr<AudioDataValue> audio(
         CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
 
-      CheckedInt<Microseconds> duration =
-        FramesToUsecs(mFrame->nb_samples, samplingRate);
-      if (!duration.isValid()) {
+      media::TimeUnit duration =
+        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
+      if (!duration.IsValid()) {
         NS_WARNING("Invalid count of accumulated audio samples");
         mCallback->Error();
         return;
       }
 
       nsRefPtr<AudioData> data = new AudioData(samplePosition,
-                                               pts,
-                                               duration.value(),
+                                               pts.ToMicroseconds(),
+                                               duration.ToMicroseconds(),
                                                mFrame->nb_samples,
                                                audio.forget(),
                                                numChannels,
                                                samplingRate);
       mCallback->Output(data);
-      pts += duration.value();
+      pts += duration;
+      if (!pts.IsValid()) {
+        NS_WARNING("Invalid count of accumulated audio samples");
+        mCallback->Error();
+        return;
+      }
     }
     packet.data += bytesConsumed;
     packet.size -= bytesConsumed;
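
The IsValid() check added after `pts += duration` matters because pts accumulates across every frame of every packet; TimeUnit's overflow state propagates through +=, so one check per iteration catches it. A self-contained sketch of the same bookkeeping with raw integers (contrived start value to force the failure path):

#include <cstdint>
#include <iostream>
#include <limits>

// pts advances by each decoded packet's duration, with an overflow check per
// step — the test TimeUnit::IsValid() performs implicitly via CheckedInt64.
int main() {
  const int64_t kMax = std::numeric_limits<int64_t>::max();
  int64_t pts = kMax - 50000;      // contrived start, close to the limit
  const int64_t duration = 23219;  // ~one 1024-frame AAC packet at 44.1 kHz
  bool valid = true;
  for (int packet = 0; packet < 4; ++packet) {
    if (pts > kMax - duration) {   // adding would overflow
      valid = false;
      break;
    }
    pts += duration;
  }
  std::cout << (valid ? "pts still valid"
                      : "pts overflowed; decoder would call Error()") << '\n';
}
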
@@ -9,6 +9,7 @@
 #include "VideoUtils.h"
 #include "WMFUtils.h"
 #include "nsTArray.h"
+#include "TimeUnits.h"
 
 #include "mozilla/Logging.h"
 
@@ -290,17 +291,18 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
 
   buffer->Unlock();
 
-  CheckedInt64 timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
-  NS_ENSURE_TRUE(timestamp.isValid(), E_FAIL);
+  media::TimeUnit timestamp =
+    FramesToTimeUnit(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
+  NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
 
   mAudioFrameSum += numFrames;
 
-  CheckedInt64 duration = FramesToUsecs(numFrames, mAudioRate);
-  NS_ENSURE_TRUE(duration.isValid(), E_FAIL);
+  media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   aOutData = new AudioData(aStreamOffset,
-                           timestamp.value(),
-                           duration.value(),
+                           timestamp.ToMicroseconds(),
+                           duration.ToMicroseconds(),
                            numFrames,
                            audioData.forget(),
                            mAudioChannels,
@@ -308,7 +310,7 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
 
 #ifdef LOG_SAMPLE_DECODE
   LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
-      timestamp, duration, currentLength);
+      timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
 #endif
 
   return S_OK;
@@ -70,23 +70,23 @@ MFOffsetToInt32(const MFOffset& aOffset)
   return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
 }
 
-int64_t
+media::TimeUnit
 GetSampleDuration(IMFSample* aSample)
 {
-  NS_ENSURE_TRUE(aSample, -1);
+  NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
   int64_t duration = 0;
   aSample->GetSampleDuration(&duration);
-  return HNsToUsecs(duration);
+  return media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
 }
 
-int64_t
+media::TimeUnit
 GetSampleTime(IMFSample* aSample)
 {
-  NS_ENSURE_TRUE(aSample, -1);
+  NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
   LONGLONG timestampHns = 0;
   HRESULT hr = aSample->GetSampleTime(&timestampHns);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), -1);
-  return HNsToUsecs(timestampHns);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), media::TimeUnit::Invalid());
+  return media::TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
 }
 
 // Gets the sub-region of the video frame that should be displayed.
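
For context on the WMF helpers: Media Foundation reports sample times in 100-nanosecond ("hns") ticks, so HNsToUsecs is a divide-by-ten; the real gain of this hunk is that failure becomes media::TimeUnit::Invalid() rather than the old -1 sentinel, which is itself a representable timestamp. A sketch of the conversion (assumed behavior of HNsToUsecs, which isn't shown in this diff):

#include <cstdint>
#include <iostream>

// Assumed behavior of HNsToUsecs: ten 100-ns ticks make one microsecond.
int64_t HNsToUsecsSketch(int64_t aHNs) {
  return aHNs / 10;
}

int main() {
  // One second = 10,000,000 hns = 1,000,000 us.
  std::cout << HNsToUsecsSketch(10000000) << " us\n";
  // -1 is a representable tick count, which is why the patched helpers
  // return media::TimeUnit::Invalid() on failure instead of a -1 sentinel.
  std::cout << HNsToUsecsSketch(-1) << " us\n";  // 0: the sentinel ambiguity
}
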
@@ -10,6 +10,7 @@
 #include "WMF.h"
 #include "nsString.h"
 #include "nsRect.h"
+#include "TimeUnits.h"
 #include "VideoUtils.h"
 
 // Various utilities shared by WMF backend files.
@@ -46,14 +47,14 @@ MFOffsetToInt32(const MFOffset& aOffset);
 HRESULT
 GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion);
 
-// Returns the duration of a IMFSample in microseconds.
-// Returns -1 on failure.
-int64_t
+// Returns the duration of a IMFSample in TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
 GetSampleDuration(IMFSample* aSample);
 
-// Returns the presentation time of a IMFSample in microseconds.
-// Returns -1 on failure.
-int64_t
+// Returns the presentation time of a IMFSample in TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
 GetSampleTime(IMFSample* aSample);
 
 inline bool
@@ -410,8 +410,10 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
-  Microseconds pts = GetSampleTime(aSample);
-  Microseconds duration = GetSampleDuration(aSample);
+  media::TimeUnit pts = GetSampleTime(aSample);
+  NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+  media::TimeUnit duration = GetSampleDuration(aSample);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   nsRefPtr<layers::PlanarYCbCrImage> image =
     new IMFYCbCrImage(buffer, twoDBuffer);
@@ -426,8 +428,8 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
     VideoData::CreateFromImage(mVideoInfo,
                                mImageContainer,
                                aStreamOffset,
-                               std::max(0LL, pts),
-                               duration,
+                               pts.ToMicroseconds(),
+                               duration.ToMicroseconds(),
                                image.forget(),
                                false,
                                -1,
@@ -458,13 +460,15 @@ WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(image, E_FAIL);
 
-  Microseconds pts = GetSampleTime(aSample);
-  Microseconds duration = GetSampleDuration(aSample);
+  media::TimeUnit pts = GetSampleTime(aSample);
+  NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+  media::TimeUnit duration = GetSampleDuration(aSample);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   nsRefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
                                                      mImageContainer,
                                                      aStreamOffset,
-                                                     pts,
-                                                     duration,
+                                                     pts.ToMicroseconds(),
+                                                     duration.ToMicroseconds(),
                                                      image.forget(),
                                                      false,
                                                      -1,