Bug 1287370: Remove VDA decoder. r=cpearce

The VDA decoder was no longer usable regardless.

MozReview-Commit-ID: 6Q94jrp0OIG

--HG--
extra : rebase_source : 368396b413995eb1c9382cb9cb134ec2fcf38d95
This commit is contained in:
Jean-Yves Avenard 2016-07-18 17:02:51 +10:00
Родитель 897e63f874
Коммит 0b57c68c53
11 изменённых файлов: 463 добавлений и 1130 удалений

Просмотреть файл

@ -7,8 +7,6 @@
#include "AppleATDecoder.h"
#include "AppleCMLinker.h"
#include "AppleDecoderModule.h"
#include "AppleVDADecoder.h"
#include "AppleVDALinker.h"
#include "AppleVTDecoder.h"
#include "AppleVTLinker.h"
#include "MacIOSurfaceImage.h"
@ -22,7 +20,6 @@ bool AppleDecoderModule::sInitialized = false;
bool AppleDecoderModule::sIsCoreMediaAvailable = false;
bool AppleDecoderModule::sIsVTAvailable = false;
bool AppleDecoderModule::sIsVTHWAvailable = false;
bool AppleDecoderModule::sIsVDAAvailable = false;
bool AppleDecoderModule::sCanUseHardwareVideoDecoder = true;
AppleDecoderModule::AppleDecoderModule()
@ -45,9 +42,6 @@ AppleDecoderModule::Init()
MacIOSurfaceLib::LoadLibrary();
const bool loaded = MacIOSurfaceLib::isInit();
// dlopen VideoDecodeAcceleration.framework if it's available.
sIsVDAAvailable = loaded && AppleVDALinker::Link();
// dlopen CoreMedia.framework if it's available.
sIsCoreMediaAvailable = AppleCMLinker::Link();
// dlopen VideoToolbox.framework if it's available.
@ -67,7 +61,7 @@ AppleDecoderModule::Init()
nsresult
AppleDecoderModule::Startup()
{
if (!sInitialized || (!sIsVDAAvailable && !sIsVTAvailable)) {
if (!sInitialized || !sIsVTAvailable) {
return NS_ERROR_FAILURE;
}
return NS_OK;
@ -76,27 +70,11 @@ AppleDecoderModule::Startup()
already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder;
if (sIsVDAAvailable && (!sIsVTHWAvailable || MediaPrefs::AppleForceVDA())) {
decoder =
AppleVDADecoder::CreateVDADecoder(aParams.VideoConfig(),
aParams.mTaskQueue,
aParams.mCallback,
aParams.mImageContainer);
if (decoder) {
return decoder.forget();
}
}
// We fallback here if VDA isn't available, or is available but isn't
// supported by the current platform.
if (sIsVTAvailable) {
decoder =
new AppleVTDecoder(aParams.VideoConfig(),
aParams.mTaskQueue,
aParams.mCallback,
aParams.mImageContainer);
}
RefPtr<MediaDataDecoder> decoder =
new AppleVTDecoder(aParams.VideoConfig(),
aParams.mTaskQueue,
aParams.mCallback,
aParams.mImageContainer);
return decoder.forget();
}
@ -117,9 +95,8 @@ AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType,
return (sIsCoreMediaAvailable &&
(aMimeType.EqualsLiteral("audio/mpeg") ||
aMimeType.EqualsLiteral("audio/mp4a-latm"))) ||
((sIsVTAvailable || sIsVDAAvailable) &&
(aMimeType.EqualsLiteral("video/mp4") ||
aMimeType.EqualsLiteral("video/avc")));
(sIsVTAvailable && (aMimeType.EqualsLiteral("video/mp4") ||
aMimeType.EqualsLiteral("video/avc")));
}
PlatformDecoderModule::ConversionRequired

Просмотреть файл

@ -41,7 +41,6 @@ private:
static bool sIsCoreMediaAvailable;
static bool sIsVTAvailable;
static bool sIsVTHWAvailable;
static bool sIsVDAAvailable;
};
} // namespace mozilla

Просмотреть файл

@ -1,700 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <CoreFoundation/CFString.h>
#include "AppleDecoderModule.h"
#include "AppleUtils.h"
#include "AppleVDADecoder.h"
#include "AppleVDALinker.h"
#include "MediaInfo.h"
#include "mp4_demuxer/H264.h"
#include "MP4Decoder.h"
#include "MediaData.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/SyncRunnable.h"
#include "nsThreadUtils.h"
#include "mozilla/Logging.h"
#include "VideoUtils.h"
#include <algorithm>
#include "gfxPlatform.h"
#ifndef MOZ_WIDGET_UIKIT
#include "MacIOSurfaceImage.h"
#endif
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
//#define LOG_MEDIA_SHA1
namespace mozilla {
// Determine how many frames the reorder queue must be able to hold by
// inspecting the H264 SPS found in the extradata. Falls back to 4 when no
// SPS can be decoded and never exceeds the H264 maximum of 16 reference
// frames.
static uint32_t ComputeMaxRefFrames(const MediaByteBuffer* aExtraData)
{
  uint32_t frames = 4;
  // Retrieve video dimensions from H264 SPS NAL.
  mp4_demuxer::SPSData spsdata;
  if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata)) {
    // max_num_ref_frames determines the size of the sliding window; queue
    // one extra frame so pts ordering is guaranteed, keep a floor of 4 for
    // non compliant videos, and cap at the spec maximum of 16.
    uint32_t fromSps = spsdata.max_num_ref_frames + 1;
    if (fromSps > frames) {
      frames = fromSps;
    }
    if (frames > 16u) {
      frames = 16u;
    }
  }
  return frames;
}
// Construct a decoder instance for a single H264 stream described by
// aConfig. The VDA decompression session itself is created separately in
// InitializeSession(); decoded output is delivered through aCallback.
AppleVDADecoder::AppleVDADecoder(const VideoInfo& aConfig,
                                 TaskQueue* aTaskQueue,
                                 MediaDataDecoderCallback* aCallback,
                                 layers::ImageContainer* aImageContainer)
  : mExtraData(aConfig.mExtraData)
  , mCallback(aCallback)
  , mPictureWidth(aConfig.mImage.width)
  , mPictureHeight(aConfig.mImage.height)
  , mDisplayWidth(aConfig.mDisplay.width)
  , mDisplayHeight(aConfig.mDisplay.height)
  , mQueuedSamples(0)
  , mTaskQueue(aTaskQueue)
  , mDecoder(nullptr)
  , mMaxRefFrames(ComputeMaxRefFrames(aConfig.mExtraData))
  , mImageContainer(aImageContainer)
  , mInputIncoming(0)
  , mIsShutDown(false)
#ifdef MOZ_WIDGET_UIKIT
  // iOS has no MacIOSurface; decode to main-memory images instead.
  , mUseSoftwareImages(true)
#else
  , mUseSoftwareImages(false)
#endif
  , mMonitor("AppleVideoDecoder")
  , mIsFlushing(false)
{
  MOZ_COUNT_CTOR(AppleVDADecoder);
  // TODO: Verify aConfig.mime_type.
  LOG("Creating AppleVDADecoder for %dx%d (%dx%d) h.264 video",
      mPictureWidth,
      mPictureHeight,
      mDisplayWidth,
      mDisplayHeight
      );
}
// Leak-checking bookkeeping only; actual teardown of the VDA session happens
// in Shutdown()/ProcessShutdown().
AppleVDADecoder::~AppleVDADecoder()
{
  MOZ_COUNT_DTOR(AppleVDADecoder);
}
// The VDA session is already created by CreateVDADecoder() before this object
// is handed out, so initialization resolves immediately.
RefPtr<MediaDataDecoder::InitPromise>
AppleVDADecoder::Init()
{
  return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
}
// Begin teardown. The VDA session destruction runs asynchronously on
// mTaskQueue, or synchronously on the caller's thread when no task queue was
// provided.
nsresult
AppleVDADecoder::Shutdown()
{
  MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
  mIsShutDown = true;
  if (mTaskQueue) {
    nsCOMPtr<nsIRunnable> runnable =
      NewRunnableMethod(this, &AppleVDADecoder::ProcessShutdown);
    mTaskQueue->Dispatch(runnable.forget());
  } else {
    ProcessShutdown();
  }
  return NS_OK;
}
// Destroy the VDA decompression session. Runs on mTaskQueue (or the caller's
// thread when there is none — see Shutdown()).
void
AppleVDADecoder::ProcessShutdown()
{
  if (mDecoder) {
    LOG("%s: cleaning up decoder %p", __func__, mDecoder);
    VDADecoderDestroy(mDecoder);
    mDecoder = nullptr;
  }
}
// Queue one compressed sample for decoding on mTaskQueue. Called on the
// reader task queue; mInputIncoming counts samples dispatched but not yet
// picked up by ProcessDecode().
nsresult
AppleVDADecoder::Input(MediaRawData* aSample)
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
      aSample,
      aSample->mTime,
      aSample->mDuration,
      aSample->mKeyframe ? " keyframe" : "",
      aSample->Size());
  mInputIncoming++;
  mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
    this, &AppleVDADecoder::ProcessDecode, aSample));
  return NS_OK;
}
// Synchronously flush the decoder. mIsFlushing is raised before the sync
// dispatch so any ProcessDecode() task already queued on mTaskQueue bails out
// instead of feeding more data to the hardware; it is lowered again once
// ProcessFlush() has run to completion.
nsresult
AppleVDADecoder::Flush()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  mIsFlushing = true;
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleVDADecoder::ProcessFlush);
  SyncRunnable::DispatchToThread(mTaskQueue, runnable);
  mIsFlushing = false;
  // All ProcessDecode() tasks should be done.
  MOZ_ASSERT(mInputIncoming == 0);
  mSeekTargetThreshold.reset();
  return NS_OK;
}
// Ask for all pending frames to be decoded and output. Completion is
// signalled asynchronously via mCallback->DrainComplete() from ProcessDrain().
nsresult
AppleVDADecoder::Drain()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleVDADecoder::ProcessDrain);
  mTaskQueue->Dispatch(runnable.forget());
  return NS_OK;
}
// Task-queue side of Flush(): discard in-flight hardware frames without
// emitting them, then empty the reorder queue.
void
AppleVDADecoder::ProcessFlush()
{
  AssertOnTaskQueueThread();
  OSStatus rv = VDADecoderFlush(mDecoder, 0 /*dont emit*/);
  if (rv != noErr) {
    LOG("AppleVDADecoder::Flush failed waiting for platform decoder "
        "with error:%d.", rv);
  }
  ClearReorderedFrames();
}
// Task-queue side of Drain(): ask the hardware to emit everything it still
// holds, push out the remaining reorder queue in composition order, then
// notify the callback.
void
AppleVDADecoder::ProcessDrain()
{
  AssertOnTaskQueueThread();
  OSStatus rv = VDADecoderFlush(mDecoder, kVDADecoderFlush_EmitFrames);
  if (rv != noErr) {
    LOG("AppleVDADecoder::Drain failed waiting for platform decoder "
        "with error:%d.", rv);
  }
  DrainReorderedFrames();
  mCallback->DrainComplete();
}
//
// Implementation details.
//
// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void
PlatformCallback(void* decompressionOutputRefCon,
CFDictionaryRef frameInfo,
OSStatus status,
VDADecodeInfoFlags infoFlags,
CVImageBufferRef image)
{
LOG("AppleVDADecoder[%s] status %d flags %d retainCount %ld",
__func__, status, infoFlags, CFGetRetainCount(frameInfo));
// Validate our arguments.
// According to Apple's TN2267
// The output callback is still called for all flushed frames,
// but no image buffers will be returned.
// FIXME: Distinguish between errors and empty flushed frames.
if (status != noErr || !image) {
NS_WARNING("AppleVDADecoder decoder returned no data");
image = nullptr;
} else if (infoFlags & kVDADecodeInfo_FrameDropped) {
NS_WARNING(" ...frame dropped...");
image = nullptr;
} else {
MOZ_ASSERT(image || CFGetTypeID(image) == CVPixelBufferGetTypeID(),
"AppleVDADecoder returned an unexpected image type");
}
AppleVDADecoder* decoder =
static_cast<AppleVDADecoder*>(decompressionOutputRefCon);
AutoCFRelease<CFNumberRef> ptsref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_PTS"));
AutoCFRelease<CFNumberRef> dtsref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DTS"));
AutoCFRelease<CFNumberRef> durref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DURATION"));
AutoCFRelease<CFNumberRef> boref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_OFFSET"));
AutoCFRelease<CFNumberRef> kfref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));
int64_t dts;
int64_t pts;
int64_t duration;
int64_t byte_offset;
char is_sync_point;
CFNumberGetValue(ptsref, kCFNumberSInt64Type, &pts);
CFNumberGetValue(dtsref, kCFNumberSInt64Type, &dts);
CFNumberGetValue(durref, kCFNumberSInt64Type, &duration);
CFNumberGetValue(boref, kCFNumberSInt64Type, &byte_offset);
CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);
AppleVDADecoder::AppleFrameRef frameRef(
media::TimeUnit::FromMicroseconds(dts),
media::TimeUnit::FromMicroseconds(pts),
media::TimeUnit::FromMicroseconds(duration),
byte_offset,
is_sync_point == 1);
decoder->OutputFrame(image, frameRef);
}
// Heap-allocate a timing/keyframe record for aSample; ownership passes to
// the caller.
AppleVDADecoder::AppleFrameRef*
AppleVDADecoder::CreateAppleFrameRef(const MediaRawData* aSample)
{
  MOZ_ASSERT(aSample);
  return new AppleFrameRef(*aSample);
}
// Output every queued frame in composition order and reset the queued-sample
// count. Thread-safe via mMonitor.
void
AppleVDADecoder::DrainReorderedFrames()
{
  MonitorAutoLock mon(mMonitor);
  while (!mReorderQueue.IsEmpty()) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mQueuedSamples = 0;
}
// Discard every queued frame without outputting it (used when flushing) and
// reset the queued-sample count. Thread-safe via mMonitor.
void
AppleVDADecoder::ClearReorderedFrames()
{
  MonitorAutoLock mon(mMonitor);
  while (!mReorderQueue.IsEmpty()) {
    mReorderQueue.Pop();
  }
  mQueuedSamples = 0;
}
// Frames that end before aTime will be emitted as NullData by OutputFrame()
// until one crosses the threshold, at which point the threshold is cleared.
void
AppleVDADecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
  mSeekTargetThreshold = Some(aTime);
}
// Copy and return a decoded frame.
// Invoked from PlatformCallback on the decoder's own thread; per the class
// declaration OutputFrame is thread-safe (shared state is atomic or guarded
// by mMonitor).
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             AppleVDADecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }
  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
      );
  if (mQueuedSamples > mMaxRefFrames) {
    // We had stopped requesting more input because we had received too much at
    // the time. We can ask for more once again.
    mCallback->InputExhausted();
  }
  MOZ_ASSERT(mQueuedSamples);
  mQueuedSamples--;
  if (!aImage) {
    // Image was dropped by decoder.
    return NS_OK;
  }
  // During internal seek, frames that end before the seek target are replaced
  // with NullData: they keep their timing but are never rendered.
  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }
  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);
  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
    VideoData::YCbCrBuffer buffer;
    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane. The second plane interleaves Cb/Cr bytes, hence mSkip = 1.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane. Same interleaved plane, offset by one byte.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;
    // Copy the image data into our own format.
    data =
      VideoData::Create(info,
                        mImageContainer,
                        nullptr,
                        aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds(),
                        buffer,
                        aFrameRef.is_sync_point,
                        aFrameRef.decode_timestamp.ToMicroseconds(),
                        visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    // Zero-copy path: wrap the IOSurface backing the decoded pixel buffer.
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
    data =
      VideoData::CreateFromImage(info,
                                 mImageContainer,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }
  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }
  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  while (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));
  return NS_OK;
}
// Task-queue side of Input(): decode one sample unless a flush is in
// progress, and request more input when the pipeline is running dry.
nsresult
AppleVDADecoder::ProcessDecode(MediaRawData* aSample)
{
  AssertOnTaskQueueThread();
  mInputIncoming--;
  if (mIsFlushing) {
    // Flush() is waiting for the task queue to empty; drop the sample.
    return NS_OK;
  }
  auto rv = DoDecode(aSample);
  // Ask for more data.
  if (NS_SUCCEEDED(rv) && !mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
    LOG("%s task queue empty; requesting more data", GetDescriptionName());
    mCallback->InputExhausted();
  }
  return rv;
}
// Hand one compressed sample to VDA. The sample's timing information is
// packaged into a CFDictionary that travels with the frame through the
// hardware and is read back in PlatformCallback.
nsresult
AppleVDADecoder::DoDecode(MediaRawData* aSample)
{
  AssertOnTaskQueueThread();
  AutoCFRelease<CFDataRef> block =
    CFDataCreate(kCFAllocatorDefault, aSample->Data(), aSample->Size());
  if (!block) {
    NS_ERROR("Couldn't create CFData");
    return NS_ERROR_FAILURE;
  }
  AutoCFRelease<CFNumberRef> pts =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt64Type,
                   &aSample->mTime);
  AutoCFRelease<CFNumberRef> dts =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt64Type,
                   &aSample->mTimecode);
  AutoCFRelease<CFNumberRef> duration =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt64Type,
                   &aSample->mDuration);
  AutoCFRelease<CFNumberRef> byte_offset =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt64Type,
                   &aSample->mOffset);
  char keyframe = aSample->mKeyframe ? 1 : 0;
  AutoCFRelease<CFNumberRef> cfkeyframe =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt8Type,
                   &keyframe);
  const void* keys[] = { CFSTR("FRAME_PTS"),
                         CFSTR("FRAME_DTS"),
                         CFSTR("FRAME_DURATION"),
                         CFSTR("FRAME_OFFSET"),
                         CFSTR("FRAME_KEYFRAME") };
  const void* values[] = { pts,
                           dts,
                           duration,
                           byte_offset,
                           cfkeyframe };
  static_assert(ArrayLength(keys) == ArrayLength(values),
                "Non matching keys/values array size");
  // The dictionary retains its values (kCFTypeDictionaryValueCallBacks); the
  // AutoCFRelease wrappers above only drop our own creation references.
  AutoCFRelease<CFDictionaryRef> frameInfo =
    CFDictionaryCreate(kCFAllocatorDefault,
                       keys,
                       values,
                       ArrayLength(keys),
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);
  // NOTE(review): mQueuedSamples is not decremented if VDADecoderDecode fails
  // below — OutputFrame never runs for this sample in that case; confirm
  // whether the counter should be rolled back on the error path.
  mQueuedSamples++;
  OSStatus rv = VDADecoderDecode(mDecoder,
                                 0,
                                 block,
                                 frameInfo);
  if (rv != noErr) {
    NS_WARNING("AppleVDADecoder: Couldn't pass frame to decoder");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
// Create the VDA decompression session from the stream's decoder
// specification and the desired output configuration. Must succeed before
// any sample is decoded (see CreateVDADecoder).
nsresult
AppleVDADecoder::InitializeSession()
{
  OSStatus rv;
  AutoCFRelease<CFDictionaryRef> decoderConfig =
    CreateDecoderSpecification();
  AutoCFRelease<CFDictionaryRef> outputConfiguration =
    CreateOutputConfiguration();
  rv =
    VDADecoderCreate(decoderConfig,
                     outputConfiguration,
                     (VDADecoderOutputCallback*)PlatformCallback,
                     this,
                     &mDecoder);
  if (rv != noErr) {
    NS_WARNING("AppleVDADecoder: Couldn't create hardware VDA decoder");
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
// Build the input-side session description: coded size, 'avc1' source format
// and the raw avcC extradata. The caller owns (and must release) the
// returned dictionary.
CFDictionaryRef
AppleVDADecoder::CreateDecoderSpecification()
{
  const uint8_t* extradata = mExtraData->Elements();
  int extrasize = mExtraData->Length();
  OSType format = 'avc1';
  AutoCFRelease<CFNumberRef> avc_width =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt32Type,
                   &mPictureWidth);
  AutoCFRelease<CFNumberRef> avc_height =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt32Type,
                   &mPictureHeight);
  AutoCFRelease<CFNumberRef> avc_format =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt32Type,
                   &format);
  AutoCFRelease<CFDataRef> avc_data =
    CFDataCreate(kCFAllocatorDefault,
                 extradata,
                 extrasize);
  // Keys are the CFStringRef constants resolved at runtime by AppleVDALinker.
  const void* decoderKeys[] = { AppleVDALinker::skPropWidth,
                                AppleVDALinker::skPropHeight,
                                AppleVDALinker::skPropSourceFormat,
                                AppleVDALinker::skPropAVCCData };
  const void* decoderValue[] = { avc_width,
                                 avc_height,
                                 avc_format,
                                 avc_data };
  static_assert(ArrayLength(decoderKeys) == ArrayLength(decoderValue),
                "Non matching keys/values array size");
  return CFDictionaryCreate(kCFAllocatorDefault,
                            decoderKeys,
                            decoderValue,
                            ArrayLength(decoderKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
}
// Build the output-side pixel buffer attributes. The software path requests
// bi-planar 4:2:0 frames in main memory; the default path requests
// IOSurface-backed, OpenGL-compatible 4:2:2 buffers for zero-copy display.
// The caller owns (and must release) the returned dictionary.
CFDictionaryRef
AppleVDADecoder::CreateOutputConfiguration()
{
  if (mUseSoftwareImages) {
    // Output format type:
    SInt32 PixelFormatTypeValue =
      kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
      CFNumberCreate(kCFAllocatorDefault,
                     kCFNumberSInt32Type,
                     &PixelFormatTypeValue);
    const void* outputKeys[] = { kCVPixelBufferPixelFormatTypeKey };
    const void* outputValues[] = { PixelFormatTypeNumber };
    static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                  "Non matching keys/values array size");
    return CFDictionaryCreate(kCFAllocatorDefault,
                              outputKeys,
                              outputValues,
                              ArrayLength(outputKeys),
                              &kCFTypeDictionaryKeyCallBacks,
                              &kCFTypeDictionaryValueCallBacks);
  }
#ifndef MOZ_WIDGET_UIKIT
  // Output format type:
  SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
  AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt32Type,
                   &PixelFormatTypeValue);
  // Construct IOSurface Properties
  const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
  const void* IOSurfaceValues[] = { kCFBooleanTrue };
  static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
                "Non matching keys/values array size");
  // Construct output configuration.
  AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
    CFDictionaryCreate(kCFAllocatorDefault,
                       IOSurfaceKeys,
                       IOSurfaceValues,
                       ArrayLength(IOSurfaceKeys),
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);
  const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
                               kCVPixelBufferPixelFormatTypeKey,
                               kCVPixelBufferOpenGLCompatibilityKey };
  const void* outputValues[] = { IOSurfaceProperties,
                                 PixelFormatTypeNumber,
                                 kCFBooleanTrue };
  static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                "Non matching keys/values array size");
  return CFDictionaryCreate(kCFAllocatorDefault,
                            outputKeys,
                            outputValues,
                            ArrayLength(outputKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
#else
  MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
/* static */
// Factory: returns nullptr when hardware decoding is blacklisted or the VDA
// session cannot be created, letting AppleDecoderModule fall back to another
// decoder.
already_AddRefed<AppleVDADecoder>
AppleVDADecoder::CreateVDADecoder(
  const VideoInfo& aConfig,
  TaskQueue* aTaskQueue,
  MediaDataDecoderCallback* aCallback,
  layers::ImageContainer* aImageContainer)
{
  if (!AppleDecoderModule::sCanUseHardwareVideoDecoder) {
    // This GPU is blacklisted for hardware decoding.
    return nullptr;
  }
  RefPtr<AppleVDADecoder> decoder =
    new AppleVDADecoder(aConfig, aTaskQueue, aCallback, aImageContainer);
  if (NS_FAILED(decoder->InitializeSession())) {
    return nullptr;
  }
  return decoder.forget();
}
} // namespace mozilla

Просмотреть файл

@ -1,159 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_AppleVDADecoder_h
#define mozilla_AppleVDADecoder_h
#include "PlatformDecoderModule.h"
#include "mozilla/Atomics.h"
#include "mozilla/ReentrantMonitor.h"
#include "MP4Decoder.h"
#include "nsIThread.h"
#include "ReorderQueue.h"
#include "TimeUnits.h"
#include "VideoDecodeAcceleration/VDADecoder.h"
namespace mozilla {
class TaskQueue;
class MediaDataDecoderCallback;
namespace layers {
class ImageContainer;
} // namespace layers
// Hardware H264 decoder built on the VideoDecodeAcceleration framework.
// Input/Flush/Drain/Shutdown are called on the reader task queue; decoding
// runs on mTaskQueue and frames come back asynchronously via OutputFrame.
class AppleVDADecoder : public MediaDataDecoder {
public:
  // Timing and keyframe metadata that travels with each frame submitted to
  // the hardware decoder.
  class AppleFrameRef {
  public:
    media::TimeUnit decode_timestamp;
    media::TimeUnit composition_timestamp;
    media::TimeUnit duration;
    int64_t byte_offset;
    bool is_sync_point;
    explicit AppleFrameRef(const MediaRawData& aSample)
      : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
      , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
      , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
      , byte_offset(aSample.mOffset)
      , is_sync_point(aSample.mKeyframe)
    {
    }
    AppleFrameRef(const media::TimeUnit& aDts,
                  const media::TimeUnit& aPts,
                  const media::TimeUnit& aDuration,
                  int64_t aByte_offset,
                  bool aIs_sync_point)
      : decode_timestamp(aDts)
      , composition_timestamp(aPts)
      , duration(aDuration)
      , byte_offset(aByte_offset)
      , is_sync_point(aIs_sync_point)
    {
    }
  };
  // Return a new created AppleVDADecoder or nullptr if media or hardware is
  // not supported by current configuration.
  static already_AddRefed<AppleVDADecoder> CreateVDADecoder(
    const VideoInfo& aConfig,
    TaskQueue* aTaskQueue,
    MediaDataDecoderCallback* aCallback,
    layers::ImageContainer* aImageContainer);
  // Access from the taskqueue and the decoder's thread.
  // OutputFrame is thread-safe.
  nsresult OutputFrame(CVPixelBufferRef aImage,
                       AppleFrameRef aFrameRef);
  RefPtr<InitPromise> Init() override;
  nsresult Input(MediaRawData* aSample) override;
  nsresult Flush() override;
  nsresult Drain() override;
  nsresult Shutdown() override;
  bool IsHardwareAccelerated(nsACString& aFailureReason) const override
  {
    return true;
  }
  const char* GetDescriptionName() const override
  {
    return "apple VDA decoder";
  }
  void SetSeekThreshold(const media::TimeUnit& aTime) override;
protected:
  AppleVDADecoder(const VideoInfo& aConfig,
                  TaskQueue* aTaskQueue,
                  MediaDataDecoderCallback* aCallback,
                  layers::ImageContainer* aImageContainer);
  virtual ~AppleVDADecoder();
  void AssertOnTaskQueueThread()
  {
    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  }
  AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
  void DrainReorderedFrames();
  void ClearReorderedFrames();
  CFDictionaryRef CreateOutputConfiguration();
  const RefPtr<MediaByteBuffer> mExtraData;
  MediaDataDecoderCallback* mCallback;
  const uint32_t mPictureWidth;
  const uint32_t mPictureHeight;
  const uint32_t mDisplayWidth;
  const uint32_t mDisplayHeight;
  // Number of times a sample was queued via Input(). Will be decreased upon
  // the decoder's callback being invoked.
  // This is used to calculate how many frames have been buffered by the
  // decoder.
  Atomic<uint32_t> mQueuedSamples;
private:
  // Flush and Drain operations; always run on mTaskQueue.
  virtual void ProcessFlush();
  virtual void ProcessDrain();
  virtual void ProcessShutdown();
  const RefPtr<TaskQueue> mTaskQueue;
  VDADecoder mDecoder;
  const uint32_t mMaxRefFrames;
  const RefPtr<layers::ImageContainer> mImageContainer;
  // Increased when Input is called, and decreased when ProcessDecode runs.
  // Reaching 0 indicates that there's no pending Input.
  Atomic<uint32_t> mInputIncoming;
  Atomic<bool> mIsShutDown;
  const bool mUseSoftwareImages;
  // Protects mReorderQueue.
  Monitor mMonitor;
  // Set on the reader thread by Flush() to indicate that output is not
  // required and so input samples on mTaskQueue need not be processed.
  // Cleared by Flush() once the synchronous ProcessFlush() has completed.
  Atomic<bool> mIsFlushing;
  ReorderQueue mReorderQueue;
  // Decoded frame will be dropped if its pts is smaller than this
  // value. It should be initialized before Input() or after Flush(). So it is
  // safe to access it in OutputFrame without protecting.
  Maybe<media::TimeUnit> mSeekTargetThreshold;
  // Method to set up the decompression session.
  nsresult InitializeSession();
  // Method to pass a frame to VDA for decoding.
  nsresult ProcessDecode(MediaRawData* aSample);
  virtual nsresult DoDecode(MediaRawData* aSample);
  CFDictionaryRef CreateDecoderSpecification();
};
} // namespace mozilla
#endif // mozilla_AppleVDADecoder_h

Просмотреть файл

@ -1,12 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Construct references to each of the VDA symbols we use.
// X-macro list: the includer defines LINK_FUNC(func) before including this
// file (see AppleVDALinker.h and AppleVDALinker.cpp) to declare, define,
// resolve or reset each dynamically loaded entry point.
LINK_FUNC(VDADecoderCreate)
LINK_FUNC(VDADecoderDecode)
LINK_FUNC(VDADecoderFlush)
LINK_FUNC(VDADecoderDestroy)

Просмотреть файл

@ -1,103 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <dlfcn.h>
#include "AppleVDALinker.h"
#include "nsDebug.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
namespace mozilla {
// Static link state shared by all users of the VDA framework.
AppleVDALinker::LinkStatus
AppleVDALinker::sLinkStatus = LinkStatus_INIT;
void* AppleVDALinker::sLink = nullptr;
// Configuration dictionary keys resolved from the framework by Link().
CFStringRef AppleVDALinker::skPropWidth = nullptr;
CFStringRef AppleVDALinker::skPropHeight = nullptr;
CFStringRef AppleVDALinker::skPropSourceFormat = nullptr;
CFStringRef AppleVDALinker::skPropAVCCData = nullptr;
// Define storage for each dynamically resolved VDA entry point (X-macro).
// NOTE(review): AppleVDALinker.h declares these as `typeof(func)*` — confirm
// these definitions match the header's declared type.
#define LINK_FUNC(func) typeof(func) func;
#include "AppleVDAFunctions.h"
#undef LINK_FUNC
// dlopen the VideoDecodeAcceleration framework and resolve every symbol and
// CFStringRef constant we use. Idempotent: subsequent calls return the
// cached link status; on any failure the partial state is rolled back via
// Unlink().
/* static */ bool
AppleVDALinker::Link()
{
  if (sLinkStatus) {
    return sLinkStatus == LinkStatus_SUCCEEDED;
  }
  const char* dlname =
    "/System/Library/Frameworks/VideoDecodeAcceleration.framework/VideoDecodeAcceleration";
  if (!(sLink = dlopen(dlname, RTLD_NOW | RTLD_LOCAL))) {
    NS_WARNING("Couldn't load VideoDecodeAcceleration framework");
    goto fail;
  }
// Resolve each entry point listed in AppleVDAFunctions.h; any miss aborts.
#define LINK_FUNC(func)                                                   \
  func = (typeof(func))dlsym(sLink, #func);                               \
  if (!func) {                                                            \
    NS_WARNING("Couldn't load VideoDecodeAcceleration function " #func ); \
    goto fail;                                                            \
  }
#include "AppleVDAFunctions.h"
#undef LINK_FUNC
  skPropWidth = GetIOConst("kVDADecoderConfiguration_Width");
  skPropHeight = GetIOConst("kVDADecoderConfiguration_Height");
  skPropSourceFormat = GetIOConst("kVDADecoderConfiguration_SourceFormat");
  skPropAVCCData = GetIOConst("kVDADecoderConfiguration_avcCData");
  if (!skPropWidth || !skPropHeight || !skPropSourceFormat || !skPropAVCCData) {
    goto fail;
  }
  LOG("Loaded VideoDecodeAcceleration framework.");
  sLinkStatus = LinkStatus_SUCCEEDED;
  return true;
fail:
  Unlink();
  sLinkStatus = LinkStatus_FAILED;
  return false;
}
// Drop every resolved symbol and dlclose the framework, returning the link
// state to LinkStatus_INIT so Link() may be retried.
/* static */ void
AppleVDALinker::Unlink()
{
  if (sLink) {
    LOG("Unlinking VideoDecodeAcceleration framework.");
// Null out each resolved entry point (X-macro over AppleVDAFunctions.h).
#define LINK_FUNC(func) \
  func = nullptr;
#include "AppleVDAFunctions.h"
#undef LINK_FUNC
    dlclose(sLink);
    sLink = nullptr;
    skPropWidth = nullptr;
    skPropHeight = nullptr;
    skPropSourceFormat = nullptr;
    skPropAVCCData = nullptr;
    sLinkStatus = LinkStatus_INIT;
  }
}
// Resolve an exported CFStringRef constant from the already-dlopen'd
// framework. Returns nullptr when the symbol is absent.
/* static */ CFStringRef
AppleVDALinker::GetIOConst(const char* symbol)
{
  CFStringRef* address = static_cast<CFStringRef*>(dlsym(sLink, symbol));
  return address ? *address : nullptr;
}
} // namespace mozilla

Просмотреть файл

@ -1,49 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef AppleVDALinker_h
#define AppleVDALinker_h
extern "C" {
#pragma GCC visibility push(default)
#include "VideoDecodeAcceleration/VDADecoder.h"
#pragma GCC visibility pop
}
#include "nscore.h"
namespace mozilla {
// Runtime (dlopen) linker for the VideoDecodeAcceleration framework: resolves
// the VDA entry points and configuration-key constants so callers never link
// against the framework at build time.
class AppleVDALinker
{
public:
  // Load the framework and resolve all symbols; false on any failure.
  static bool Link();
  // Release the framework and reset all resolved symbols.
  static void Unlink();
  // VDA configuration dictionary keys resolved from the framework.
  static CFStringRef skPropWidth;
  static CFStringRef skPropHeight;
  static CFStringRef skPropSourceFormat;
  static CFStringRef skPropAVCCData;
private:
  static void* sLink;
  // NOTE(review): sRefCount appears unused — no definition or reference in
  // AppleVDALinker.cpp; candidate for removal.
  static nsrefcnt sRefCount;
  static enum LinkStatus {
    LinkStatus_INIT = 0,
    LinkStatus_FAILED,
    LinkStatus_SUCCEEDED
  } sLinkStatus;
  // Look up an exported CFStringRef constant by symbol name.
  static CFStringRef GetIOConst(const char* symbol);
};
#define LINK_FUNC(func) extern typeof(func)* func;
#include "AppleVDAFunctions.h"
#undef LINK_FUNC
} // namespace mozilla
#endif // AppleVDALinker_h

Просмотреть файл

@ -24,11 +24,45 @@
namespace mozilla {
static uint32_t ComputeMaxRefFrames(const MediaByteBuffer* aExtraData)
{
uint32_t maxRefFrames = 4;
// Retrieve video dimensions from H264 SPS NAL.
mp4_demuxer::SPSData spsdata;
if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata)) {
// max_num_ref_frames determines the size of the sliding window
// we need to queue that many frames in order to guarantee proper
// pts frames ordering. Use a minimum of 4 to ensure proper playback of
// non compliant videos.
maxRefFrames =
std::min(std::max(maxRefFrames, spsdata.max_num_ref_frames + 1), 16u);
}
return maxRefFrames;
}
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer)
: AppleVDADecoder(aConfig, aTaskQueue, aCallback, aImageContainer)
: mExtraData(aConfig.mExtraData)
, mCallback(aCallback)
, mPictureWidth(aConfig.mImage.width)
, mPictureHeight(aConfig.mImage.height)
, mDisplayWidth(aConfig.mDisplay.width)
, mDisplayHeight(aConfig.mDisplay.height)
, mQueuedSamples(0)
, mTaskQueue(aTaskQueue)
, mMaxRefFrames(ComputeMaxRefFrames(aConfig.mExtraData))
, mImageContainer(aImageContainer)
, mInputIncoming(0)
, mIsShutDown(false)
#ifdef MOZ_WIDGET_UIKIT
, mUseSoftwareImages(true)
#else
, mUseSoftwareImages(false)
#endif
, mIsFlushing(false)
, mMonitor("AppleVideoDecoder")
, mFormat(nullptr)
, mSession(nullptr)
, mIsHardwareAccelerated(false)
@ -58,6 +92,88 @@ AppleVTDecoder::Init()
return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
}
nsresult
AppleVTDecoder::Input(MediaRawData* aSample)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
aSample,
aSample->mTime,
aSample->mDuration,
aSample->mKeyframe ? " keyframe" : "",
aSample->Size());
mInputIncoming++;
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &AppleVTDecoder::ProcessDecode, aSample));
return NS_OK;
}
nsresult
AppleVTDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
nsCOMPtr<nsIRunnable> runnable =
NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
SyncRunnable::DispatchToThread(mTaskQueue, runnable);
mIsFlushing = false;
// All ProcessDecode() tasks should be done.
MOZ_ASSERT(mInputIncoming == 0);
mSeekTargetThreshold.reset();
return NS_OK;
}
nsresult
AppleVTDecoder::Drain()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
nsCOMPtr<nsIRunnable> runnable =
NewRunnableMethod(this, &AppleVTDecoder::ProcessDrain);
mTaskQueue->Dispatch(runnable.forget());
return NS_OK;
}
nsresult
AppleVTDecoder::Shutdown()
{
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
mIsShutDown = true;
if (mTaskQueue) {
nsCOMPtr<nsIRunnable> runnable =
NewRunnableMethod(this, &AppleVTDecoder::ProcessShutdown);
mTaskQueue->Dispatch(runnable.forget());
} else {
ProcessShutdown();
}
return NS_OK;
}
nsresult
AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
{
AssertOnTaskQueueThread();
mInputIncoming--;
if (mIsFlushing) {
return NS_OK;
}
auto rv = DoDecode(aSample);
// Ask for more data.
if (NS_SUCCEEDED(rv) && !mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
LOG("%s task queue empty; requesting more data", GetDescriptionName());
mCallback->InputExhausted();
}
return rv;
}
void
AppleVTDecoder::ProcessShutdown()
{
@ -99,6 +215,40 @@ AppleVTDecoder::ProcessDrain()
mCallback->DrainComplete();
}
AppleVTDecoder::AppleFrameRef*
AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
{
MOZ_ASSERT(aSample);
return new AppleFrameRef(*aSample);
}
void
AppleVTDecoder::DrainReorderedFrames()
{
MonitorAutoLock mon(mMonitor);
while (!mReorderQueue.IsEmpty()) {
mCallback->Output(mReorderQueue.Pop().get());
}
mQueuedSamples = 0;
}
void
AppleVTDecoder::ClearReorderedFrames()
{
MonitorAutoLock mon(mMonitor);
while (!mReorderQueue.IsEmpty()) {
mReorderQueue.Pop();
}
mQueuedSamples = 0;
}
void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
mSeekTargetThreshold = Some(aTime);
}
//
// Implementation details.
//
@ -136,6 +286,157 @@ PlatformCallback(void* decompressionOutputRefCon,
decoder->OutputFrame(image, *frameRef);
}
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
AppleVTDecoder::AppleFrameRef aFrameRef)
{
if (mIsShutDown || mIsFlushing) {
// We are in the process of flushing or shutting down; ignore frame.
return NS_OK;
}
LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
aFrameRef.byte_offset,
aFrameRef.decode_timestamp.ToMicroseconds(),
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds(),
aFrameRef.is_sync_point ? " keyframe" : ""
);
if (mQueuedSamples > mMaxRefFrames) {
// We had stopped requesting more input because we had received too much at
// the time. We can ask for more once again.
mCallback->InputExhausted();
}
MOZ_ASSERT(mQueuedSamples);
mQueuedSamples--;
if (!aImage) {
// Image was dropped by decoder.
return NS_OK;
}
bool useNullSample = false;
if (mSeekTargetThreshold.isSome()) {
if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
useNullSample = true;
} else {
mSeekTargetThreshold.reset();
}
}
// Where our resulting image will end up.
RefPtr<MediaData> data;
// Bounds.
VideoInfo info;
info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
gfx::IntRect visible = gfx::IntRect(0,
0,
mPictureWidth,
mPictureHeight);
if (useNullSample) {
data = new NullData(aFrameRef.byte_offset,
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds());
} else if (mUseSoftwareImages) {
size_t width = CVPixelBufferGetWidth(aImage);
size_t height = CVPixelBufferGetHeight(aImage);
DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
VideoData::YCbCrBuffer buffer;
// Lock the returned image data.
CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
if (rv != kCVReturnSuccess) {
NS_ERROR("error locking pixel data");
mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
return NS_ERROR_FAILURE;
}
// Y plane.
buffer.mPlanes[0].mData =
static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
buffer.mPlanes[0].mWidth = width;
buffer.mPlanes[0].mHeight = height;
buffer.mPlanes[0].mOffset = 0;
buffer.mPlanes[0].mSkip = 0;
// Cb plane.
buffer.mPlanes[1].mData =
static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
buffer.mPlanes[1].mWidth = (width+1) / 2;
buffer.mPlanes[1].mHeight = (height+1) / 2;
buffer.mPlanes[1].mOffset = 0;
buffer.mPlanes[1].mSkip = 1;
// Cr plane.
buffer.mPlanes[2].mData =
static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
buffer.mPlanes[2].mWidth = (width+1) / 2;
buffer.mPlanes[2].mHeight = (height+1) / 2;
buffer.mPlanes[2].mOffset = 1;
buffer.mPlanes[2].mSkip = 1;
// Copy the image data into our own format.
data =
VideoData::Create(info,
mImageContainer,
nullptr,
aFrameRef.byte_offset,
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds(),
buffer,
aFrameRef.is_sync_point,
aFrameRef.decode_timestamp.ToMicroseconds(),
visible);
// Unlock the returned image data.
CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
} else {
#ifndef MOZ_WIDGET_UIKIT
IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
data =
VideoData::CreateFromImage(info,
mImageContainer,
aFrameRef.byte_offset,
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds(),
image.forget(),
aFrameRef.is_sync_point,
aFrameRef.decode_timestamp.ToMicroseconds(),
visible);
#else
MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
if (!data) {
NS_ERROR("Couldn't create VideoData for frame");
mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
return NS_ERROR_FAILURE;
}
// Frames come out in DTS order but we need to output them
// in composition order.
MonitorAutoLock mon(mMonitor);
mReorderQueue.Push(data);
while (mReorderQueue.Length() > mMaxRefFrames) {
mCallback->Output(mReorderQueue.Pop().get());
}
LOG("%llu decoded frames queued",
static_cast<unsigned long long>(mReorderQueue.Length()));
return NS_OK;
}
nsresult
AppleVTDecoder::WaitForAsynchronousFrames()
{
@ -339,4 +640,71 @@ AppleVTDecoder::CreateDecoderSpecification()
&kCFTypeDictionaryValueCallBacks);
}
CFDictionaryRef
AppleVTDecoder::CreateOutputConfiguration()
{
if (mUseSoftwareImages) {
// Output format type:
SInt32 PixelFormatTypeValue =
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
CFNumberCreate(kCFAllocatorDefault,
kCFNumberSInt32Type,
&PixelFormatTypeValue);
const void* outputKeys[] = { kCVPixelBufferPixelFormatTypeKey };
const void* outputValues[] = { PixelFormatTypeNumber };
static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
"Non matching keys/values array size");
return CFDictionaryCreate(kCFAllocatorDefault,
outputKeys,
outputValues,
ArrayLength(outputKeys),
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
}
#ifndef MOZ_WIDGET_UIKIT
// Output format type:
SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
CFNumberCreate(kCFAllocatorDefault,
kCFNumberSInt32Type,
&PixelFormatTypeValue);
// Construct IOSurface Properties
const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
const void* IOSurfaceValues[] = { kCFBooleanTrue };
static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
"Non matching keys/values array size");
// Contruct output configuration.
AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
CFDictionaryCreate(kCFAllocatorDefault,
IOSurfaceKeys,
IOSurfaceValues,
ArrayLength(IOSurfaceKeys),
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
kCVPixelBufferPixelFormatTypeKey,
kCVPixelBufferOpenGLCompatibilityKey };
const void* outputValues[] = { IOSurfaceProperties,
PixelFormatTypeNumber,
kCFBooleanTrue };
static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
"Non matching keys/values array size");
return CFDictionaryCreate(kCFAllocatorDefault,
outputKeys,
outputValues,
ArrayLength(outputKeys),
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
#else
MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
} // namespace mozilla

Просмотреть файл

@ -7,20 +7,48 @@
#ifndef mozilla_AppleVTDecoder_h
#define mozilla_AppleVTDecoder_h
#include "AppleVDADecoder.h"
#include "PlatformDecoderModule.h"
#include "mozilla/Atomics.h"
#include "nsIThread.h"
#include "ReorderQueue.h"
#include "TimeUnits.h"
#include "VideoToolbox/VideoToolbox.h"
namespace mozilla {
class AppleVTDecoder : public AppleVDADecoder {
class AppleVTDecoder : public MediaDataDecoder {
public:
AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer);
class AppleFrameRef {
public:
media::TimeUnit decode_timestamp;
media::TimeUnit composition_timestamp;
media::TimeUnit duration;
int64_t byte_offset;
bool is_sync_point;
explicit AppleFrameRef(const MediaRawData& aSample)
: decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
, composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
, duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
, byte_offset(aSample.mOffset)
, is_sync_point(aSample.mKeyframe)
{
}
};
RefPtr<InitPromise> Init() override;
nsresult Input(MediaRawData* aSample) override;
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
void SetSeekThreshold(const media::TimeUnit& aTime) override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return mIsHardwareAccelerated;
@ -33,22 +61,71 @@ public:
: "apple software VT decoder";
}
// Access from the taskqueue and the decoder's thread.
// OutputFrame is thread-safe.
nsresult OutputFrame(CVPixelBufferRef aImage,
AppleFrameRef aFrameRef);
private:
virtual ~AppleVTDecoder();
void ProcessFlush() override;
void ProcessDrain() override;
void ProcessShutdown() override;
void ProcessFlush();
void ProcessDrain();
void ProcessShutdown();
nsresult ProcessDecode(MediaRawData* aSample);
CMVideoFormatDescriptionRef mFormat;
VTDecompressionSessionRef mSession;
void AssertOnTaskQueueThread()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
}
AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
void DrainReorderedFrames();
void ClearReorderedFrames();
CFDictionaryRef CreateOutputConfiguration();
const RefPtr<MediaByteBuffer> mExtraData;
MediaDataDecoderCallback* mCallback;
const uint32_t mPictureWidth;
const uint32_t mPictureHeight;
const uint32_t mDisplayWidth;
const uint32_t mDisplayHeight;
// Number of times a sample was queued via Input(). Will be decreased upon
// the decoder's callback being invoked.
// This is used to calculate how many frames has been buffered by the decoder.
Atomic<uint32_t> mQueuedSamples;
// Method to pass a frame to VideoToolbox for decoding.
nsresult DoDecode(MediaRawData* aSample) override;
// Method to set up the decompression session.
nsresult InitializeSession();
nsresult WaitForAsynchronousFrames();
CFDictionaryRef CreateDecoderSpecification();
CFDictionaryRef CreateDecoderExtensions();
// Method to pass a frame to VideoToolbox for decoding.
nsresult DoDecode(MediaRawData* aSample);
const RefPtr<TaskQueue> mTaskQueue;
const uint32_t mMaxRefFrames;
const RefPtr<layers::ImageContainer> mImageContainer;
// Increased when Input is called, and decreased when ProcessFrame runs.
// Reaching 0 indicates that there's no pending Input.
Atomic<uint32_t> mInputIncoming;
Atomic<bool> mIsShutDown;
const bool mUseSoftwareImages;
// Set on reader/decode thread calling Flush() to indicate that output is
// not required and so input samples on mTaskQueue need not be processed.
// Cleared on mTaskQueue in ProcessDrain().
Atomic<bool> mIsFlushing;
// Protects mReorderQueue.
Monitor mMonitor;
ReorderQueue mReorderQueue;
// Decoded frame will be dropped if its pts is smaller than this
// value. It shold be initialized before Input() or after Flush(). So it is
// safe to access it in OutputFrame without protecting.
Maybe<media::TimeUnit> mSeekTargetThreshold;
CMVideoFormatDescriptionRef mFormat;
VTDecompressionSessionRef mSession;
Atomic<bool> mIsHardwareAccelerated;
};

Просмотреть файл

@ -1,63 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Stub header for VideoDecodeAcceleration framework API.
// This is a private Framework on 10.6 see:
// https://developer.apple.com/library/mac/technotes/tn2267/_index.html
// We include our own copy so we can build on MacOS versions
// where it's not available.
#ifndef mozilla_VideoDecodeAcceleration_VDADecoder_h
#define mozilla_VideoDecodeAcceleration_VDADecoder_h
#include <CoreFoundation/CoreFoundation.h>
#include <CoreVideo/CoreVideo.h>
typedef uint32_t VDADecodeFrameFlags;
typedef uint32_t VDADecodeInfoFlags;
enum {
kVDADecodeInfo_Asynchronous = 1UL << 0,
kVDADecodeInfo_FrameDropped = 1UL << 1
};
enum {
kVDADecoderFlush_EmitFrames = 1 << 0
};
typedef struct OpaqueVDADecoder* VDADecoder;
typedef void (*VDADecoderOutputCallback)
(void* decompressionOutputRefCon,
CFDictionaryRef frameInfo,
OSStatus status,
uint32_t infoFlags,
CVImageBufferRef imageBuffer);
OSStatus
VDADecoderCreate(
CFDictionaryRef decoderConfiguration,
CFDictionaryRef destinationImageBufferAttributes, /* can be NULL */
VDADecoderOutputCallback* outputCallback,
void* decoderOutputCallbackRefcon,
VDADecoder* decoderOut);
OSStatus
VDADecoderDecode(
VDADecoder decoder,
uint32_t decodeFlags,
CFTypeRef compressedBuffer,
CFDictionaryRef frameInfo); /* can be NULL */
OSStatus
VDADecoderFlush(
VDADecoder decoder,
uint32_t flushFlags);
OSStatus
VDADecoderDestroy(VDADecoder decoder);
#endif // mozilla_VideoDecodeAcceleration_VDADecoder_h

Просмотреть файл

@ -63,8 +63,6 @@ if CONFIG['MOZ_APPLEMEDIA']:
'apple/AppleATDecoder.cpp',
'apple/AppleCMLinker.cpp',
'apple/AppleDecoderModule.cpp',
'apple/AppleVDADecoder.cpp',
'apple/AppleVDALinker.cpp',
'apple/AppleVTDecoder.cpp',
'apple/AppleVTLinker.cpp',
]