Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1059066 - Avoid picture frame copy and keep picture in GPU by using IOSurface objects. r=rillian
Parent: 71e164f7e9
Commit: 1dc6df2eed
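Annotation: the patch to AppleVTDecoder has two halves. The decompression session is now created with pixel-buffer attributes requesting IOSurface-backed, 32BGRA, OpenGL-compatible output (last hunk), and OutputFrame() then wraps the returned IOSurface in a MacIOSurfaceImage instead of locking the buffer and copying the YCbCr planes into a VideoData::YCbCrBuffer (second hunk). The standalone sketch below is not Gecko code; it only demonstrates, with the public CoreVideo API, that an empty dictionary under kCVPixelBufferIOSurfacePropertiesKey is what makes CoreVideo allocate an IOSurface-backed buffer, which is the mechanism the diff relies on.

// Minimal standalone sketch (assumption-laden, not part of this patch).
// Build roughly: clang++ demo.cpp -framework CoreFoundation -framework CoreVideo
#include <CoreFoundation/CoreFoundation.h>
#include <CoreVideo/CoreVideo.h>
#include <CoreVideo/CVPixelBufferIOSurface.h>
#include <stdio.h>

int main()
{
  // "Use default IOSurface options" is expressed as an empty dictionary.
  CFDictionaryRef ioSurfaceProps =
    CFDictionaryCreate(kCFAllocatorDefault, NULL, NULL, 0,
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);
  const void* keys[] = { kCVPixelBufferIOSurfacePropertiesKey };
  const void* values[] = { ioSurfaceProps };
  CFDictionaryRef attrs =
    CFDictionaryCreate(kCFAllocatorDefault, keys, values, 1,
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);

  CVPixelBufferRef buffer = NULL;
  CVReturn rv = CVPixelBufferCreate(kCFAllocatorDefault, 1280, 720,
                                    kCVPixelFormatType_32BGRA, attrs, &buffer);
  if (rv == kCVReturnSuccess) {
    // A non-NULL IOSurface means the pixels live in GPU-shareable memory,
    // so a compositor can use them without a CPU copy.
    printf("IOSurface backed: %s\n",
           CVPixelBufferGetIOSurface(buffer) ? "yes" : "no");
    CVPixelBufferRelease(buffer);
  }
  CFRelease(attrs);
  CFRelease(ioSurfaceProps);
  return 0;
}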
@@ -6,17 +6,19 @@
 #include <CoreFoundation/CFString.h>
 
+#include "AppleCMLinker.h"
 #include "AppleUtils.h"
+#include "AppleVTDecoder.h"
+#include "AppleVTLinker.h"
 #include "mp4_demuxer/DecoderData.h"
 #include "MP4Reader.h"
 #include "MP4Decoder.h"
+#include "MediaData.h"
+#include "MacIOSurfaceImage.h"
+#include "mozilla/ArrayUtils.h"
 #include "nsAutoPtr.h"
 #include "nsThreadUtils.h"
-#include "AppleCMLinker.h"
-#include "AppleVTDecoder.h"
-#include "AppleVTLinker.h"
 #include "prlog.h"
-#include "MediaData.h"
 #include "VideoUtils.h"
 
 #ifdef PR_LOGGING
@@ -238,81 +240,34 @@ nsresult
 AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                             nsAutoPtr<FrameRef> aFrameRef)
 {
-  size_t width = CVPixelBufferGetWidth(aImage);
-  size_t height = CVPixelBufferGetHeight(aImage);
-  LOG(" got decoded frame data... %ux%u %s", width, height,
-      CVPixelBufferIsPlanar(aImage) ? "planar" : "chunked");
-#ifdef DEBUG
-  size_t planes = CVPixelBufferGetPlaneCount(aImage);
-  for (size_t i = 0; i < planes; ++i) {
-    size_t stride = CVPixelBufferGetBytesPerRowOfPlane(aImage, i);
-    LOG(" plane %u %ux%u rowbytes %u",
-        (unsigned)i,
-        CVPixelBufferGetWidthOfPlane(aImage, i),
-        CVPixelBufferGetHeightOfPlane(aImage, i),
-        (unsigned)stride);
-  }
-  MOZ_ASSERT(planes == 2);
-#endif // DEBUG
-
-  VideoData::YCbCrBuffer buffer;
-
-  // Lock the returned image data.
-  CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
-  if (rv != kCVReturnSuccess) {
-    NS_ERROR("error locking pixel data");
-    mCallback->Error();
-    return NS_ERROR_FAILURE;
-  }
-  // Y plane.
-  buffer.mPlanes[0].mData =
-    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
-  buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
-  buffer.mPlanes[0].mWidth = width;
-  buffer.mPlanes[0].mHeight = height;
-  buffer.mPlanes[0].mOffset = 0;
-  buffer.mPlanes[0].mSkip = 0;
-  // Cb plane.
-  buffer.mPlanes[1].mData =
-    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
-  buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
-  buffer.mPlanes[1].mWidth = (width+1) / 2;
-  buffer.mPlanes[1].mHeight = (height+1) / 2;
-  buffer.mPlanes[1].mOffset = 0;
-  buffer.mPlanes[1].mSkip = 1;
-  // Cr plane.
-  buffer.mPlanes[2].mData =
-    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
-  buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
-  buffer.mPlanes[2].mWidth = (width+1) / 2;
-  buffer.mPlanes[2].mHeight = (height+1) / 2;
-  buffer.mPlanes[2].mOffset = 1;
-  buffer.mPlanes[2].mSkip = 1;
+  IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
+  MOZ_ASSERT(surface, "VideoToolbox didn't return an IOSurface backed buffer");
 
+  nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
   // Bounds.
   VideoInfo info;
-  info.mDisplay = nsIntSize(width, height);
+  info.mDisplay = nsIntSize(macSurface->GetWidth(), macSurface->GetHeight());
   info.mHasVideo = true;
   gfx::IntRect visible = gfx::IntRect(0,
                                       0,
                                       mConfig.display_width,
                                       mConfig.display_height);
 
-  // Copy the image data into our own format.
+  nsRefPtr<layers::Image> image =
+    mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
+  layers::MacIOSurfaceImage* videoImage =
+    static_cast<layers::MacIOSurfaceImage*>(image.get());
+  videoImage->SetSurface(macSurface);
+
   nsAutoPtr<VideoData> data;
-  data =
-    VideoData::Create(info,
-                      mImageContainer,
-                      nullptr,
-                      aFrameRef->byte_offset,
-                      aFrameRef->composition_timestamp,
-                      aFrameRef->duration,
-                      buffer,
-                      aFrameRef->is_sync_point,
-                      aFrameRef->decode_timestamp,
-                      visible);
-
-  // Unlock the returned image data.
-  CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
+  data = VideoData::CreateFromImage(info,
+                                    mImageContainer,
+                                    aFrameRef->byte_offset,
+                                    aFrameRef->composition_timestamp,
+                                    aFrameRef->duration, image.forget(),
+                                    aFrameRef->is_sync_point,
+                                    aFrameRef->decode_timestamp,
+                                    visible);
 
   if (!data) {
     NS_ERROR("Couldn't create VideoData for frame");
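Annotation: with the hunk above, OutputFrame() no longer locks the pixel buffer and copies three planes; it pulls the IOSurface out of the CVPixelBuffer, wraps it in a layers::MacIOSurfaceImage, and hands that to VideoData::CreateFromImage(), so the decoded picture stays in GPU-shareable memory. As an illustration of why that is enough (this is not the compositor path Gecko uses here, and it assumes an existing CGL context and texture name), an IOSurface can be bound directly as an OpenGL texture:

// Illustrative only: bind an IOSurface-backed frame as a GPU texture.
#include <IOSurface/IOSurface.h>
#include <OpenGL/OpenGL.h>
#include <OpenGL/CGLIOSurface.h>
#include <OpenGL/gl.h>

static bool BindSurfaceAsTexture(CGLContextObj aContext, IOSurfaceRef aSurface,
                                 GLuint aTexture)
{
  glBindTexture(GL_TEXTURE_RECTANGLE_ARB, aTexture);
  // 32BGRA matches the kCVPixelFormatType_32BGRA output requested from
  // VideoToolbox in the last hunk of this patch.
  CGLError err = CGLTexImageIOSurface2D(aContext,
                                        GL_TEXTURE_RECTANGLE_ARB,
                                        GL_RGBA,
                                        (GLsizei)IOSurfaceGetWidth(aSurface),
                                        (GLsizei)IOSurfaceGetHeight(aSurface),
                                        GL_BGRA,
                                        GL_UNSIGNED_INT_8_8_8_8_REV,
                                        aSurface,
                                        0 /* plane */);
  return err == kCGLNoError;
}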
@@ -447,11 +402,38 @@ AppleVTDecoder::InitializeSession()
   // Contruct video decoder selection spec.
   AutoCFRelease<CFDictionaryRef> spec = CreateDecoderSpecification();
 
+  // Contruct output configuration.
+  AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
+    CFDictionaryCreate(NULL,
+                       NULL,
+                       NULL,
+                       0,
+                       &kCFTypeDictionaryKeyCallBacks,
+                       &kCFTypeDictionaryValueCallBacks);
+
+  SInt32 PixelFormatTypeValue = kCVPixelFormatType_32BGRA;
+  AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
+    CFNumberCreate(NULL, kCFNumberSInt32Type, &PixelFormatTypeValue);
+
+  const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
+                               kCVPixelBufferPixelFormatTypeKey,
+                               kCVPixelBufferOpenGLCompatibilityKey };
+  const void* outputValues[] = { IOSurfaceProperties,
+                                 PixelFormatTypeNumber,
+                                 kCFBooleanTrue };
+  AutoCFRelease<CFDictionaryRef> outputConfiguration =
+    CFDictionaryCreate(NULL,
+                       outputKeys,
+                       outputValues,
+                       ArrayLength(outputKeys),
+                       &kCFTypeDictionaryKeyCallBacks,
+                       &kCFTypeDictionaryValueCallBacks);
+
   VTDecompressionOutputCallbackRecord cb = { PlatformCallback, this };
   rv = VTDecompressionSessionCreate(NULL, // Allocator.
                                     mFormat,
                                     spec, // Video decoder selection.
-                                    NULL, // Output video format.
+                                    outputConfiguration, // Output video format.
                                     &cb,
                                     &mSession);
 
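Annotation: the output configuration above keeps its CF temporaries (the empty IOSurface properties dictionary, the pixel-format CFNumber, and the attribute dictionary itself) in AutoCFRelease wrappers so they do not leak after InitializeSession() returns; VTDecompressionSessionCreate retains or copies what it needs. The real helper lives in AppleUtils.h; the sketch below is only an assumption about its shape, a scope-bound CFRelease wrapper, shown for readers without the Gecko tree at hand.

// Hypothetical stand-in for AutoCFRelease (not the Gecko implementation):
// owns a Core Foundation object and releases it when the scope ends.
#include <CoreFoundation/CoreFoundation.h>

template <typename T>
class ScopedCFRelease
{
public:
  explicit ScopedCFRelease(T aObject) : mObject(aObject) {}
  ~ScopedCFRelease()
  {
    if (mObject) {
      CFRelease(mObject);
    }
  }
  // Implicit conversion so the wrapper can be passed straight to CF/VT calls.
  operator T() const { return mObject; }

  ScopedCFRelease(const ScopedCFRelease&) = delete;
  ScopedCFRelease& operator=(const ScopedCFRelease&) = delete;

private:
  T mObject;
};

// Usage: ScopedCFRelease<CFDictionaryRef> props(CFDictionaryCreate(...));
// props is released automatically when it goes out of scope.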