Backed out 4 changesets (bug 1713276) for bustages on FFmpegVideoDecoder.cpp. CLOSED TREE

Backed out changeset 12a9f3fad481 (bug 1713276)
Backed out changeset cdc8216b3f18 (bug 1713276)
Backed out changeset b23498172031 (bug 1713276)
Backed out changeset 13ce7037ff27 (bug 1713276)
Narcis Beleuzu 2022-01-20 07:26:12 +02:00
Parent a68df3aa20
Commit c88b1a661d
17 changed files: 33 additions and 598 deletions
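For context: the backed-out patches made FFmpegVideoDecoder install a custom get_buffer2 callback so FFmpeg decodes directly into shmem-backed images. Below is a minimal sketch of that FFmpeg direct-rendering pattern, assuming libavcodec >= 57; PoolBuffer and the malloc-based allocation are hypothetical stand-ins for the shmem/TextureClient machinery, not the backed-out code itself.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
}
#include <cstdlib>

// Hypothetical stand-in for the shmem-backed image the patches allocated.
struct PoolBuffer {
  uint8_t* data;
};

// FFmpeg calls this when the last reference to frame->buf[0] is dropped.
static void ReleaseToPool(void* opaque, uint8_t* /* data */) {
  auto* buf = static_cast<PoolBuffer*>(opaque);
  std::free(buf->data);  // a real pool would recycle the shmem instead
  delete buf;
}

static int GetBufferFromPool(AVCodecContext* ctx, AVFrame* frame, int flags) {
  // Only codecs that advertise direct rendering accept a custom allocator.
  if (!(ctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
    return avcodec_default_get_buffer2(ctx, frame, flags);
  }
  const int size = av_image_get_buffer_size(ctx->pix_fmt, ctx->coded_width,
                                            ctx->coded_height, /* align */ 16);
  if (size < 0) {
    return size;
  }
  auto* buf = new PoolBuffer{static_cast<uint8_t*>(std::malloc(size))};
  if (!buf->data) {
    delete buf;
    return AVERROR(ENOMEM);
  }
  // Point the frame's planes into our buffer...
  av_image_fill_arrays(frame->data, frame->linesize, buf->data, ctx->pix_fmt,
                       ctx->coded_width, ctx->coded_height, 16);
  // ...and hand ownership to FFmpeg; ReleaseToPool runs when it is done.
  frame->buf[0] = av_buffer_create(buf->data, size, ReleaseToPool, buf, 0);
  if (!frame->buf[0]) {
    ReleaseToPool(buf, nullptr);
    return AVERROR(ENOMEM);
  }
  frame->extended_data = frame->data;
  return 0;
}

// Wired up once per codec context, as the backed-out InitCodecContext did:
//   ctx->get_buffer2 = GetBufferFromPool;
//   ctx->opaque = decoder;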

View file

@@ -142,14 +142,11 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(av_parser_init, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_parser_close, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_parser_parse2, AV_FUNC_AVCODEC_ALL)
AV_FUNC(avcodec_align_dimensions, AV_FUNC_AVCODEC_ALL)
AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_free_frame, AV_FUNC_54)
AV_FUNC(avcodec_send_packet, AV_FUNC_58)
AV_FUNC(avcodec_receive_frame, AV_FUNC_58)
AV_FUNC(avcodec_default_get_buffer2,
(AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58))
AV_FUNC_OPTION(av_rdft_init, AV_FUNC_AVCODEC_ALL)
AV_FUNC_OPTION(av_rdft_calc, AV_FUNC_AVCODEC_ALL)
AV_FUNC_OPTION(av_rdft_end, AV_FUNC_AVCODEC_ALL)
@@ -162,15 +159,8 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58))
AV_FUNC(av_frame_unref, (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 |
AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58))
AV_FUNC(av_image_check_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC_OPTION(av_buffer_get_opaque,
(AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58))
AV_FUNC(av_buffer_create, (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 |
AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58))
AV_FUNC_OPTION(av_frame_get_colorspace, AV_FUNC_AVUTIL_ALL)
AV_FUNC_OPTION(av_frame_get_color_range, AV_FUNC_AVUTIL_ALL)
#ifdef MOZ_WAYLAND
AV_FUNC_OPTION_SILENT(avcodec_get_hw_config, AV_FUNC_58)
AV_FUNC_OPTION_SILENT(av_codec_iterate, AV_FUNC_58)
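The hunks above drop the symbols the patch had added to Link()'s AV_FUNC tables (avcodec_align_dimensions, av_image_check_size, av_image_get_buffer_size, av_buffer_create, av_buffer_get_opaque). As a rough illustration of what those tables do — a hedged sketch, not Mozilla's actual macros — each entry resolves one FFmpeg symbol at runtime and fails the whole Link() only if the symbol is required for the library major version that was loaded:

#include <dlfcn.h>
#include <cstdio>

// Resolve `name` from an already-dlopen()ed library handle. `required`
// stands in for the AV_FUNC version mask: optional symbols (AV_FUNC_OPTION)
// may be missing without failing the link.
template <typename Fn>
static bool LinkSymbol(void* lib, const char* name, Fn** out, bool required) {
  *out = reinterpret_cast<Fn*>(dlsym(lib, name));
  if (!*out && required) {
    std::fprintf(stderr, "Couldn't load function %s\n", name);
    return false;
  }
  return true;
}

// Hypothetical usage, assuming `handle` came from dlopen("libavcodec.so.58"):
//   int (*send_packet)(AVCodecContext*, const AVPacket*) = nullptr;
//   bool ok = LinkSymbol(handle, "avcodec_send_packet", &send_packet,
//                        /* required when */ major == 58);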

View file

@@ -18,10 +18,10 @@ struct AVCodecParserContext;
struct PRLibrary;
#ifdef MOZ_WAYLAND
struct AVCodecHWConfig;
struct AVBufferRef;
struct AVVAAPIHWConfig;
struct AVHWFramesConstraints;
#endif
struct AVBufferRef;
namespace mozilla {
@@ -88,19 +88,13 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
int64_t dts, int64_t pos);
AVCodec* (*av_codec_iterate)(void** opaque);
int (*av_codec_is_decoder)(const AVCodec* codec);
void (*avcodec_align_dimensions)(AVCodecContext* s, int* width, int* height);
// only used in libavcodec <= 54
AVFrame* (*avcodec_alloc_frame)();
void (*avcodec_get_frame_defaults)(AVFrame* pic);
// libavcodec v54 only
void (*avcodec_free_frame)(AVFrame** frame);
// libavcodec >= v55
int (*avcodec_default_get_buffer2)(AVCodecContext* s, AVFrame* frame,
int flags);
// libavcodec v58 and later only
int (*avcodec_send_packet)(AVCodecContext* avctx, const AVPacket* avpkt);
int (*avcodec_receive_frame)(AVCodecContext* avctx, AVFrame* frame);
@@ -114,21 +108,11 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
void (*av_log_set_level)(int level);
void* (*av_malloc)(size_t size);
void (*av_freep)(void* ptr);
int (*av_image_check_size)(unsigned int w, unsigned int h, int log_offset,
void* log_ctx);
int (*av_image_get_buffer_size)(int pix_fmt, int width, int height,
int align);
// libavutil v55 and later only
AVFrame* (*av_frame_alloc)();
void (*av_frame_free)(AVFrame** frame);
void (*av_frame_unref)(AVFrame* frame);
AVBufferRef* (*av_buffer_create)(uint8_t* data, int size,
void (*free)(void* opaque, uint8_t* data),
void* opaque, int flags);
// libavutil >= v56
void* (*av_buffer_get_opaque)(const AVBufferRef* buf);
// libavutil optional
int (*av_frame_get_colorspace)(const AVFrame* frame);

View file

@@ -17,7 +17,4 @@
MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("FFMPEG: " str, ##__VA_ARGS__))
#endif
#define FFMPEG_LOGV(...) \
MOZ_LOG(sPDMLog, mozilla::LogLevel::Verbose, (__VA_ARGS__))
#endif // __FFmpegLog_h__

View file

@@ -16,9 +16,6 @@
#if defined(MOZ_AV1) && defined(FFVPX_VERSION) && defined(MOZ_WAYLAND)
# include "AOMDecoder.h"
#endif
#if LIBAVCODEC_VERSION_MAJOR >= 57
# include "mozilla/layers/TextureClient.h"
#endif
#ifdef MOZ_WAYLAND_USE_VAAPI
# include "H264.h"
# include "mozilla/layers/DMABUFSurfaceImage.h"
@@ -40,6 +37,7 @@
# define AV_PIX_FMT_NONE PIX_FMT_NONE
#endif
#include "mozilla/PodOperations.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "nsThreadUtils.h"
@@ -56,10 +54,6 @@ typedef int VAStatus;
// Use some extra HW frames for potential rendering lags.
#define EXTRA_HW_FRAMES 6
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
# define CUSTOMIZED_BUFFER_ALLOCATION 1
#endif
typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
@@ -408,13 +402,6 @@ FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(
#endif
}
FFmpegVideoDecoder<LIBAV_VER>::~FFmpegVideoDecoder() {
#ifdef CUSTOMIZED_BUFFER_ALLOCATION
MOZ_DIAGNOSTIC_ASSERT(mAllocatedImages.IsEmpty(),
"Should release all shmem buffers before destroy!");
#endif
}
RefPtr<MediaDataDecoder::InitPromise> FFmpegVideoDecoder<LIBAV_VER>::Init() {
MediaResult rv;
@@ -436,296 +423,6 @@ RefPtr<MediaDataDecoder::InitPromise> FFmpegVideoDecoder<LIBAV_VER>::Init() {
return InitPromise::CreateAndReject(rv, __func__);
}
#ifdef CUSTOMIZED_BUFFER_ALLOCATION
static int GetVideoBufferWrapper(struct AVCodecContext* aCodecContext,
AVFrame* aFrame, int aFlags) {
auto* decoder =
static_cast<FFmpegVideoDecoder<LIBAV_VER>*>(aCodecContext->opaque);
int rv = decoder->GetVideoBuffer(aCodecContext, aFrame, aFlags);
return rv < 0 ? decoder->GetVideoBufferDefault(aCodecContext, aFrame, aFlags)
: rv;
}
static void ReleaseVideoBufferWrapper(void* opaque, uint8_t* data) {
if (opaque) {
FFMPEG_LOGV("ReleaseVideoBufferWrapper: PlanarYCbCrImage=%p", opaque);
RefPtr<ImageBufferWrapper> image = static_cast<ImageBufferWrapper*>(opaque);
image->ReleaseBuffer();
}
}
static gfx::YUVColorSpace TransferAVColorSpaceToYUVColorSpace(
AVColorSpace aSpace) {
switch (aSpace) {
case AVCOL_SPC_BT2020_NCL:
case AVCOL_SPC_BT2020_CL:
return gfx::YUVColorSpace::BT2020;
case AVCOL_SPC_BT709:
return gfx::YUVColorSpace::BT709;
case AVCOL_SPC_SMPTE170M:
case AVCOL_SPC_BT470BG:
return gfx::YUVColorSpace::BT601;
default:
return gfx::YUVColorSpace::Default;
}
}
static bool IsColorFormatSupportedForUsingCustomizedBuffer(
const AVPixelFormat& aFormat) {
// For now, we only support YUV420P, YUVJ420P and YUV444P (plus their 10- and
// 12-bit variants), the only non-HW-accelerated formats supported by FFmpeg's
// H264 and VP9 decoders.
return aFormat == AV_PIX_FMT_YUV420P || aFormat == AV_PIX_FMT_YUVJ420P ||
aFormat == AV_PIX_FMT_YUV420P10LE ||
aFormat == AV_PIX_FMT_YUV420P12LE || aFormat == AV_PIX_FMT_YUV444P ||
aFormat == AV_PIX_FMT_YUV444P10LE || aFormat == AV_PIX_FMT_YUV444P12LE;
}
static gfx::ColorDepth GetColorDepth(const AVPixelFormat& aFormat) {
switch (aFormat) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV444P:
return gfx::ColorDepth::COLOR_8;
case AV_PIX_FMT_YUV420P10LE:
case AV_PIX_FMT_YUV422P10LE:
case AV_PIX_FMT_YUV444P10LE:
return gfx::ColorDepth::COLOR_10;
case AV_PIX_FMT_YUV420P12LE:
case AV_PIX_FMT_YUV422P12LE:
case AV_PIX_FMT_YUV444P12LE:
return gfx::ColorDepth::COLOR_12;
default:
MOZ_ASSERT_UNREACHABLE("Not supported format?");
return gfx::ColorDepth::COLOR_8;
}
}
static bool IsYUV420Sampling(const AVPixelFormat& aFormat) {
return aFormat == AV_PIX_FMT_YUV420P || aFormat == AV_PIX_FMT_YUVJ420P ||
aFormat == AV_PIX_FMT_YUV420P10LE || aFormat == AV_PIX_FMT_YUV420P12LE;
}
layers::TextureClient*
FFmpegVideoDecoder<LIBAV_VER>::AllocateTextueClientForImage(
struct AVCodecContext* aCodecContext, PlanarYCbCrImage* aImage) {
layers::PlanarYCbCrData data =
CreateEmptyPlanarYCbCrData(aCodecContext, mInfo);
// Allocate a shmem buffer for image.
if (!aImage->CreateEmptyBuffer(data)) {
return nullptr;
}
return aImage->GetTextureClient(mImageAllocator);
}
layers::PlanarYCbCrData
FFmpegVideoDecoder<LIBAV_VER>::CreateEmptyPlanarYCbCrData(
struct AVCodecContext* aCodecContext, const VideoInfo& aInfo) {
MOZ_ASSERT(
IsColorFormatSupportedForUsingCustomizedBuffer(aCodecContext->pix_fmt));
// FFmpeg will store images with color depth > 8 bits in 16 bits with extra
// padding.
const int32_t bytesPerChannel =
GetColorDepth(aCodecContext->pix_fmt) == gfx::ColorDepth::COLOR_8 ? 1 : 2;
// If the adjusted Y size is larger than the actual image size (coded_width *
// coded_height), the ffmpeg decoder needs extra padding on both width and
// height. If that happens, the planes will need to be cropped later in order
// to avoid a visible incorrect border on the right and bottom of the actual
// image.
//
// Here are examples of videos of various sizes in YUV420P format whose width
// and height need to be adjusted in order to align the padding.
//
// Eg1. video (1920*1080)
// plane Y
// width 1920 height 1080 -> adjusted-width 1920 adjusted-height 1088
// plane Cb/Cr
// width 960 height 540 -> adjusted-width 1024 adjusted-height 544
//
// Eg2. video (2560*1440)
// plane Y
// width 2560 height 1440 -> adjusted-width 2560 adjusted-height 1440
// plane Cb/Cr
// width 1280 height 720 -> adjusted-width 1280 adjusted-height 736
layers::PlanarYCbCrData data;
auto paddedYSize =
gfx::IntSize{aCodecContext->coded_width, aCodecContext->coded_height};
mLib->avcodec_align_dimensions(aCodecContext, &paddedYSize.width,
&paddedYSize.height);
data.mYSize = gfx::IntSize{paddedYSize.Width(), paddedYSize.Height()};
data.mYStride = data.mYSize.Width() * bytesPerChannel;
data.mCroppedYSize = Some(
gfx::IntSize{aCodecContext->coded_width, aCodecContext->coded_height});
MOZ_ASSERT(
IsColorFormatSupportedForUsingCustomizedBuffer(aCodecContext->pix_fmt));
const auto yDims =
gfx::IntSize{aCodecContext->coded_width, aCodecContext->coded_height};
auto uvDims = yDims;
if (IsYUV420Sampling(aCodecContext->pix_fmt)) {
uvDims.width = (uvDims.width + 1) / 2;
uvDims.height = (uvDims.height + 1) / 2;
}
auto paddedCbCrSize = uvDims;
mLib->avcodec_align_dimensions(aCodecContext, &paddedCbCrSize.width,
&paddedCbCrSize.height);
data.mCbCrSize =
gfx::IntSize{paddedCbCrSize.Width(), paddedCbCrSize.Height()};
data.mCbCrStride = data.mCbCrSize.Width() * bytesPerChannel;
data.mCroppedCbCrSize = Some(gfx::IntSize{uvDims.Width(), uvDims.Height()});
// Setting other attributes
data.mPicSize =
gfx::IntSize{aCodecContext->coded_width, aCodecContext->coded_height};
const gfx::IntRect picture =
aInfo.ScaledImageRect(data.mPicSize.Width(), data.mPicSize.Height());
data.mPicX = picture.x;
data.mPicY = picture.y;
data.mStereoMode = aInfo.mStereoMode;
if (aCodecContext->colorspace != AVCOL_SPC_UNSPECIFIED) {
data.mYUVColorSpace =
TransferAVColorSpaceToYUVColorSpace(aCodecContext->colorspace);
} else {
data.mYUVColorSpace = aInfo.mColorSpace ? *aInfo.mColorSpace
: DefaultColorSpace(data.mPicSize);
}
data.mColorDepth = GetColorDepth(aCodecContext->pix_fmt);
data.mColorRange = aCodecContext->color_range == AVCOL_RANGE_JPEG
? gfx::ColorRange::FULL
: gfx::ColorRange::LIMITED;
FFMPEG_LOGV(
"Created plane data, YSize=(%d, %d), CbCrSize=(%d, %d), "
"CroppedYSize=(%d, %d), CroppedCbCrSize=(%d, %d), ColorDepth=%hhu",
data.mYSize.Width(), data.mYSize.Height(), data.mCbCrSize.Width(),
data.mCbCrSize.Height(), data.mCroppedYSize->Width(),
data.mCroppedYSize->Height(), data.mCroppedCbCrSize->Width(),
data.mCroppedCbCrSize->Height(), data.mColorDepth);
return data;
}
int FFmpegVideoDecoder<LIBAV_VER>::GetVideoBuffer(
struct AVCodecContext* aCodecContext, AVFrame* aFrame, int aFlags) {
FFMPEG_LOGV("GetVideoBuffer: aCodecContext=%p aFrame=%p", aCodecContext,
aFrame);
if (!StaticPrefs::media_ffmpeg_customized_buffer_allocation()) {
return AVERROR(EINVAL);
}
if (mIsUsingShmemBufferForDecode && !*mIsUsingShmemBufferForDecode) {
return AVERROR(EINVAL);
}
// Codec doesn't support custom allocator.
if (!(aCodecContext->codec->capabilities & AV_CODEC_CAP_DR1)) {
return AVERROR(EINVAL);
}
// Pre-allocation is only for sw decoding. During decoding, the ffmpeg decoder
// will need to reference decoded frames; if those frames were in a shmem
// buffer, the GPU would need to read CPU-side data, which is slow.
if (IsHardwareAccelerated()) {
return AVERROR(EINVAL);
}
# ifdef MOZ_WAYLAND_USE_VAAPI
// For SW decoding + DMABuf case, it's the opposite from the above case, we
// don't want to access GPU data too frequently from CPU.
if (mUseDMABufSurfaces) {
return AVERROR(EINVAL);
}
# endif
if (!IsColorFormatSupportedForUsingCustomizedBuffer(aCodecContext->pix_fmt)) {
FFMPEG_LOG("Not support color format %d", aCodecContext->pix_fmt);
return AVERROR(EINVAL);
}
if (aCodecContext->lowres != 0) {
FFMPEG_LOG("Not support low resolution decoding");
return AVERROR(EINVAL);
}
const gfx::IntSize size(aCodecContext->width, aCodecContext->height);
int rv = mLib->av_image_check_size(size.Width(), size.Height(), 0, nullptr);
if (rv < 0) {
FFMPEG_LOG("Invalid image size");
return rv;
}
CheckedInt32 dataSize = mLib->av_image_get_buffer_size(
aCodecContext->pix_fmt, aCodecContext->coded_width,
aCodecContext->coded_height, 16);
if (!dataSize.isValid()) {
FFMPEG_LOG("Data size overflow!");
return AVERROR(EINVAL);
}
if (!mImageContainer) {
FFMPEG_LOG("No Image container!");
return AVERROR(EINVAL);
}
RefPtr<PlanarYCbCrImage> image = mImageContainer->CreatePlanarYCbCrImage();
if (!image) {
FFMPEG_LOG("Failed to create YCbCr image");
return AVERROR(EINVAL);
}
RefPtr<layers::TextureClient> texture =
AllocateTextueClientForImage(aCodecContext, image);
if (!texture) {
FFMPEG_LOG("Failed to allocate a texture client");
return AVERROR(EINVAL);
}
if (!texture->Lock(layers::OpenMode::OPEN_WRITE)) {
FFMPEG_LOG("Failed to lock the texture");
return AVERROR(EINVAL);
}
layers::MappedYCbCrTextureData mapped;
if (!texture->BorrowMappedYCbCrData(mapped)) {
FFMPEG_LOG("Failed to borrow mapped data for the texture");
texture->Unlock();
return AVERROR(EINVAL);
}
aFrame->data[0] = mapped.y.data;
aFrame->data[1] = mapped.cb.data;
aFrame->data[2] = mapped.cr.data;
aFrame->linesize[0] = mapped.y.stride;
aFrame->linesize[1] = mapped.cb.stride;
aFrame->linesize[2] = mapped.cr.stride;
aFrame->width = aCodecContext->coded_width;
aFrame->height = aCodecContext->coded_height;
aFrame->format = aCodecContext->pix_fmt;
aFrame->extended_data = aFrame->data;
aFrame->reordered_opaque = aCodecContext->reordered_opaque;
MOZ_ASSERT(aFrame->data[0] && aFrame->data[1] && aFrame->data[2]);
// This will hold a reference to image, and the reference would be dropped
// when ffmpeg tells us that the buffer is no longer needed.
auto imageWrapper = MakeRefPtr<ImageBufferWrapper>(image.get(), this);
aFrame->buf[0] =
mLib->av_buffer_create(aFrame->data[0], dataSize.value(),
ReleaseVideoBufferWrapper, imageWrapper.get(), 0);
if (!aFrame->buf[0]) {
FFMPEG_LOG("Failed to allocate buffer");
return AVERROR(EINVAL);
}
FFMPEG_LOG("Created av buffer, buf=%p, data=%p, image=%p, sz=%d",
aFrame->buf[0], aFrame->data[0], image.get(), dataSize.value());
mAllocatedImages.Insert(imageWrapper.get());
mIsUsingShmemBufferForDecode = Some(true);
return 0;
}
#endif
void FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext() {
mCodecContext->width = mInfo.mImage.width;
mCodecContext->height = mInfo.mImage.height;
@@ -758,14 +455,6 @@ void FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext() {
// FFmpeg will call back to this to negotiate a video pixel format.
mCodecContext->get_format = ChoosePixelFormat;
#ifdef CUSTOMIZED_BUFFER_ALLOCATION
FFMPEG_LOG("Set get_buffer2 for customized buffer allocation");
mCodecContext->get_buffer2 = GetVideoBufferWrapper;
mCodecContext->opaque = this;
# if FF_API_THREAD_SAFE_CALLBACKS
mCodecContext->thread_safe_callbacks = 1;
# endif
#endif
}
#ifdef MOZ_WAYLAND_USE_VAAPI
@@ -817,7 +506,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
}
do {
if (!PrepareFrame()) {
NS_WARNING("FFmpeg decoder failed to allocate frame.");
NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
}
@@ -850,7 +539,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
MediaResult rv;
# ifdef MOZ_WAYLAND_USE_VAAPI
if (IsHardwareAccelerated()) {
if (mVAAPIDeviceContext) {
rv = CreateImageVAAPI(mFrame->pkt_pos, mFrame->pkt_pts,
mFrame->pkt_duration, aResults);
// If VA-API playback failed, just quit. Decoder is going to be restarted
@@ -889,7 +578,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
aSample->mDuration.ToMicroseconds());
if (!PrepareFrame()) {
NS_WARNING("FFmpeg decoder failed to allocate frame.");
NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
}
@@ -1043,35 +732,11 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImage(
: gfx::ColorRange::LIMITED;
}
RefPtr<VideoData> v;
#ifdef CUSTOMIZED_BUFFER_ALLOCATION
if (mIsUsingShmemBufferForDecode && *mIsUsingShmemBufferForDecode) {
RefPtr<ImageBufferWrapper> wrapper = static_cast<ImageBufferWrapper*>(
mLib->av_buffer_get_opaque(mFrame->buf[0]));
MOZ_ASSERT(wrapper);
auto* image = wrapper->AsPlanarYCbCrImage();
RefPtr<layers::TextureClient> texture = image->GetTextureClient(nullptr);
if (!texture) {
NS_WARNING("Failed to get the texture client!");
} else {
// The texture was locked to ensure that no one but the ffmpeg decoder could
// modify or access its data. After decoding finishes, the data becomes
// available to everyone, so we unlock the texture.
texture->Unlock();
v = VideoData::CreateFromImage(
mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
TimeUnit::FromMicroseconds(aDuration), image, !!mFrame->key_frame,
TimeUnit::FromMicroseconds(-1));
}
}
#endif
if (!v) {
v = VideoData::CreateAndCopyData(
RefPtr<VideoData> v = VideoData::CreateAndCopyData(
mInfo, mImageContainer, aOffset, TimeUnit::FromMicroseconds(aPts),
TimeUnit::FromMicroseconds(aDuration), b, !!mFrame->key_frame,
TimeUnit::FromMicroseconds(-1),
mInfo.ScaledImageRect(mFrame->width, mFrame->height), mImageAllocator);
}
if (!v) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY,
@@ -1227,20 +892,19 @@ void FFmpegVideoDecoder<LIBAV_VER>::ProcessShutdown() {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
#ifdef MOZ_WAYLAND_USE_VAAPI
mVideoFramePool = nullptr;
if (IsHardwareAccelerated()) {
if (mVAAPIDeviceContext) {
mLib->av_buffer_unref(&mVAAPIDeviceContext);
}
#endif
FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown();
}
#ifdef MOZ_WAYLAND_USE_VAAPI
bool FFmpegVideoDecoder<LIBAV_VER>::IsHardwareAccelerated(
nsACString& aFailureReason) const {
#ifdef MOZ_WAYLAND_USE_VAAPI
return !!mVAAPIDeviceContext;
#endif
return false;
}
#endif
#ifdef MOZ_WAYLAND_USE_VAAPI
bool FFmpegVideoDecoder<LIBAV_VER>::IsFormatAccelerated(
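The padding arithmetic described in CreateEmptyPlanarYCbCrData's comment above (Eg1/Eg2) comes straight from avcodec_align_dimensions. A tiny standalone sketch, assuming a linked FFmpeg, reproduces those numbers:

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

int main() {
  // A bare context is enough for this sketch; no codec needs to be opened.
  AVCodecContext* ctx = avcodec_alloc_context3(nullptr);
  if (!ctx) {
    return 1;
  }
  ctx->pix_fmt = AV_PIX_FMT_YUV420P;
  int w = 1920;
  int h = 1080;
  avcodec_align_dimensions(ctx, &w, &h);
  // Typically prints 1920x1088 (Eg1 above): the height is padded up to a
  // multiple of 16, and the extra 8 rows must be cropped before display.
  std::printf("padded Y plane: %dx%d\n", w, h);
  avcodec_free_context(&ctx);
  return 0;
}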

View file

@@ -7,22 +7,15 @@
#ifndef __FFmpegVideoDecoder_h__
#define __FFmpegVideoDecoder_h__
#include "ImageContainer.h"
#include "FFmpegDataDecoder.h"
#include "FFmpegLibWrapper.h"
#include "SimpleMap.h"
#include "mozilla/ScopeExit.h"
#include "nsTHashSet.h"
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
# include "mozilla/layers/TextureClient.h"
#endif
struct _VADRMPRIMESurfaceDescriptor;
typedef struct _VADRMPRIMESurfaceDescriptor VADRMPRIMESurfaceDescriptor;
namespace mozilla {
class ImageBufferWrapper;
class VideoFramePool;
template <int V>
@@ -48,8 +41,6 @@ class FFmpegVideoDecoder<LIBAV_VER>
ImageContainer* aImageContainer, bool aLowLatency,
bool aDisableHardwareDecoding);
~FFmpegVideoDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() override;
nsCString GetDescriptionName() const override {
@@ -65,19 +56,6 @@ class FFmpegVideoDecoder<LIBAV_VER>
static AVCodecID GetCodecId(const nsACString& aMimeType);
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
int GetVideoBuffer(struct AVCodecContext* aCodecContext, AVFrame* aFrame,
int aFlags);
int GetVideoBufferDefault(struct AVCodecContext* aCodecContext,
AVFrame* aFrame, int aFlags) {
mIsUsingShmemBufferForDecode = Some(false);
return mLib->avcodec_default_get_buffer2(aCodecContext, aFrame, aFlags);
}
void ReleaseAllocatedImage(ImageBufferWrapper* aImage) {
mAllocatedImages.Remove(aImage);
}
#endif
private:
RefPtr<FlushPromise> ProcessFlush() override;
void ProcessShutdown() override;
@@ -100,30 +78,13 @@ class FFmpegVideoDecoder<LIBAV_VER>
MediaResult CreateImage(int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults) const;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
bool IsHardwareAccelerated() const {
nsAutoCString dummy;
return IsHardwareAccelerated(dummy);
}
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
layers::TextureClient* AllocateTextueClientForImage(
struct AVCodecContext* aCodecContext, layers::PlanarYCbCrImage* aImage);
layers::PlanarYCbCrData CreateEmptyPlanarYCbCrData(
struct AVCodecContext* aCodecContext, const VideoInfo& aInfo);
gfx::IntSize GetAlignmentVideoFrameSize(struct AVCodecContext* aCodecContext,
int32_t aWidth,
int32_t aHeight) const;
#endif
#ifdef MOZ_WAYLAND_USE_VAAPI
void InitHWDecodingPrefs();
MediaResult InitVAAPIDecoder();
bool CreateVAAPIDeviceContext();
void InitVAAPICodecContext();
AVCodec* FindVAAPICodec();
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
bool GetVAAPISurfaceDescriptor(VADRMPRIMESurfaceDescriptor* aVaDesc);
void AddAcceleratedFormats(nsTArray<AVCodecID>& aCodecList,
AVCodecID aCodecID, AVVAAPIHWConfig* hwconfig);
@@ -167,65 +128,8 @@ class FFmpegVideoDecoder<LIBAV_VER>
DurationMap mDurationMap;
const bool mLowLatency;
// True if we're allocating shmem for ffmpeg decode buffer.
Maybe<Atomic<bool>> mIsUsingShmemBufferForDecode;
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
// These images are buffers for ffmpeg to store decoded data in when using a
// custom allocator for decoding. We explicitly track all images we allocate
// to ensure that we won't leak any of them.
nsTHashSet<RefPtr<ImageBufferWrapper>> mAllocatedImages;
#endif
};
#if LIBAVCODEC_VERSION_MAJOR >= 57 && LIBAVUTIL_VERSION_MAJOR >= 56
class ImageBufferWrapper final {
public:
typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageBufferWrapper)
ImageBufferWrapper(Image* aImage, void* aDecoder)
: mImage(aImage), mDecoder(aDecoder) {
MOZ_ASSERT(aImage);
MOZ_ASSERT(mDecoder);
}
PlanarYCbCrImage* AsPlanarYCbCrImage() {
return mImage->AsPlanarYCbCrImage();
}
void ReleaseBuffer() {
auto clear = MakeScopeExit([&]() {
auto* decoder = static_cast<FFmpegVideoDecoder<LIBAV_VER>*>(mDecoder);
decoder->ReleaseAllocatedImage(this);
});
if (!mImage) {
return;
}
PlanarYCbCrImage* image = mImage->AsPlanarYCbCrImage();
RefPtr<layers::TextureClient> texture = image->GetTextureClient(nullptr);
// Usually the decoded video buffer is locked when it is allocated and gets
// unlocked when we create the video data via `DoDecode`. However, sometimes
// the buffer won't be used for the decoded data (maybe just for internal
// temporary use?), so we need to unlock the texture here before sending it
// back to be recycled.
if (!texture) {
NS_WARNING("Failed to get the texture client during release!");
} else if (texture->IsLocked()) {
texture->Unlock();
}
}
private:
~ImageBufferWrapper() = default;
const RefPtr<Image> mImage;
void* const MOZ_NON_OWNING_REF mDecoder;
};
#endif
} // namespace mozilla
#endif // __FFmpegVideoDecoder_h__

View file

@@ -26,6 +26,4 @@ if CONFIG['CC_TYPE'] == 'gcc':
'-Wno-attributes',
]
include("/ipc/chromium/chromium-config.mozbuild")
FINAL_LIBRARY = 'xul'

View file

@@ -34,6 +34,4 @@ if CONFIG['MOZ_WAYLAND']:
]
USE_LIBS += ['mozva']
include("/ipc/chromium/chromium-config.mozbuild")
FINAL_LIBRARY = 'xul'

View file

@@ -42,6 +42,4 @@ if CONFIG["MOZ_WAYLAND"]:
DEFINES["MOZ_WAYLAND_USE_VAAPI"] = 1
USE_LIBS += ["mozva"]
include("/ipc/chromium/chromium-config.mozbuild")
FINAL_LIBRARY = "xul"

View file

@@ -10,10 +10,6 @@
DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
##__VA_ARGS__)
#define LOGV(arg, ...) \
DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Verbose, "::%s: " arg, __func__, \
##__VA_ARGS__)
namespace mozilla {
using media::TimeInterval;
@@ -122,7 +118,7 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
mTrimmers.RemoveElementAt(0);
if (!trimmer) {
// Those frames didn't need trimming.
LOGV("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
LOG("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
"] no trimming needed",
rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
sampleInterval.mEnd.ToMicroseconds());
@@ -203,7 +199,7 @@ void AudioTrimmer::PrepareTrimmers(MediaRawData* aRaw) {
aRaw->mTime = aRaw->mOriginalPresentationWindow->mStart;
aRaw->mDuration = aRaw->mOriginalPresentationWindow->Length();
} else {
LOGV("sample[%" PRId64 ",%" PRId64 "] no trimming information",
LOG("sample[%" PRId64 ",%" PRId64 "] no trimming information",
aRaw->mTime.ToMicroseconds(), aRaw->GetEndTime().ToMicroseconds());
mTrimmers.AppendElement(Nothing());
}

View file

@@ -14,39 +14,32 @@ defaults pref(media.av1.enabled,true)
# -
# yuv420p
fuzzy(16-51,5234-5622) fuzzy-if(swgl,32-38,1600-91746) fuzzy-if(useDrawSnapshot,16-16,11600-11600) fuzzy-if(OSX,16-73,5212-5622) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
fuzzy-if(winWidget&&swgl,0-20,0-5620) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(OSX,0-35,0-1947) fuzzy-if(OSX&&swgl,0-67,0-5451) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy(16-50,5234-5622) fuzzy-if(swgl,32-38,1600-91746) fuzzy-if(useDrawSnapshot,16-16,11600-11600) fuzzy-if(OSX,16-73,5212-5622) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
fuzzy-if(Android,254-255,273680-273807) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
== ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
skip-if(winWidget&&isCoverageBuild) fuzzy(0-2,75-225) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(OSX,30-32,187326-187407) fuzzy-if(appleSilicon,30-48,1835-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy-if(winWidget&&swgl,0-20,0-5620) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(OSX,0-35,0-1947) fuzzy-if(OSX&&swgl,0-67,0-5451) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy(0-1,0-75) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
skip-if(Android) fuzzy(16-48,8349-8818) fuzzy-if(winWidget&&swgl,31-38,8240-184080) fuzzy-if(appleSilicon,33-38,8819-11705) fuzzy-if(useDrawSnapshot,20-20,187200-187200) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(appleSilicon,36-49,187329-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
# On Windows & sw render, we noticed that the comparison image captured from AV1 is not equal to its displayed video frame, so we would need to compare other codecs directly to PNG file. That should be fixed in bug 1748540.
skip-if(Android) skip-if(winWidget&&swgl) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(OSX,0-16,0-1718) fuzzy-if(OSX&&swgl,0-20,0-2423) fuzzy-if(appleSilicon,36-49,187329-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(Android) skip-if(winWidget&&swgl) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(OSX,2-36,184281-187407) fuzzy-if(winWidget,0-1,0-7) fuzzy-if(appleSilicon,36-49,187329-187407) fuzzy-if(useDrawSnapshot,0-1,0-10) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(Android) skip-if(winWidget&&swgl) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(OSX,0-16,0-1718) fuzzy-if(OSX&&swgl,0-20,0-2423) fuzzy-if(appleSilicon,36-49,187329-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(Android) skip-if(!(winWidget&&swgl)) fuzzy(0-31,0-8240) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) skip-if(!(winWidget&&swgl)) fuzzy(0-31,0-8240) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(appleSilicon,36-49,187329-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
# -
# yuv420p10
skip-if(Android) fuzzy(33-49,2346-2579) fuzzy-if(swgl,34-52,181053-270528) fuzzy-if(appleSilicon,49-49,2263-2263) fuzzy-if(useDrawSnapshot,16-16,183840-183840) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) fuzzy-if(winWidget&&swgl,0-17,0-264604) fuzzy-if(appleSilicon,38-38,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(Android) fuzzy-if(appleSilicon,38-38,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
#[2] skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(Android) fuzzy-if(winWidget&&swgl,0-17,0-264604) fuzzy-if(appleSilicon,38-38,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(Android) fuzzy-if(appleSilicon,38-38,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(Android) fuzzy(33-49,174699-175092) fuzzy-if(swgl&&!winWidget,36-52,11553-11555) fuzzy-if(swgl&&winWidget,40-52,11555-187200) fuzzy-if(useDrawSnapshot,20-20,186800-186800) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) fuzzy-if(appleSilicon,30-30,187328-187328) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
# On Windows & sw render, we noticed that the comparison image captured from AV1 is not equal to its displayed video frame, so we would need to compare other codecs directly to PNG file. That should be fixed in bug 1748540.
skip-if(Android) skip-if(winWidget&&swgl) fuzzy-if(appleSilicon,30-30,187328-187328) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
#[2] skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
skip-if(Android) skip-if(winWidget&&swgl) fuzzy-if(appleSilicon,30-30,187328-187328) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
skip-if(Android) skip-if(!(winWidget&&swgl)) fuzzy(0-52,0-11555) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) skip-if(!(winWidget&&swgl)) fuzzy(0-52,0-11555) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) fuzzy-if(appleSilicon,30-30,187328-187328) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
# Android is really broken in a variety of ways for p10.
#[2]: yuv420p10 broken in h264.mp4: https://bugzilla.mozilla.org/show_bug.cgi?id=1711812

View file

@@ -95,9 +95,6 @@ class ShmemTextureData : public BufferTextureData {
virtual size_t GetBufferSize() override { return mShmem.Size<uint8_t>(); }
bool CropYCbCrPlanes(const gfx::IntSize& aYSize,
const gfx::IntSize& aCbCrSize) override;
protected:
mozilla::ipc::Shmem mShmem;
};
@@ -509,26 +506,5 @@ void ShmemTextureData::Deallocate(LayersIPCChannel* aAllocator) {
aAllocator->DeallocShmem(mShmem);
}
bool ShmemTextureData::CropYCbCrPlanes(const gfx::IntSize& aYSize,
const gfx::IntSize& aCbCrSize) {
if (mDescriptor.type() != BufferDescriptor::TYCbCrDescriptor) {
return false;
}
const auto& current = mDescriptor.get_YCbCrDescriptor();
if (current.ySize() < aYSize || current.cbCrSize() < aCbCrSize) {
NS_WARNING("Cropped size should not exceed the original size!");
return false;
}
auto newDescritor = YCbCrDescriptor(
current.display(), aYSize, current.yStride(), aCbCrSize,
current.cbCrStride(), current.yOffset(), current.cbOffset(),
current.crOffset(), current.stereoMode(), current.colorDepth(),
current.yUVColorSpace(), current.colorRange());
mDescriptor = BufferDescriptor(newDescritor);
return true;
}
} // namespace layers
} // namespace mozilla
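CropYCbCrPlanes, also removed here, swapped a smaller YCbCrDescriptor into the already-allocated shmem while keeping the original strides, so every row of the padded buffer still maps correctly. A hedged miniature of that shrink-only check with plain structs (the Mozilla types are assumed; this is not their real API):

#include <cstdio>

struct Size { int w, h; };
struct Desc { Size ySize; int yStride; Size cbCrSize; int cbCrStride; };

// Shrink-only crop: strides stay untouched, so the buffer allocated at the
// padded size keeps its layout and only the advertised sizes change.
static bool CropPlanes(Desc& d, Size y, Size cbcr) {
  if (y.w > d.ySize.w || y.h > d.ySize.h || cbcr.w > d.cbCrSize.w ||
      cbcr.h > d.cbCrSize.h) {
    return false;  // the cropped size must not exceed the allocated size
  }
  d.ySize = y;
  d.cbCrSize = cbcr;
  return true;
}

int main() {
  // Padded sizes from Eg1 earlier: 1920x1088 Y plane, 1024x544 chroma.
  Desc d{{1920, 1088}, 1920, {1024, 544}, 1024};
  if (CropPlanes(d, {1920, 1080}, {960, 540})) {
    std::printf("cropped to %dx%d / %dx%d\n", d.ySize.w, d.ySize.h,
                d.cbCrSize.w, d.cbCrSize.h);
  }
  return 0;
}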

View file

@@ -661,12 +661,6 @@ struct PlanarYCbCrData {
}
static Maybe<PlanarYCbCrData> From(const SurfaceDescriptorBuffer&);
// We would use mPicSize, but that's not hooked up in WR for RawData
// ExternalImages, so we manually clip sizes later on. We should fix WR,
// but not in this patch. Do not use unless mPicSize doesn't work for you.
Maybe<gfx::IntSize> mCroppedYSize;
Maybe<gfx::IntSize> mCroppedCbCrSize;
};
// This type is currently only used for AVIF and therefore makes some
@@ -733,11 +727,6 @@ class PlanarYCbCrImage : public Image {
*/
virtual bool AdoptData(const Data& aData);
/**
* This will create an empty data buffers according to the input data's size.
*/
virtual bool CreateEmptyBuffer(const Data& aData) { return false; }
/**
* Ask this Image to not convert YUV to RGB during SetData, and make
* the original data available through GetData. This is optional,

View file

@@ -850,14 +850,6 @@ bool TextureClient::ToSurfaceDescriptor(SurfaceDescriptor& aOutDescriptor) {
return mData ? mData->Serialize(aOutDescriptor) : false;
}
bool TextureClient::CropYCbCrPlanes(const gfx::IntSize& aYSize,
const gfx::IntSize& aCbCrSize) {
if (!mData) {
return false;
}
return mData->CropYCbCrPlanes(aYSize, aCbCrSize);
}
// static
PTextureChild* TextureClient::CreateIPDLActor() {
TextureChild* c = new TextureChild();

View file

@@ -307,17 +307,6 @@ class TextureData {
return mozilla::ipc::FileDescriptor();
}
/**
* Crop YCbCr planes to a smaller size. A use case: we may need to allocate
* larger planes in order to meet a special alignment requirement (e.g. for
* ffmpeg video decoding), then crop the planes to the correct range after
* allocation is done.
*/
virtual bool CropYCbCrPlanes(const gfx::IntSize& aYSize,
const gfx::IntSize& aCbCrSize) {
return false;
}
protected:
MOZ_COUNTED_DEFAULT_CTOR(TextureData)
};
@@ -480,15 +469,6 @@ class TextureClient : public AtomicRefCountedWithFinalize<TextureClient> {
bool CopyToTextureClient(TextureClient* aTarget, const gfx::IntRect* aRect,
const gfx::IntPoint* aPoint);
/**
* Crop YCbCr planes to a smaller size. A use case: we may need to allocate
* larger planes in order to meet a special alignment requirement (e.g. for
* ffmpeg video decoding), then crop the planes to the correct range after
* allocation is done.
*/
bool CropYCbCrPlanes(const gfx::IntSize& aYSize,
const gfx::IntSize& aCbCrSize);
/**
* Allocate and deallocate a TextureChild actor.
*

View file

@@ -108,11 +108,6 @@ bool SharedPlanarYCbCrImage::AdoptData(const Data& aData) {
return false;
}
bool SharedPlanarYCbCrImage::CreateEmptyBuffer(const Data& aData) {
auto data = aData;
return Allocate(data);
}
bool SharedPlanarYCbCrImage::IsValid() const {
return mTextureClient && mTextureClient->IsValid();
}
@@ -132,18 +127,6 @@ bool SharedPlanarYCbCrImage::Allocate(PlanarYCbCrData& aData) {
return false;
}
gfx::IntSize imageYSize =
aData.mCroppedYSize ? *aData.mCroppedYSize : aData.mYSize;
gfx::IntSize imageCbCrSize =
aData.mCroppedCbCrSize ? *aData.mCroppedCbCrSize : aData.mCbCrSize;
if (aData.mCroppedYSize || aData.mCroppedCbCrSize) {
// If cropping fails, then reset Y&CbCr sizes to non-cropped sizes.
if (!mTextureClient->CropYCbCrPlanes(imageYSize, imageCbCrSize)) {
imageYSize = aData.mYSize;
imageCbCrSize = aData.mCbCrSize;
}
}
MappedYCbCrTextureData mapped;
// The locking here is sort of a lie. The SharedPlanarYCbCrImage just pulls
// pointers out of the TextureClient and keeps them around, which works only
@@ -163,8 +146,8 @@ bool SharedPlanarYCbCrImage::Allocate(PlanarYCbCrData& aData) {
mData.mYChannel = aData.mYChannel;
mData.mCbChannel = aData.mCbChannel;
mData.mCrChannel = aData.mCrChannel;
mData.mYSize = imageYSize;
mData.mCbCrSize = imageCbCrSize;
mData.mYSize = aData.mYSize;
mData.mCbCrSize = aData.mCbCrSize;
mData.mPicX = aData.mPicX;
mData.mPicY = aData.mPicY;
mData.mPicSize = aData.mPicSize;

View file

@@ -38,7 +38,8 @@ class SharedPlanarYCbCrImage : public PlanarYCbCrImage {
already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
bool CopyData(const PlanarYCbCrData& aData) override;
bool AdoptData(const Data& aData) override;
bool CreateEmptyBuffer(const Data& aData) override;
bool Allocate(PlanarYCbCrData& aData);
bool IsValid() const override;
@@ -51,8 +52,6 @@ class SharedPlanarYCbCrImage : public PlanarYCbCrImage {
TextureClientRecycleAllocator* RecycleAllocator();
private:
bool Allocate(PlanarYCbCrData& aData);
RefPtr<TextureClient> mTextureClient;
RefPtr<ImageClient> mCompositable;
RefPtr<TextureClientRecycleAllocator> mRecycleAllocator;

View file

@@ -8692,12 +8692,6 @@
mirror: always
#endif
# Allow the ffmpeg decoder to decode directly onto a shmem buffer
- name: media.ffmpeg.customized-buffer-allocation
type: RelaxedAtomicBool
value: @IS_NIGHTLY_BUILD@
mirror: always
#ifdef MOZ_FFMPEG
- name: media.ffmpeg.enabled
type: RelaxedAtomicBool