Bug 1846703 - Update vendored ffmpeg to d9d56953. r=alwu

Differential Revision: https://phabricator.services.mozilla.com/D185921
This commit is contained in:
Paul Adenot 2023-08-11 13:11:36 +00:00
Родитель 51fe33dea2
Коммит 46f3e6019f
109 изменённых файлов: 2531 добавлений и 906 удалений

Просмотреть файл

@ -3,9 +3,9 @@
This directory contains files used in gecko builds from FFmpeg
(http://ffmpeg.org). The current files are from FFmpeg as of
revision
37cde570bc2dcd64a15c5d9a37b9fa0d78d84f9f
d9d56953
git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg
git checkout 37cde570bc2dcd64a15c5d9a37b9fa0d78d84f9f
git checkout d9d56953
All source files match their path from the library's source archive.

Просмотреть файл

@ -66,7 +66,14 @@ typedef CONDITION_VARIABLE pthread_cond_t;
#define PTHREAD_CANCEL_ENABLE 1
#define PTHREAD_CANCEL_DISABLE 0
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
#if HAVE_WINRT
#define THREADFUNC_RETTYPE DWORD
#else
#define THREADFUNC_RETTYPE unsigned
#endif
static av_unused THREADFUNC_RETTYPE
__stdcall attribute_align_arg win32thread_worker(void *arg)
{
pthread_t *h = (pthread_t*)arg;
h->ret = h->func(h->arg);

Просмотреть файл

@ -341,7 +341,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_X11 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1
#define HAVE_OS2THREADS 0

Просмотреть файл

@ -341,6 +341,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -341,6 +341,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -319,6 +319,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -845,6 +845,7 @@
#define CONFIG_AV1_NVDEC_HWACCEL 0
#define CONFIG_AV1_VAAPI_HWACCEL 0
#define CONFIG_AV1_VDPAU_HWACCEL 0
#define CONFIG_AV1_VULKAN_HWACCEL 0
#define CONFIG_H263_VAAPI_HWACCEL 0
#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0
#define CONFIG_H264_D3D11VA_HWACCEL 0

Просмотреть файл

@ -874,6 +874,7 @@
#define CONFIG_AV1_NVDEC_HWACCEL 0
#define CONFIG_AV1_VAAPI_HWACCEL 0
#define CONFIG_AV1_VDPAU_HWACCEL 0
#define CONFIG_AV1_VULKAN_HWACCEL 0
#define CONFIG_H263_VAAPI_HWACCEL 0
#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0
#define CONFIG_H264_D3D11VA_HWACCEL 0

Просмотреть файл

@ -304,6 +304,7 @@
%define HAVE_WGLGETPROCADDRESS 0
%define HAVE_BCRYPT 0
%define HAVE_VAAPI_DRM 0
%define HAVE_VAAPI_WIN32 0
%define HAVE_VAAPI_X11 0
%define HAVE_VDPAU_X11 0
%define HAVE_PTHREADS 1

Просмотреть файл

@ -319,6 +319,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -341,6 +341,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -341,6 +341,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -336,6 +336,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -325,6 +325,7 @@
%define HAVE_WGLGETPROCADDRESS 0
%define HAVE_BCRYPT 0
%define HAVE_VAAPI_DRM 1
%define HAVE_VAAPI_WIN32 0
%define HAVE_VAAPI_X11 0
%define HAVE_VDPAU_X11 0
%define HAVE_PTHREADS 1

Просмотреть файл

@ -336,6 +336,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -336,6 +336,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 0
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 1

Просмотреть файл

@ -320,6 +320,7 @@
%define HAVE_WGLGETPROCADDRESS 0
%define HAVE_BCRYPT 1
%define HAVE_VAAPI_DRM 0
%define HAVE_VAAPI_WIN32 0
%define HAVE_VAAPI_X11 0
%define HAVE_VDPAU_X11 0
%define HAVE_PTHREADS 0

Просмотреть файл

@ -336,6 +336,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 1
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 0

Просмотреть файл

@ -320,6 +320,7 @@
%define HAVE_WGLGETPROCADDRESS 0
%define HAVE_BCRYPT 1
%define HAVE_VAAPI_DRM 0
%define HAVE_VAAPI_WIN32 0
%define HAVE_VAAPI_X11 0
%define HAVE_VDPAU_X11 0
%define HAVE_PTHREADS 0

Просмотреть файл

@ -336,6 +336,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 1
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 0

Просмотреть файл

@ -319,6 +319,7 @@
#define HAVE_WGLGETPROCADDRESS 0
#define HAVE_BCRYPT 1
#define HAVE_VAAPI_DRM 0
#define HAVE_VAAPI_WIN32 0
#define HAVE_VAAPI_X11 0
#define HAVE_VDPAU_X11 0
#define HAVE_PTHREADS 0

Просмотреть файл

@ -228,6 +228,7 @@ extern const FFCodec ff_msmpeg4v3_encoder;
extern const FFCodec ff_msmpeg4v3_decoder;
extern const FFCodec ff_msmpeg4_crystalhd_decoder;
extern const FFCodec ff_msp2_decoder;
extern const FFCodec ff_msrle_encoder;
extern const FFCodec ff_msrle_decoder;
extern const FFCodec ff_mss1_decoder;
extern const FFCodec ff_mss2_decoder;
@ -251,6 +252,7 @@ extern const FFCodec ff_pbm_encoder;
extern const FFCodec ff_pbm_decoder;
extern const FFCodec ff_pcx_encoder;
extern const FFCodec ff_pcx_decoder;
extern const FFCodec ff_pdv_decoder;
extern const FFCodec ff_pfm_encoder;
extern const FFCodec ff_pfm_decoder;
extern const FFCodec ff_pgm_encoder;
@ -294,6 +296,7 @@ extern const FFCodec ff_roq_decoder;
extern const FFCodec ff_rpza_encoder;
extern const FFCodec ff_rpza_decoder;
extern const FFCodec ff_rscc_decoder;
extern const FFCodec ff_rtv1_decoder;
extern const FFCodec ff_rv10_encoder;
extern const FFCodec ff_rv10_decoder;
extern const FFCodec ff_rv20_encoder;
@ -368,6 +371,7 @@ extern const FFCodec ff_vc1_v4l2m2m_decoder;
extern const FFCodec ff_vc2_encoder;
extern const FFCodec ff_vcr1_decoder;
extern const FFCodec ff_vmdvideo_decoder;
extern const FFCodec ff_vmix_decoder;
extern const FFCodec ff_vmnc_decoder;
extern const FFCodec ff_vp3_decoder;
extern const FFCodec ff_vp4_decoder;
@ -759,6 +763,8 @@ extern const FFCodec ff_pcm_mulaw_at_decoder;
extern const FFCodec ff_qdmc_at_decoder;
extern const FFCodec ff_qdm2_at_decoder;
extern FFCodec ff_libaom_av1_encoder;
/* preferred over libaribb24 */
extern const FFCodec ff_libaribcaption_decoder;
extern const FFCodec ff_libaribb24_decoder;
extern const FFCodec ff_libcelt_decoder;
extern const FFCodec ff_libcodec2_encoder;
@ -780,7 +786,6 @@ extern const FFCodec ff_libopencore_amrnb_encoder;
extern const FFCodec ff_libopencore_amrnb_decoder;
extern const FFCodec ff_libopencore_amrwb_decoder;
extern const FFCodec ff_libopenjpeg_encoder;
extern const FFCodec ff_libopenjpeg_decoder;
extern const FFCodec ff_libopus_encoder;
extern const FFCodec ff_libopus_decoder;
extern const FFCodec ff_librav1e_encoder;
@ -834,6 +839,7 @@ extern const FFCodec ff_libaom_av1_decoder;
extern const FFCodec ff_av1_decoder;
extern const FFCodec ff_av1_cuvid_decoder;
extern const FFCodec ff_av1_mediacodec_decoder;
extern const FFCodec ff_av1_mediacodec_encoder;
extern const FFCodec ff_av1_nvenc_encoder;
extern const FFCodec ff_av1_qsv_decoder;
extern const FFCodec ff_av1_qsv_encoder;
@ -871,17 +877,20 @@ extern const FFCodec ff_mpeg2_qsv_encoder;
extern const FFCodec ff_mpeg2_vaapi_encoder;
extern const FFCodec ff_mpeg4_cuvid_decoder;
extern const FFCodec ff_mpeg4_mediacodec_decoder;
extern const FFCodec ff_mpeg4_mediacodec_encoder;
extern const FFCodec ff_mpeg4_omx_encoder;
extern const FFCodec ff_mpeg4_v4l2m2m_encoder;
extern const FFCodec ff_prores_videotoolbox_encoder;
extern const FFCodec ff_vc1_cuvid_decoder;
extern const FFCodec ff_vp8_cuvid_decoder;
extern const FFCodec ff_vp8_mediacodec_decoder;
extern const FFCodec ff_vp8_mediacodec_encoder;
extern const FFCodec ff_vp8_qsv_decoder;
extern const FFCodec ff_vp8_v4l2m2m_encoder;
extern const FFCodec ff_vp8_vaapi_encoder;
extern const FFCodec ff_vp9_cuvid_decoder;
extern const FFCodec ff_vp9_mediacodec_decoder;
extern const FFCodec ff_vp9_mediacodec_encoder;
extern const FFCodec ff_vp9_qsv_decoder;
extern const FFCodec ff_vp9_vaapi_encoder;
extern const FFCodec ff_vp9_qsv_encoder;

Просмотреть файл

@ -0,0 +1,122 @@
/*
* AV1 common parsing code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/mem.h"
#include "av1.h"
#include "av1_parse.h"
#include "bytestream.h"
/**
 * Parse one OBU at the start of buf and fill *obu with its parsed fields.
 *
 * @param obu    output descriptor for the extracted OBU
 * @param buf    start of the raw OBU (header + payload)
 * @param length number of bytes available at buf
 * @param logctx logging context passed to av_log()
 * @return total number of bytes consumed (header + payload), or a negative
 *         error code propagated from parse_obu_header()
 */
int ff_av1_extract_obu(AV1OBU *obu, const uint8_t *buf, int length, void *logctx)
{
int64_t obu_size;
int start_pos, type, temporal_id, spatial_id;
int len;
/* parse_obu_header() validates the header and reports the payload size,
 * the payload offset (start_pos) and the scalability ids. */
len = parse_obu_header(buf, length, &obu_size, &start_pos,
&type, &temporal_id, &spatial_id);
if (len < 0)
return len;
obu->type = type;
obu->temporal_id = temporal_id;
obu->spatial_id = spatial_id;
/* data/size describe the payload only; raw_data/raw_size cover the
 * whole OBU including its header. */
obu->data = buf + start_pos;
obu->size = obu_size;
obu->raw_data = buf;
obu->raw_size = len;
av_log(logctx, AV_LOG_DEBUG,
"obu_type: %d, temporal_id: %d, spatial_id: %d, payload size: %d\n",
obu->type, obu->temporal_id, obu->spatial_id, obu->size);
return len;
}
/**
 * Split a buffer containing a sequence of OBUs into pkt->obus[].
 *
 * The obus array is grown one element at a time with av_fast_realloc();
 * previously parsed entries are preserved across calls (pkt keeps its
 * allocation between packets, only nb_obus is reset here).
 *
 * @param pkt    packet structure receiving the OBU list; caller must
 *               eventually release it with ff_av1_packet_uninit()
 * @param buf    input buffer holding back-to-back OBUs
 * @param length size of buf in bytes
 * @param logctx logging context
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_av1_packet_split(AV1Packet *pkt, const uint8_t *buf, int length, void *logctx)
{
GetByteContext bc;
int consumed;
bytestream2_init(&bc, buf, length);
pkt->nb_obus = 0;
while (bytestream2_get_bytes_left(&bc) > 0) {
AV1OBU *obu;
/* Grow the array by exactly one slot when full. */
if (pkt->obus_allocated < pkt->nb_obus + 1) {
int new_size = pkt->obus_allocated + 1;
AV1OBU *tmp;
/* Guard the byte-count multiplication below against overflow. */
if (new_size >= INT_MAX / sizeof(*tmp))
return AVERROR(ENOMEM);
tmp = av_fast_realloc(pkt->obus, &pkt->obus_allocated_size, new_size * sizeof(*tmp));
if (!tmp)
return AVERROR(ENOMEM);
pkt->obus = tmp;
/* Zero only the single newly added element; the existing
 * pkt->obus_allocated entries keep their contents. */
memset(pkt->obus + pkt->obus_allocated, 0, sizeof(*pkt->obus));
pkt->obus_allocated = new_size;
}
obu = &pkt->obus[pkt->nb_obus];
consumed = ff_av1_extract_obu(obu, bc.buffer, bytestream2_get_bytes_left(&bc), logctx);
if (consumed < 0)
return consumed;
bytestream2_skip(&bc, consumed);
obu->size_bits = get_obu_bit_length(obu->data, obu->size, obu->type);
/* Empty payloads are only legal for temporal delimiter and padding
 * OBUs; invalid OBUs are skipped (nb_obus is not incremented) but the
 * bytes were already consumed above, so parsing continues. */
if (obu->size_bits < 0 ||
(obu->size_bits == 0 && (obu->type != AV1_OBU_TEMPORAL_DELIMITER &&
obu->type != AV1_OBU_PADDING))) {
av_log(logctx, AV_LOG_ERROR, "Invalid OBU of type %d, skipping.\n", obu->type);
continue;
}
pkt->nb_obus++;
}
return 0;
}
/* Release the OBU array owned by *pkt and reset its allocation
 * bookkeeping so the packet can be reused or freed safely. */
void ff_av1_packet_uninit(AV1Packet *pkt)
{
    pkt->obus_allocated      = 0;
    pkt->obus_allocated_size = 0;
    av_freep(&pkt->obus);
}
/**
 * Derive a frame rate from AV1 timing-info fields.
 *
 * The rate is time_scale / (units_per_tick * ticks_per_frame); note that
 * the numerator/denominator arguments are deliberately swapped in the
 * av_reduce() call to express this inversion.
 *
 * @return the reduced frame rate, or {0, 1} when any field is zero, the
 *         tick product would overflow int64_t, or the exact reduction
 *         does not fit in an int.
 */
AVRational ff_av1_framerate(int64_t ticks_per_frame, int64_t units_per_tick,
                            int64_t time_scale)
{
    const AVRational unknown = { 0, 1 };
    AVRational fr;

    /* A zero in any field means the timing info is absent or unusable. */
    if (!ticks_per_frame || !units_per_tick || !time_scale)
        return unknown;

    /* Reject inputs whose product below would overflow int64_t. */
    if (ticks_per_frame >= INT64_MAX / units_per_tick)
        return unknown;

    if (!av_reduce(&fr.den, &fr.num, units_per_tick * ticks_per_frame,
                   time_scale, INT_MAX))
        return unknown;

    return fr;
}

Просмотреть файл

@ -49,9 +49,6 @@ typedef struct AV1OBU {
int raw_size;
const uint8_t *raw_data;
/** GetBitContext initialized to the start of the payload */
GetBitContext gb;
int type;
int temporal_id;
@ -181,4 +178,7 @@ static inline int get_obu_bit_length(const uint8_t *buf, int size, int type)
return size;
}
AVRational ff_av1_framerate(int64_t ticks_per_frame, int64_t units_per_tick,
int64_t time_scale);
#endif /* AVCODEC_AV1_PARSE_H */

Просмотреть файл

@ -21,6 +21,8 @@
*/
#include "libavutil/avassert.h"
#include "av1_parse.h"
#include "cbs.h"
#include "cbs_av1.h"
#include "parser.h"
@ -162,11 +164,10 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
avctx->color_trc = (enum AVColorTransferCharacteristic) color->transfer_characteristics;
avctx->color_range = color->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
if (seq->timing_info_present_flag) {
const AV1RawTimingInfo *timing = &seq->timing_info;
av_reduce(&avctx->framerate.den, &avctx->framerate.num,
timing->num_units_in_display_tick, timing->time_scale, INT_MAX);
}
if (seq->timing_info_present_flag)
avctx->framerate = ff_av1_framerate(1LL + seq->timing_info.num_ticks_per_picture_minus_1,
seq->timing_info.num_units_in_display_tick,
seq->timing_info.time_scale);
end:
ff_cbs_fragment_reset(td);

Просмотреть файл

@ -26,11 +26,15 @@
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "av1_parse.h"
#include "decode.h"
#include "av1dec.h"
#include "atsc_a53.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "hwaccel_internal.h"
#include "internal.h"
#include "hwconfig.h"
#include "profiles.h"
#include "thread.h"
@ -267,7 +271,9 @@ static void skip_mode_params(AV1DecContext *s)
int second_forward_idx, second_forward_hint;
int ref_hint, dist, i;
if (!header->skip_mode_present)
if (header->frame_type == AV1_FRAME_KEY ||
header->frame_type == AV1_FRAME_INTRA_ONLY ||
!header->reference_select || !seq->enable_order_hint)
return;
forward_idx = -1;
@ -445,7 +451,8 @@ static int get_pixel_format(AVCodecContext *avctx)
CONFIG_AV1_D3D11VA_HWACCEL * 2 + \
CONFIG_AV1_NVDEC_HWACCEL + \
CONFIG_AV1_VAAPI_HWACCEL + \
CONFIG_AV1_VDPAU_HWACCEL)
CONFIG_AV1_VDPAU_HWACCEL + \
CONFIG_AV1_VULKAN_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
@ -525,6 +532,9 @@ static int get_pixel_format(AVCodecContext *avctx)
#endif
#if CONFIG_AV1_VDPAU_HWACCEL
*fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV420P10:
@ -543,6 +553,44 @@ static int get_pixel_format(AVCodecContext *avctx)
#endif
#if CONFIG_AV1_VDPAU_HWACCEL
*fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV420P12:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV422P:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV422P10:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV422P12:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV444P:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV444P10:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_YUV444P12:
#if CONFIG_AV1_VULKAN_HWACCEL
*fmtp++ = AV_PIX_FMT_VULKAN;
#endif
break;
case AV_PIX_FMT_GRAY8:
@ -709,15 +757,10 @@ static int set_context_with_sequence(AVCodecContext *avctx,
}
avctx->sample_aspect_ratio = (AVRational) { 1, 1 };
if (seq->timing_info.num_units_in_display_tick &&
seq->timing_info.time_scale) {
av_reduce(&avctx->framerate.den, &avctx->framerate.num,
seq->timing_info.num_units_in_display_tick,
seq->timing_info.time_scale,
INT_MAX);
if (seq->timing_info.equal_picture_interval)
avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
}
if (seq->timing_info_present_flag)
avctx->framerate = ff_av1_framerate(1LL + seq->timing_info.num_ticks_per_picture_minus_1,
seq->timing_info.num_units_in_display_tick,
seq->timing_info.time_scale);
return 0;
}
@ -769,6 +812,7 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
int ret;
s->avctx = avctx;
s->pkt = avctx->internal->in_pkt;
s->pix_fmt = AV_PIX_FMT_NONE;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
@ -807,7 +851,7 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
return ret;
goto end;
}
seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
@ -846,7 +890,10 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
goto fail;
frame = f->f;
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
if (header->frame_type == AV1_FRAME_KEY)
frame->flags |= AV_FRAME_FLAG_KEY;
else
frame->flags &= ~AV_FRAME_FLAG_KEY;
switch (header->frame_type) {
case AV1_FRAME_KEY:
@ -861,18 +908,11 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
break;
}
if (avctx->hwaccel) {
const AVHWAccel *hwaccel = avctx->hwaccel;
if (hwaccel->frame_priv_data_size) {
f->hwaccel_priv_buf =
av_buffer_allocz(hwaccel->frame_priv_data_size);
if (!f->hwaccel_priv_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
}
}
ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private,
&f->hwaccel_priv_buf);
if (ret < 0)
goto fail;
return 0;
fail:
@ -1042,11 +1082,11 @@ static int export_film_grain(AVCodecContext *avctx, AVFrame *frame)
return 0;
}
static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
const AVPacket *pkt, int *got_frame)
static int set_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
AV1DecContext *s = avctx->priv_data;
const AVFrame *srcframe = s->cur_frame.f;
AVPacket *pkt = s->pkt;
int ret;
// TODO: all layers
@ -1077,10 +1117,11 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_size = pkt->size;
frame->pkt_pos = pkt->pos;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
*got_frame = 1;
av_packet_unref(pkt);
return 0;
}
@ -1146,22 +1187,13 @@ static int get_current_frame(AVCodecContext *avctx)
return ret;
}
static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, AVPacket *pkt)
static int av1_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
AV1DecContext *s = avctx->priv_data;
AV1RawTileGroup *raw_tile_group = NULL;
int ret;
int i = 0, ret;
ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
goto end;
}
av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
s->current_obu.nb_units);
for (int i = 0; i < s->current_obu.nb_units; i++) {
for (i = s->nb_unit; i < s->current_obu.nb_units; i++) {
CodedBitstreamUnit *unit = &s->current_obu.units[i];
AV1RawOBU *obu = unit->content;
const AV1RawOBUHeader *header;
@ -1202,9 +1234,9 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
}
if (avctx->hwaccel && avctx->hwaccel->decode_params) {
ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
unit->data_size);
if (FF_HW_HAS_CB(avctx, decode_params)) {
ret = FF_HW_CALL(avctx, decode_params, unit->type,
unit->data, unit->data_size);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
s->raw_seq = NULL;
@ -1253,12 +1285,13 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
if (s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
ret = set_output_frame(avctx, frame);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
}
s->raw_frame_header = NULL;
i++;
goto end;
}
@ -1273,8 +1306,7 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
s->cur_frame.temporal_id = header->temporal_id;
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->start_frame(avctx, unit->data,
unit->data_size);
ret = FF_HW_CALL(avctx, start_frame, unit->data, unit->data_size);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
goto end;
@ -1300,9 +1332,8 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
goto end;
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->decode_slice(avctx,
raw_tile_group->tile_data.data,
raw_tile_group->tile_data.data_size);
ret = FF_HW_CALL(avctx, decode_slice, raw_tile_group->tile_data.data,
raw_tile_group->tile_data.data_size);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"HW accel decode slice fail.\n");
@ -1362,8 +1393,9 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
int show_frame = s->raw_frame_header->show_frame;
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->end_frame(avctx);
ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
goto end;
@ -1377,7 +1409,7 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
ret = set_output_frame(avctx, frame);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
goto end;
@ -1385,13 +1417,56 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
raw_tile_group = NULL;
s->raw_frame_header = NULL;
if (show_frame) {
i++;
goto end;
}
}
}
ret = AVERROR(EAGAIN);
end:
ff_cbs_fragment_reset(&s->current_obu);
if (ret < 0)
s->raw_frame_header = NULL;
av_assert0(i <= s->current_obu.nb_units);
s->nb_unit = i;
if ((ret < 0 && ret != AVERROR(EAGAIN)) || s->current_obu.nb_units == i) {
if (ret < 0)
s->raw_frame_header = NULL;
av_packet_unref(s->pkt);
ff_cbs_fragment_reset(&s->current_obu);
s->nb_unit = 0;
}
return ret;
}
static int av1_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
AV1DecContext *s = avctx->priv_data;
int ret;
do {
if (!s->current_obu.nb_units) {
ret = ff_decode_get_packet(avctx, s->pkt);
if (ret < 0)
return ret;
ret = ff_cbs_read_packet(s->cbc, &s->current_obu, s->pkt);
if (ret < 0) {
ff_cbs_fragment_reset(&s->current_obu);
av_packet_unref(s->pkt);
av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
return ret;
}
s->nb_unit = 0;
av_log(avctx, AV_LOG_DEBUG, "Total OBUs on this packet: %d.\n",
s->current_obu.nb_units);
}
ret = av1_receive_frame_internal(avctx, frame);
} while (ret == AVERROR(EAGAIN));
return ret;
}
@ -1405,6 +1480,7 @@ static void av1_decode_flush(AVCodecContext *avctx)
av1_frame_unref(avctx, &s->cur_frame);
s->operating_point_idc = 0;
s->nb_unit = 0;
s->raw_frame_header = NULL;
s->raw_seq = NULL;
s->cll = NULL;
@ -1412,7 +1488,11 @@ static void av1_decode_flush(AVCodecContext *avctx)
while (av_fifo_read(s->itut_t35_fifo, &itut_t35, 1) >= 0)
av_buffer_unref(&itut_t35.payload_ref);
ff_cbs_fragment_reset(&s->current_obu);
ff_cbs_flush(s->cbc);
if (FF_HW_HAS_CB(avctx, flush))
FF_HW_SIMPLE_CALL(avctx, flush);
}
#define OFFSET(x) offsetof(AV1DecContext, x)
@ -1438,14 +1518,12 @@ const FFCodec ff_av1_decoder = {
.priv_data_size = sizeof(AV1DecContext),
.init = av1_decode_init,
.close = av1_decode_free,
FF_CODEC_DECODE_CB(av1_decode_frame),
FF_CODEC_RECEIVE_FRAME_CB(av1_receive_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_SETS_PKT_DTS,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.flush = av1_decode_flush,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
.p.priv_class = &av1_class,
.bsfs = "av1_frame_split",
.hw_configs = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_AV1_DXVA2_HWACCEL
HWACCEL_DXVA2(av1),
@ -1465,6 +1543,9 @@ const FFCodec ff_av1_decoder = {
#if CONFIG_AV1_VDPAU_HWACCEL
HWACCEL_VDPAU(av1),
#endif
#if CONFIG_AV1_VULKAN_HWACCEL
HWACCEL_VULKAN(av1),
#endif
NULL
},

Просмотреть файл

@ -28,6 +28,7 @@
#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "packet.h"
#include "cbs.h"
#include "cbs_av1.h"
@ -68,6 +69,7 @@ typedef struct AV1DecContext {
enum AVPixelFormat pix_fmt;
CodedBitstreamContext *cbc;
CodedBitstreamFragment current_obu;
AVPacket *pkt;
AVBufferRef *seq_ref;
AV1RawSequenceHeader *raw_seq;
@ -90,6 +92,8 @@ typedef struct AV1DecContext {
AV1Frame ref[AV1_NUM_REF_FRAMES];
AV1Frame cur_frame;
int nb_unit;
// AVOptions
int operating_point;
} AV1DecContext;

Просмотреть файл

@ -34,20 +34,30 @@
#include "libavutil/opt.h"
#include "libavutil/thread.h"
#include "avcodec.h"
#include "avcodec_internal.h"
#include "bsf.h"
#include "codec_internal.h"
#include "decode.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "hwconfig.h"
#include "internal.h"
#include "thread.h"
/**
* Maximum size in bytes of extradata.
* This value was chosen such that every bit of the buffer is
* addressable by a 32-bit signed integer as used by get_bits.
*/
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
int i;
size_t i;
for (i = 0; i < count; i++) {
int r = func(c, (char *)arg + i * size);
size_t offset = i * size;
int r = func(c, FF_PTR_ADD((char *)arg, offset));
if (ret)
ret[i] = r;
}
@ -147,7 +157,9 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
return AVERROR(EINVAL);
avci = av_mallocz(sizeof(*avci));
avci = av_codec_is_decoder(codec) ?
ff_decode_internal_alloc() :
ff_encode_internal_alloc();
if (!avci) {
ret = AVERROR(ENOMEM);
goto end;
@ -380,23 +392,12 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
"that doesn't support it\n");
return;
}
if (avci->in_frame)
av_frame_unref(avci->in_frame);
if (avci->recon_frame)
av_frame_unref(avci->recon_frame);
} else {
av_packet_unref(avci->last_pkt_props);
av_packet_unref(avci->in_pkt);
avctx->pts_correction_last_pts =
avctx->pts_correction_last_dts = INT64_MIN;
av_bsf_flush(avci->bsf);
}
ff_encode_flush_buffers(avctx);
} else
ff_decode_flush_buffers(avctx);
avci->draining = 0;
avci->draining_done = 0;
avci->nb_draining_errors = 0;
av_frame_unref(avci->buffer_frame);
av_packet_unref(avci->buffer_pkt);
@ -458,13 +459,13 @@ av_cold int avcodec_close(AVCodecContext *avctx)
av_buffer_unref(&avci->pool);
if (avctx->hwaccel && avctx->hwaccel->uninit)
avctx->hwaccel->uninit(avctx);
av_freep(&avci->hwaccel_priv_data);
ff_hwaccel_uninit(avctx);
av_bsf_free(&avci->bsf);
#if FF_API_DROPCHANGED
av_channel_layout_uninit(&avci->initial_ch_layout);
#endif
#if CONFIG_LCMS2
ff_icc_context_uninit(&avci->icc);

Просмотреть файл

@ -226,11 +226,15 @@ typedef struct RcOverride{
* Use qpel MC.
*/
#define AV_CODEC_FLAG_QPEL (1 << 4)
#if FF_API_DROPCHANGED
/**
* Don't output frames whose parameters differ from first
* decoded frame in stream.
*
* @deprecated callers should implement this functionality in their own code
*/
#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
#endif
/**
* Request the encoder to output reconstructed frames, i.e.\ frames that would
* be produced by decoding the encoded bistream. These frames may be retrieved
@ -239,11 +243,20 @@ typedef struct RcOverride{
*
* Should only be used with encoders flagged with the
* @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
*
* @note
* Each reconstructed frame returned by the encoder corresponds to the last
* encoded packet, i.e. the frames are returned in coded order rather than
* presentation order.
*
* @note
* Frame parameters (like pixel format or dimensions) do not have to match the
* AVCodecContext values. Make sure to use the values from the returned frame.
*/
#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
/**
* @par decoding
* Request the decoder to propagate each packets AVPacket.opaque and
* Request the decoder to propagate each packet's AVPacket.opaque and
* AVPacket.opaque_ref to its corresponding output AVFrame.
*
* @par encoding:
@ -408,8 +421,6 @@ typedef struct RcOverride{
*/
#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
struct AVCodecInternal;
/**
* main external API structure.
* New fields can be added to the end with minor version bumps.
@ -547,14 +558,22 @@ typedef struct AVCodecContext {
*/
AVRational time_base;
#if FF_API_TICKS_PER_FRAME
/**
* For some codecs, the time base is closer to the field rate than the frame rate.
* Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
* if no telecine is used ...
*
* Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
*
* @deprecated
* - decoding: Use AVCodecDescriptor.props & AV_CODEC_PROP_FIELDS
* - encoding: Set AVCodecContext.framerate instead
*
*/
attribute_deprecated
int ticks_per_frame;
#endif
/**
* Codec delay.
@ -1007,8 +1026,11 @@ typedef struct AVCodecContext {
/**
* MPEG vs JPEG YUV range.
* - encoding: Set by user
* - decoding: Set by libavcodec
* - encoding: Set by user to override the default output color range value,
* If not specified, libavcodec sets the color range depending on the
* output format.
* - decoding: Set by libavcodec, can be set by the user to propagate the
* color range to components reading from the decoder context.
*/
enum AVColorRange color_range;
@ -1695,6 +1717,9 @@ typedef struct AVCodecContext {
#define FF_PROFILE_KLVA_SYNC 0
#define FF_PROFILE_KLVA_ASYNC 1
#define FF_PROFILE_EVC_BASELINE 0
#define FF_PROFILE_EVC_MAIN 1
/**
* level
* - encoding: Set by user.
@ -2119,120 +2144,6 @@ typedef struct AVHWAccel {
* see AV_HWACCEL_CODEC_CAP_*
*/
int capabilities;
/*****************************************************************
* No fields below this line are part of the public API. They
* may not be used outside of libavcodec and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
/**
* Allocate a custom buffer
*/
int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
/**
* Called at the beginning of each frame or field picture.
*
* Meaningful frame information (codec specific) is guaranteed to
* be parsed at this point. This function is mandatory.
*
* Note that buf can be NULL along with buf_size set to 0.
* Otherwise, this means the whole frame is available at this point.
*
* @param avctx the codec context
* @param buf the frame data buffer base
* @param buf_size the size of the frame in bytes
* @return zero if successful, a negative value otherwise
*/
int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
/**
* Callback for parameter data (SPS/PPS/VPS etc).
*
* Useful for hardware decoders which keep persistent state about the
* video parameters, and need to receive any changes to update that state.
*
* @param avctx the codec context
* @param type the nal unit type
* @param buf the nal unit data buffer
* @param buf_size the size of the nal unit in bytes
* @return zero if successful, a negative value otherwise
*/
int (*decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size);
/**
* Callback for each slice.
*
* Meaningful slice information (codec specific) is guaranteed to
* be parsed at this point. This function is mandatory.
*
* @param avctx the codec context
* @param buf the slice data buffer base
* @param buf_size the size of the slice in bytes
* @return zero if successful, a negative value otherwise
*/
int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
/**
* Called at the end of each frame or field picture.
*
* The whole picture is parsed at this point and can now be sent
* to the hardware accelerator. This function is mandatory.
*
* @param avctx the codec context
* @return zero if successful, a negative value otherwise
*/
int (*end_frame)(AVCodecContext *avctx);
/**
* Size of per-frame hardware accelerator private data.
*
* Private data is allocated with av_mallocz() before
* AVCodecContext.get_buffer() and deallocated after
* AVCodecContext.release_buffer().
*/
int frame_priv_data_size;
/**
* Initialize the hwaccel private data.
*
* This will be called from ff_get_format(), after hwaccel and
* hwaccel_context are set and the hwaccel private data in AVCodecInternal
* is allocated.
*/
int (*init)(AVCodecContext *avctx);
/**
* Uninitialize the hwaccel private data.
*
* This will be called from get_format() or avcodec_close(), after hwaccel
* and hwaccel_context are already uninitialized.
*/
int (*uninit)(AVCodecContext *avctx);
/**
* Size of the private data to allocate in
* AVCodecInternal.hwaccel_priv_data.
*/
int priv_data_size;
/**
* Internal hwaccel capabilities.
*/
int caps_internal;
/**
* Fill the given hw_frames context with current codec parameters. Called
* from get_format. Refer to avcodec_get_hw_frames_parameters() for
* details.
*
* This CAN be called before AVHWAccel.init is called, and you must assume
* that avctx->hwaccel_priv_data is invalid.
*/
int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx);
} AVHWAccel;
/**
@ -2656,7 +2567,7 @@ int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
/**
* Return decoded output data from a decoder or encoder (when the
* AV_CODEC_FLAG_RECON_FRAME flag is used).
* @ref AV_CODEC_FLAG_RECON_FRAME flag is used).
*
* @param avctx codec context
* @param frame This will be set to a reference-counted video or audio
@ -2670,10 +2581,7 @@ int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
* @retval AVERROR_EOF the codec has been fully flushed, and there will be
* no more output frames
* @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
* AV_CODEC_FLAG_RECON_FRAME flag enabled
* @retval AVERROR_INPUT_CHANGED current decoded frame has changed parameters with
* respect to first decoded frame. Applicable when flag
* AV_CODEC_FLAG_DROPCHANGED is set.
* @ref AV_CODEC_FLAG_RECON_FRAME flag enabled
* @retval "other negative error code" legitimate decoding errors
*/
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);

Просмотреть файл

@ -0,0 +1,59 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * APIs internal to the generic codec layer.
 *
 * MUST NOT be included by individual encoders or decoders.
 */
#ifndef AVCODEC_AVCODEC_INTERNAL_H
#define AVCODEC_AVCODEC_INTERNAL_H
struct AVCodecContext;
struct AVFrame;
/**
 * avcodec_receive_frame() implementation for decoders.
 */
int ff_decode_receive_frame(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
 * avcodec_receive_frame() implementation for encoders.
 */
int ff_encode_receive_frame(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
 * Perform encoder initialization and validation.
 * Called when opening the encoder, before the FFCodec.init() call.
 */
int ff_encode_preinit(struct AVCodecContext *avctx);
/**
 * Perform decoder initialization and validation.
 * Called when opening the decoder, before the FFCodec.init() call.
 */
int ff_decode_preinit(struct AVCodecContext *avctx);
/* Decoder-side flush helper — presumably called from avcodec_flush_buffers();
 * confirm in the generic layer. */
void ff_decode_flush_buffers(struct AVCodecContext *avctx);
/* Encoder-side flush helper — presumably called from avcodec_flush_buffers();
 * confirm in the generic layer. */
void ff_encode_flush_buffers(struct AVCodecContext *avctx);
/* Allocate the AVCodecInternal used for a decoder context. */
struct AVCodecInternal *ff_decode_internal_alloc(void);
/* Allocate the AVCodecInternal used for an encoder context. */
struct AVCodecInternal *ff_encode_internal_alloc(void);
#endif // AVCODEC_AVCODEC_INTERNAL_H

Просмотреть файл

@ -65,16 +65,19 @@ extern const FFBitStreamFilter ff_vp9_metadata_bsf;
extern const FFBitStreamFilter ff_vp9_raw_reorder_bsf;
extern const FFBitStreamFilter ff_vp9_superframe_bsf;
extern const FFBitStreamFilter ff_vp9_superframe_split_bsf;
extern const FFBitStreamFilter ff_vvc_metadata_bsf;
extern const FFBitStreamFilter ff_vvc_mp4toannexb_bsf;
extern const FFBitStreamFilter ff_evc_frame_merge_bsf;
#include "libavcodec/bsf_list.c"
const AVBitStreamFilter *av_bsf_iterate(void **opaque)
{
uintptr_t i = (uintptr_t)*opaque;
uintptr_t i = (uintptr_t) * opaque;
const FFBitStreamFilter *f = bitstream_filters[i];
if (f) {
*opaque = (void*)(i + 1);
*opaque = (void *)(i + 1);
return &f->p;
}
return NULL;

Просмотреть файл

@ -31,8 +31,7 @@
#include "bsf_internal.h"
#include "codec_desc.h"
#include "codec_par.h"
#define IS_EMPTY(pkt) (!(pkt)->data && !(pkt)->side_data_elems)
#include "packet_internal.h"
static av_always_inline const FFBitStreamFilter *ff_bsf(const AVBitStreamFilter *bsf)
{
@ -205,7 +204,7 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
FFBSFContext *const bsfi = ffbsfcontext(ctx);
int ret;
if (!pkt || IS_EMPTY(pkt)) {
if (!pkt || AVPACKET_IS_EMPTY(pkt)) {
if (pkt)
av_packet_unref(pkt);
bsfi->eof = 1;
@ -217,7 +216,7 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
return AVERROR(EINVAL);
}
if (!IS_EMPTY(bsfi->buffer_pkt))
if (!AVPACKET_IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
ret = av_packet_make_refcounted(pkt);
@ -241,7 +240,7 @@ int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
if (bsfi->eof)
return AVERROR_EOF;
if (IS_EMPTY(bsfi->buffer_pkt))
if (AVPACKET_IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
tmp_pkt = av_packet_alloc();
@ -261,7 +260,7 @@ int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
if (bsfi->eof)
return AVERROR_EOF;
if (IS_EMPTY(bsfi->buffer_pkt))
if (AVPACKET_IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
av_packet_move_ref(pkt, bsfi->buffer_pkt);

Просмотреть файл

@ -180,7 +180,7 @@ static av_always_inline void bytestream2_skipu(GetByteContext *g,
static av_always_inline void bytestream2_skip_p(PutByteContext *p,
unsigned int size)
{
int size2;
unsigned int size2;
if (p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
@ -268,7 +268,7 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
uint8_t *dst,
unsigned int size)
{
int size2 = FFMIN(g->buffer_end - g->buffer, size);
unsigned int size2 = FFMIN(g->buffer_end - g->buffer, size);
memcpy(dst, g->buffer, size2);
g->buffer += size2;
return size2;
@ -287,7 +287,7 @@ static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
const uint8_t *src,
unsigned int size)
{
int size2;
unsigned int size2;
if (p->eof)
return 0;
size2 = FFMIN(p->buffer_end - p->buffer, size);
@ -311,7 +311,7 @@ static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
const uint8_t c,
unsigned int size)
{
int size2;
unsigned int size2;
if (p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
@ -348,7 +348,7 @@ static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
GetByteContext *g,
unsigned int size)
{
int size2;
unsigned int size2;
if (p->eof)
return 0;

Просмотреть файл

@ -40,6 +40,9 @@ static const CodedBitstreamType *const cbs_type_table[] = {
#if CONFIG_CBS_H265
&ff_cbs_type_h265,
#endif
#if CONFIG_CBS_H266
&ff_cbs_type_h266,
#endif
#if CONFIG_CBS_JPEG
&ff_cbs_type_jpeg,
#endif
@ -61,6 +64,9 @@ const enum AVCodecID ff_cbs_all_codec_ids[] = {
#if CONFIG_CBS_H265
AV_CODEC_ID_H265,
#endif
#if CONFIG_CBS_H266
AV_CODEC_ID_H266,
#endif
#if CONFIG_CBS_JPEG
AV_CODEC_ID_MJPEG,
#endif
@ -540,10 +546,13 @@ void ff_cbs_trace_syntax_element(CodedBitstreamContext *ctx, int position,
position, name, pad, bits, value);
}
int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, uint32_t *write_to,
uint32_t range_min, uint32_t range_max)
static av_always_inline int cbs_read_unsigned(CodedBitstreamContext *ctx,
GetBitContext *gbc,
int width, const char *name,
const int *subscripts,
uint32_t *write_to,
uint32_t range_min,
uint32_t range_max)
{
uint32_t value;
int position;
@ -583,6 +592,22 @@ int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
return 0;
}
/**
 * Read an unsigned bitstream element with an explicit valid range.
 *
 * External entry point: forwards to the always-inline cbs_read_unsigned()
 * so the hot internal callers can be inlined while other files still get
 * a linkable symbol.
 */
int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
                         int width, const char *name,
                         const int *subscripts, uint32_t *write_to,
                         uint32_t range_min, uint32_t range_max)
{
    return cbs_read_unsigned(ctx, gbc, width, name,
                             subscripts, write_to,
                             range_min, range_max);
}
/**
 * Read an unsigned bitstream element with no range restriction beyond
 * its width and no subscript support.
 */
int ff_cbs_read_simple_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
                                int width, const char *name, uint32_t *write_to)
{
    return cbs_read_unsigned(ctx, gbc, width, name,
                             /*subscripts*/ NULL, write_to,
                             /*range_min*/ 0, /*range_max*/ UINT32_MAX);
}
int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
int width, const char *name,
const int *subscripts, uint32_t value,
@ -619,6 +644,13 @@ int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
return 0;
}
/**
 * Write an unsigned bitstream element allowing the full range
 * representable in the given width, with no subscript support.
 */
int ff_cbs_write_simple_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
                                 int width, const char *name, uint32_t value)
{
    return ff_cbs_write_unsigned(ctx, pbc, width, name,
                                 /*subscripts*/ NULL, value,
                                 /*range_min*/ 0,
                                 /*range_max*/ MAX_UINT_BITS(width));
}
int ff_cbs_read_signed(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, int32_t *write_to,
@ -1026,3 +1058,24 @@ int ff_cbs_make_unit_writable(CodedBitstreamContext *ctx,
av_buffer_unref(&ref);
return 0;
}
/**
 * Drop units from the fragment according to the given skip level.
 *
 * If a discardable unit is found and DISCARD_FLAG_KEEP_NON_VCL is not
 * set, the whole fragment is freed; otherwise only the discardable
 * units themselves are removed.
 */
void ff_cbs_discard_units(CodedBitstreamContext *ctx,
                          CodedBitstreamFragment *frag,
                          enum AVDiscard skip,
                          int flags)
{
    /* Codecs without a classifier cannot discard anything. */
    if (!ctx->codec->discarded_unit)
        return;

    /* Iterate backwards so deleting a unit does not shift the
     * indices of units still to be examined. */
    for (int idx = frag->nb_units - 1; idx >= 0; idx--) {
        if (!ctx->codec->discarded_unit(ctx, &frag->units[idx], skip))
            continue;
        if (!(flags & DISCARD_FLAG_KEEP_NON_VCL)) {
            // discard all units
            ff_cbs_fragment_free(frag);
            return;
        }
        ff_cbs_delete_unit(frag, idx);
    }
}

Просмотреть файл

@ -26,6 +26,7 @@
#include "codec_id.h"
#include "codec_par.h"
#include "defs.h"
#include "packet.h"
@ -432,5 +433,21 @@ int ff_cbs_make_unit_refcounted(CodedBitstreamContext *ctx,
int ff_cbs_make_unit_writable(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
enum CbsDiscardFlags {
DISCARD_FLAG_NONE = 0,
/**
* keep non-vcl units even if the picture has been dropped.
*/
DISCARD_FLAG_KEEP_NON_VCL = 0x01,
};
/**
 * Discard units according to 'skip'.
*/
void ff_cbs_discard_units(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
enum AVDiscard skip,
int flags);
#endif /* AVCODEC_CBS_H */

Просмотреть файл

@ -412,9 +412,8 @@ static int cbs_av1_read_subexp(CodedBitstreamContext *ctx, GetBitContext *gbc,
}
if (len < max_len) {
err = ff_cbs_read_unsigned(ctx, gbc, range_bits,
"subexp_bits", NULL, &value,
0, MAX_UINT_BITS(range_bits));
err = ff_cbs_read_simple_unsigned(ctx, gbc, range_bits,
"subexp_bits", &value);
if (err < 0)
return err;
@ -476,10 +475,9 @@ static int cbs_av1_write_subexp(CodedBitstreamContext *ctx, PutBitContext *pbc,
return err;
if (len < max_len) {
err = ff_cbs_write_unsigned(ctx, pbc, range_bits,
"subexp_bits", NULL,
value - range_offset,
0, MAX_UINT_BITS(range_bits));
err = ff_cbs_write_simple_unsigned(ctx, pbc, range_bits,
"subexp_bits",
value - range_offset);
if (err < 0)
return err;
@ -546,8 +544,6 @@ static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]){ subs, __VA_ARGS__ }) : NULL)
#define fb(width, name) \
xf(width, name, current->name, 0, MAX_UINT_BITS(width), 0, )
#define fc(width, name, range_min, range_max) \
xf(width, name, current->name, range_min, range_max, 0, )
#define flag(name) fb(1, name)
@ -573,6 +569,13 @@ static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
#define READWRITE read
#define RWContext GetBitContext
#define fb(width, name) do { \
uint32_t value; \
CHECK(ff_cbs_read_simple_unsigned(ctx, rw, width, \
#name, &value)); \
current->name = value; \
} while (0)
#define xf(width, name, var, range_min, range_max, subs, ...) do { \
uint32_t value; \
CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \
@ -645,6 +648,7 @@ static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
#undef READ
#undef READWRITE
#undef RWContext
#undef fb
#undef xf
#undef xsu
#undef uvlc
@ -661,6 +665,11 @@ static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
#define READWRITE write
#define RWContext PutBitContext
#define fb(width, name) do { \
CHECK(ff_cbs_write_simple_unsigned(ctx, rw, width, #name, \
current->name)); \
} while (0)
#define xf(width, name, var, range_min, range_max, subs, ...) do { \
CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \
SUBSCRIPTS(subs, __VA_ARGS__), \
@ -723,6 +732,7 @@ static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
#undef WRITE
#undef READWRITE
#undef RWContext
#undef fb
#undef xf
#undef xsu
#undef uvlc
@ -1306,9 +1316,16 @@ static void cbs_av1_free_metadata(void *unit, uint8_t *content)
md = &obu->obu.metadata;
switch (md->metadata_type) {
case AV1_METADATA_TYPE_HDR_CLL:
case AV1_METADATA_TYPE_HDR_MDCV:
case AV1_METADATA_TYPE_SCALABILITY:
case AV1_METADATA_TYPE_TIMECODE:
break;
case AV1_METADATA_TYPE_ITUT_T35:
av_buffer_unref(&md->metadata.itut_t35.payload_ref);
break;
default:
av_buffer_unref(&md->metadata.unknown.payload_ref);
}
av_free(content);
}

Просмотреть файл

@ -215,6 +215,8 @@ typedef struct AV1RawFrameHeader {
uint8_t uniform_tile_spacing_flag;
uint8_t tile_cols_log2;
uint8_t tile_rows_log2;
uint8_t tile_start_col_sb[AV1_MAX_TILE_COLS];
uint8_t tile_start_row_sb[AV1_MAX_TILE_COLS];
uint8_t width_in_sbs_minus_1[AV1_MAX_TILE_COLS];
uint8_t height_in_sbs_minus_1[AV1_MAX_TILE_ROWS];
uint16_t context_update_tile_id;
@ -370,6 +372,12 @@ typedef struct AV1RawMetadataTimecode {
uint32_t time_offset_value;
} AV1RawMetadataTimecode;
typedef struct AV1RawMetadataUnknown {
uint8_t *payload;
AVBufferRef *payload_ref;
size_t payload_size;
} AV1RawMetadataUnknown;
typedef struct AV1RawMetadata {
uint64_t metadata_type;
union {
@ -378,6 +386,7 @@ typedef struct AV1RawMetadata {
AV1RawMetadataScalability scalability;
AV1RawMetadataITUTT35 itut_t35;
AV1RawMetadataTimecode timecode;
AV1RawMetadataUnknown unknown;
} metadata;
} AV1RawMetadata;

Просмотреть файл

@ -176,7 +176,7 @@ static int FUNC(decoder_model_info)(CodedBitstreamContext *ctx, RWContext *rw,
int err;
fb(5, buffer_delay_length_minus_1);
fb(32, num_units_in_decoding_tick);
fc(32, num_units_in_decoding_tick, 1, MAX_UINT_BITS(32));
fb(5, buffer_removal_time_length_minus_1);
fb(5, frame_presentation_time_length_minus_1);
@ -626,6 +626,10 @@ static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
tile_width_sb = (sb_cols + (1 << current->tile_cols_log2) - 1) >>
current->tile_cols_log2;
for (int off = 0, i = 0; off < sb_cols; off += tile_width_sb)
current->tile_start_col_sb[i++] = off;
current->tile_cols = (sb_cols + tile_width_sb - 1) / tile_width_sb;
min_log2_tile_rows = FFMAX(min_log2_tiles - current->tile_cols_log2, 0);
@ -634,6 +638,10 @@ static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
tile_height_sb = (sb_rows + (1 << current->tile_rows_log2) - 1) >>
current->tile_rows_log2;
for (int off = 0, i = 0; off < sb_rows; off += tile_height_sb)
current->tile_start_row_sb[i++] = off;
current->tile_rows = (sb_rows + tile_height_sb - 1) / tile_height_sb;
for (i = 0; i < current->tile_cols - 1; i++)
@ -652,6 +660,7 @@ static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
start_sb = 0;
for (i = 0; start_sb < sb_cols && i < AV1_MAX_TILE_COLS; i++) {
current->tile_start_col_sb[i] = start_sb;
max_width = FFMIN(sb_cols - start_sb, max_tile_width_sb);
ns(max_width, width_in_sbs_minus_1[i], 1, i);
size_sb = current->width_in_sbs_minus_1[i] + 1;
@ -669,6 +678,7 @@ static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
start_sb = 0;
for (i = 0; start_sb < sb_rows && i < AV1_MAX_TILE_ROWS; i++) {
current->tile_start_row_sb[i] = start_sb;
max_height = FFMIN(sb_rows - start_sb, max_tile_height_sb);
ns(max_height, height_in_sbs_minus_1[i], 1, i);
size_sb = current->height_in_sbs_minus_1[i] + 1;
@ -1843,6 +1853,8 @@ static int FUNC(metadata_hdr_cll)(CodedBitstreamContext *ctx, RWContext *rw,
{
int err;
HEADER("HDR CLL Metadata");
fb(16, max_cll);
fb(16, max_fall);
@ -1854,6 +1866,8 @@ static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
{
int err, i;
HEADER("HDR MDCV Metadata");
for (i = 0; i < 3; i++) {
fbs(16, primary_chromaticity_x[i], 1, i);
fbs(16, primary_chromaticity_y[i], 1, i);
@ -1920,6 +1934,8 @@ static int FUNC(metadata_scalability)(CodedBitstreamContext *ctx, RWContext *rw,
{
int err;
HEADER("Scalability Metadata");
fb(8, scalability_mode_idc);
if (current->scalability_mode_idc == AV1_SCALABILITY_SS)
@ -1934,6 +1950,8 @@ static int FUNC(metadata_itut_t35)(CodedBitstreamContext *ctx, RWContext *rw,
int err;
size_t i;
HEADER("ITU-T T.35 Metadata");
fb(8, itu_t_t35_country_code);
if (current->itu_t_t35_country_code == 0xff)
fb(8, itu_t_t35_country_code_extension_byte);
@ -1961,6 +1979,8 @@ static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
{
int err;
HEADER("Timecode Metadata");
fb(5, counting_type);
flag(full_timestamp_flag);
flag(discontinuity_flag);
@ -1994,6 +2014,29 @@ static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
return 0;
}
/* Read or write the raw payload of a metadata OBU whose type has no
 * dedicated parser. The payload is preserved verbatim, byte by byte. */
static int FUNC(metadata_unknown)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawMetadataUnknown *current)
{
    int err;
    size_t i;

    HEADER("Unknown Metadata");

#ifdef READ
    /* On read: everything left in the OBU is the payload; keep it in a
     * refcounted buffer so the content can be shared without copying. */
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    /* Transfer the payload one byte at a time (also generates trace
     * output per byte). */
    for (i = 0; i < current->payload_size; i++)
        fbs(8, payload[i], 1, i);

    return 0;
}
static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
AV1RawMetadata *current)
{
@ -2018,8 +2061,7 @@ static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
CHECK(FUNC(metadata_timecode)(ctx, rw, &current->metadata.timecode));
break;
default:
// Unknown metadata type.
return AVERROR_PATCHWELCOME;
CHECK(FUNC(metadata_unknown)(ctx, rw, &current->metadata.unknown));
}
return 0;

Просмотреть файл

@ -133,6 +133,12 @@ typedef struct CodedBitstreamType {
CodedBitstreamUnit *unit,
PutBitContext *pbc);
// Return 1 when the unit should be dropped according to 'skip',
// 0 otherwise.
int (*discarded_unit)(CodedBitstreamContext *ctx,
const CodedBitstreamUnit *unit,
enum AVDiscard skip);
// Read the data from all of frag->units and assemble it into
// a bitstream for the whole fragment.
int (*assemble_fragment)(CodedBitstreamContext *ctx,
@ -157,18 +163,27 @@ void ff_cbs_trace_syntax_element(CodedBitstreamContext *ctx, int position,
// Helper functions for read/write of common bitstream elements, including
// generation of trace output.
// generation of trace output. The simple functions are equivalent to
// their non-simple counterparts except that their range is unrestricted
// (i.e. only limited by the amount of bits used) and they lack
// the ability to use subscripts.
int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, uint32_t *write_to,
uint32_t range_min, uint32_t range_max);
int ff_cbs_read_simple_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name, uint32_t *write_to);
int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
int width, const char *name,
const int *subscripts, uint32_t value,
uint32_t range_min, uint32_t range_max);
int ff_cbs_write_simple_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
int width, const char *name, uint32_t value);
int ff_cbs_read_signed(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, int32_t *write_to,
@ -245,6 +260,7 @@ int ff_cbs_write_signed(CodedBitstreamContext *ctx, PutBitContext *pbc,
extern const CodedBitstreamType ff_cbs_type_av1;
extern const CodedBitstreamType ff_cbs_type_h264;
extern const CodedBitstreamType ff_cbs_type_h265;
extern const CodedBitstreamType ff_cbs_type_h266;
extern const CodedBitstreamType ff_cbs_type_jpeg;
extern const CodedBitstreamType ff_cbs_type_mpeg2;
extern const CodedBitstreamType ff_cbs_type_vp9;

Просмотреть файл

@ -80,6 +80,7 @@
*/
#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
#if FF_API_SUBFRAMES
/**
* Codec can output multiple frames per AVPacket
* Normally demuxers return one frame at a time, demuxers which do not do
@ -92,6 +93,8 @@
* as a last resort.
*/
#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
#endif
/**
* Codec is experimental and is thus avoided in favor of non experimental
* encoders

Просмотреть файл

@ -38,14 +38,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_VIDEO,
.name = "mpeg1video",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER |
// FIXME this is strictly speaking not true, as MPEG-1 does
// not allow field coding, but our mpeg12 code (decoder and
// parser) can sometimes change codec id at runtime, so
// this is safer
AV_CODEC_PROP_FIELDS,
},
{
.id = AV_CODEC_ID_MPEG2VIDEO,
.type = AVMEDIA_TYPE_VIDEO,
.name = "mpeg2video",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER |
AV_CODEC_PROP_FIELDS,
.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
},
{
@ -225,7 +231,8 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_VIDEO,
.name = "h264",
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS | AV_CODEC_PROP_REORDER,
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS |
AV_CODEC_PROP_REORDER | AV_CODEC_PROP_FIELDS,
.profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
},
{
@ -529,7 +536,8 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_VIDEO,
.name = "vc1",
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER |
AV_CODEC_PROP_FIELDS,
.profiles = NULL_IF_CONFIG_SMALL(ff_vc1_profiles),
},
{
@ -1923,6 +1931,35 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("ViewQuest VQC"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_PDV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "pdv",
.long_name = NULL_IF_CONFIG_SMALL("PDV (PlayDate Video)"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_EVC,
.type = AVMEDIA_TYPE_VIDEO,
.name = "evc",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-5 EVC (Essential Video Coding)"),
.props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
.profiles = NULL_IF_CONFIG_SMALL(ff_evc_profiles),
},
{
.id = AV_CODEC_ID_RTV1,
.type = AVMEDIA_TYPE_VIDEO,
.name = "rtv1",
.long_name = NULL_IF_CONFIG_SMALL("RTV1 (RivaTuner Video)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_VMIX,
.type = AVMEDIA_TYPE_VIDEO,
.name = "vmix",
.long_name = NULL_IF_CONFIG_SMALL("vMix Video"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* various PCM "codecs" */
{
@ -3369,6 +3406,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("RKA (RK Audio)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_AC4,
.type = AVMEDIA_TYPE_AUDIO,
.name = "ac4",
.long_name = NULL_IF_CONFIG_SMALL("AC-4"),
.props = AV_CODEC_PROP_LOSSY,
},
/* subtitle codecs */
{
@ -3550,7 +3594,6 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_SUBTITLE,
.name = "arib_caption",
.long_name = NULL_IF_CONFIG_SMALL("ARIB STD-B24 caption"),
.props = AV_CODEC_PROP_TEXT_SUB,
.profiles = NULL_IF_CONFIG_SMALL(ff_arib_caption_profiles),
},
@ -3627,6 +3670,12 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("binary data"),
.mime_types= MT("application/octet-stream"),
},
{
.id = AV_CODEC_ID_SMPTE_2038,
.type = AVMEDIA_TYPE_DATA,
.name = "smpte_2038",
.long_name = NULL_IF_CONFIG_SMALL("SMPTE ST 2038 VANC in MPEG-2 TS"),
},
{
.id = AV_CODEC_ID_MPEG2TS,
.type = AVMEDIA_TYPE_DATA,

Просмотреть файл

@ -90,6 +90,12 @@ typedef struct AVCodecDescriptor {
* equal.
*/
#define AV_CODEC_PROP_REORDER (1 << 3)
/**
* Video codec supports separate coding of fields in interlaced frames.
*/
#define AV_CODEC_PROP_FIELDS (1 << 4)
/**
* Subtitle codec is bitmap based
* Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.

Просмотреть файл

@ -320,6 +320,10 @@ enum AVCodecID {
AV_CODEC_ID_WBMP,
AV_CODEC_ID_MEDIA100,
AV_CODEC_ID_VQC,
AV_CODEC_ID_PDV,
AV_CODEC_ID_EVC,
AV_CODEC_ID_RTV1,
AV_CODEC_ID_VMIX,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -538,6 +542,7 @@ enum AVCodecID {
AV_CODEC_ID_FTR,
AV_CODEC_ID_WAVARC,
AV_CODEC_ID_RKA,
AV_CODEC_ID_AC4,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -582,6 +587,7 @@ enum AVCodecID {
AV_CODEC_ID_DVD_NAV,
AV_CODEC_ID_TIMED_ID3,
AV_CODEC_ID_BIN_DATA,
AV_CODEC_ID_SMPTE_2038,
AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it

Просмотреть файл

@ -46,6 +46,7 @@ static void codec_parameters_reset(AVCodecParameters *par)
par->color_space = AVCOL_SPC_UNSPECIFIED;
par->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
par->sample_aspect_ratio = (AVRational){ 0, 1 };
par->framerate = (AVRational){ 0, 1 };
par->profile = FF_PROFILE_UNKNOWN;
par->level = FF_LEVEL_UNKNOWN;
}
@ -126,6 +127,7 @@ int avcodec_parameters_from_context(AVCodecParameters *par,
par->chroma_location = codec->chroma_sample_location;
par->sample_aspect_ratio = codec->sample_aspect_ratio;
par->video_delay = codec->has_b_frames;
par->framerate = codec->framerate;
break;
case AVMEDIA_TYPE_AUDIO:
par->format = codec->sample_fmt;
@ -207,6 +209,7 @@ int avcodec_parameters_to_context(AVCodecContext *codec,
codec->chroma_sample_location = par->chroma_location;
codec->sample_aspect_ratio = par->sample_aspect_ratio;
codec->has_b_frames = par->video_delay;
codec->framerate = par->framerate;
break;
case AVMEDIA_TYPE_AUDIO:
codec->sample_fmt = par->format;
@ -250,8 +253,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
break;
}
av_freep(&codec->extradata);
if (par->extradata) {
av_freep(&codec->extradata);
codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!codec->extradata)
return AVERROR(ENOMEM);

Просмотреть файл

@ -211,6 +211,18 @@ typedef struct AVCodecParameters {
* Audio only. The channel layout and number of channels.
*/
AVChannelLayout ch_layout;
/**
* Video only. Number of frames per second, for streams with constant frame
* durations. Should be set to { 0, 1 } when some frames have differing
* durations or if the value is not known.
*
 * @note This field corresponds to values that are stored in codec-level
* headers and is typically overridden by container/transport-layer
* timestamps, when available. It should thus be used only as a last resort,
* when no higher-level timing information is available.
*/
AVRational framerate;
} AVCodecParameters;
/**

Просмотреть файл

@ -52,13 +52,6 @@ void ff_dct_end (DCTContext *s);
void ff_dct_init_x86(DCTContext *s);
void ff_fdct_ifast(int16_t *data);
void ff_fdct_ifast248(int16_t *data);
void ff_jpeg_fdct_islow_8(int16_t *data);
void ff_jpeg_fdct_islow_10(int16_t *data);
void ff_fdct248_islow_8(int16_t *data);
void ff_fdct248_islow_10(int16_t *data);
void ff_j_rev_dct(int16_t *data);
void ff_j_rev_dct4(int16_t *data);
void ff_j_rev_dct2(int16_t *data);

Просмотреть файл

@ -41,14 +41,34 @@
#include "libavutil/opt.h"
#include "avcodec.h"
#include "avcodec_internal.h"
#include "bytestream.h"
#include "bsf.h"
#include "codec_internal.h"
#include "decode.h"
#include "hwaccel_internal.h"
#include "hwconfig.h"
#include "internal.h"
#include "packet_internal.h"
#include "thread.h"
/* Decoder-specific state wrapping the generic AVCodecInternal.
 * avci must remain the first member so the two structs can be cast
 * into one another (see decode_ctx()). */
typedef struct DecodeContext {
    AVCodecInternal avci;

    /* to prevent infinite loop on errors when draining */
    int nb_draining_errors;

    /**
     * The caller has submitted a NULL packet on input.
     */
    int draining_started;
} DecodeContext;
/* Recover the DecodeContext that wraps the given AVCodecInternal.
 * Valid because avci is the first member of DecodeContext. */
static DecodeContext *decode_ctx(AVCodecInternal *avci)
{
    DecodeContext *dc = (DecodeContext *)avci;
    return dc;
}
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{
int ret;
@ -85,15 +105,26 @@ FF_DISABLE_DEPRECATION_WARNINGS
ret = AVERROR_INVALIDDATA;
goto fail2;
}
avctx->channels = val;
av_channel_layout_uninit(&avctx->ch_layout);
avctx->ch_layout.nb_channels = val;
avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
size -= 4;
}
if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
if (size < 8)
goto fail;
avctx->channel_layout = bytestream_get_le64(&data);
av_channel_layout_uninit(&avctx->ch_layout);
ret = av_channel_layout_from_mask(&avctx->ch_layout, bytestream_get_le64(&data));
if (ret < 0)
goto fail2;
size -= 8;
}
if (flags & (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT |
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)) {
avctx->channels = avctx->ch_layout.nb_channels;
avctx->channel_layout = (avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE) ?
avctx->ch_layout.u.mask : 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
@ -182,14 +213,11 @@ fail:
return ret;
}
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
static int decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
if (avci->draining)
return AVERROR_EOF;
ret = av_bsf_receive_packet(avci->bsf, pkt);
if (ret == AVERROR_EOF)
avci->draining = 1;
@ -212,6 +240,31 @@ finish:
return ret;
}
/* Fetch the next filtered packet for the decoder.
 * Pulls from the bitstream filter chain; when the chain wants more
 * input (EAGAIN) and we have a buffered packet — or draining has been
 * requested — feeds that to the chain and retries. */
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);

    if (avci->draining)
        return AVERROR_EOF;

    while (1) {
        int ret = decode_get_packet(avctx, pkt);
        if (ret == AVERROR(EAGAIN) &&
            (!AVPACKET_IS_EMPTY(avci->buffer_pkt) || dc->draining_started)) {
            /* The bsf needs input: hand it the buffered packet (an empty
             * buffer_pkt with draining_started set signals EOF to it). */
            ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
            if (ret < 0) {
                av_packet_unref(avci->buffer_pkt);
                return ret;
            }

            continue;
        }

        return ret;
    }
}
/**
* Attempt to guess proper monotonic timestamps for decoded video frames
* which might have incorrect times. Input timestamps may wrap around, in
@ -248,6 +301,98 @@ static int64_t guess_correct_pts(AVCodecContext *ctx,
return pts;
}
static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
{
AVCodecInternal *avci = avctx->internal;
AVFrameSideData *side;
uint32_t discard_padding = 0;
uint8_t skip_reason = 0;
uint8_t discard_reason = 0;
side = av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
if (side && side->size >= 10) {
avci->skip_samples = AV_RL32(side->data);
avci->skip_samples = FFMAX(0, avci->skip_samples);
discard_padding = AV_RL32(side->data + 4);
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
avci->skip_samples, (int)discard_padding);
skip_reason = AV_RL8(side->data + 8);
discard_reason = AV_RL8(side->data + 9);
}
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if (!side && (avci->skip_samples || discard_padding))
side = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (side && (avci->skip_samples || discard_padding)) {
AV_WL32(side->data, avci->skip_samples);
AV_WL32(side->data + 4, discard_padding);
AV_WL8(side->data + 8, skip_reason);
AV_WL8(side->data + 9, discard_reason);
avci->skip_samples = 0;
}
return 0;
}
av_frame_remove_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
if ((frame->flags & AV_FRAME_FLAG_DISCARD)) {
avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
*discarded_samples += frame->nb_samples;
return AVERROR(EAGAIN);
}
if (avci->skip_samples > 0) {
if (frame->nb_samples <= avci->skip_samples){
*discarded_samples += frame->nb_samples;
avci->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
avci->skip_samples);
return AVERROR(EAGAIN);
} else {
av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
if (avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(avci->skip_samples,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
if (frame->pts != AV_NOPTS_VALUE)
frame->pts += diff_ts;
if (frame->pkt_dts != AV_NOPTS_VALUE)
frame->pkt_dts += diff_ts;
if (frame->duration >= diff_ts)
frame->duration -= diff_ts;
} else
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
avci->skip_samples, frame->nb_samples);
*discarded_samples += avci->skip_samples;
frame->nb_samples -= avci->skip_samples;
avci->skip_samples = 0;
}
}
if (discard_padding > 0 && discard_padding <= frame->nb_samples) {
if (discard_padding == frame->nb_samples) {
*discarded_samples += frame->nb_samples;
return AVERROR(EAGAIN);
} else {
if (avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
frame->duration = diff_ts;
} else
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
(int)discard_padding, frame->nb_samples);
frame->nb_samples -= discard_padding;
}
}
return 0;
}
/*
* The core of the receive_frame_wrapper for the decoders implementing
* the simple API. Certain decoders might consume partial packets without
@ -259,7 +404,7 @@ static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame,
AVCodecInternal *avci = avctx->internal;
AVPacket *const pkt = avci->in_pkt;
const FFCodec *const codec = ffcodec(avctx->codec);
int got_frame, actual_got_frame;
int got_frame, consumed;
int ret;
if (!pkt->data && !avci->draining) {
@ -282,9 +427,9 @@ static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame,
got_frame = 0;
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
consumed = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
} else {
ret = codec->cb.decode(avctx, frame, &got_frame, pkt);
consumed = codec->cb.decode(avctx, frame, &got_frame, pkt);
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
frame->pkt_dts = pkt->dts;
@ -295,157 +440,41 @@ FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pos = pkt->pos;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
//FIXME these should be under if(!avctx->has_b_frames)
/* get_buffer is supposed to set frame parameters */
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (!frame->width) frame->width = avctx->width;
if (!frame->height) frame->height = avctx->height;
if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
}
}
}
emms_c();
actual_got_frame = got_frame;
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->flags & AV_FRAME_FLAG_DISCARD)
got_frame = 0;
ret = (!got_frame || frame->flags & AV_FRAME_FLAG_DISCARD)
? AVERROR(EAGAIN)
: 0;
} else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
uint8_t *side;
size_t side_size;
uint32_t discard_padding = 0;
uint8_t skip_reason = 0;
uint8_t discard_reason = 0;
if (ret >= 0 && got_frame) {
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
if (!frame->ch_layout.nb_channels) {
int ret2 = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
if (ret2 < 0) {
ret = ret2;
got_frame = 0;
}
}
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
if (!frame->channel_layout)
frame->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
avctx->ch_layout.u.mask : 0;
if (!frame->channels)
frame->channels = avctx->ch_layout.nb_channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (!frame->sample_rate)
frame->sample_rate = avctx->sample_rate;
}
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if(side && side_size>=10) {
avci->skip_samples = AV_RL32(side);
avci->skip_samples = FFMAX(0, avci->skip_samples);
discard_padding = AV_RL32(side + 4);
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
avci->skip_samples, (int)discard_padding);
skip_reason = AV_RL8(side + 8);
discard_reason = AV_RL8(side + 9);
}
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
got_frame = 0;
*discarded_samples += frame->nb_samples;
}
if (avci->skip_samples > 0 && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if(frame->nb_samples <= avci->skip_samples){
got_frame = 0;
*discarded_samples += frame->nb_samples;
avci->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
avci->skip_samples);
} else {
av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(avci->skip_samples,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
if(frame->pts!=AV_NOPTS_VALUE)
frame->pts += diff_ts;
if(frame->pkt_dts!=AV_NOPTS_VALUE)
frame->pkt_dts += diff_ts;
if (frame->duration >= diff_ts)
frame->duration -= diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
avci->skip_samples, frame->nb_samples);
*discarded_samples += avci->skip_samples;
frame->nb_samples -= avci->skip_samples;
avci->skip_samples = 0;
}
}
if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if (discard_padding == frame->nb_samples) {
*discarded_samples += frame->nb_samples;
got_frame = 0;
} else {
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
frame->duration = diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
(int)discard_padding, frame->nb_samples);
frame->nb_samples -= discard_padding;
}
}
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (fside) {
AV_WL32(fside->data, avci->skip_samples);
AV_WL32(fside->data + 4, discard_padding);
AV_WL8(fside->data + 8, skip_reason);
AV_WL8(fside->data + 9, discard_reason);
avci->skip_samples = 0;
}
}
ret = !got_frame ? AVERROR(EAGAIN)
: discard_samples(avctx, frame, discarded_samples);
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
!avci->showed_multi_packet_warning &&
ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
avci->showed_multi_packet_warning = 1;
}
if (!got_frame)
if (ret == AVERROR(EAGAIN))
av_frame_unref(frame);
if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
ret = pkt->size;
if (consumed < 0)
ret = consumed;
if (consumed >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
consumed = pkt->size;
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
if (avci->draining && !actual_got_frame) {
if (!ret)
av_assert0(frame->buf[0]);
if (ret == AVERROR(EAGAIN))
ret = 0;
/* do not stop draining when got_frame != 0 or ret < 0 */
if (avci->draining && !got_frame) {
if (ret < 0) {
/* prevent infinite loop if a decoder wrongly always return error on draining */
/* reasonable nb_errors_max = maximum b frames + thread count */
int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
avctx->thread_count : 1);
if (avci->nb_draining_errors++ >= nb_errors_max) {
if (decode_ctx(avci)->nb_draining_errors++ >= nb_errors_max) {
av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
"Stop draining and force EOF.\n");
avci->draining_done = 1;
@ -456,11 +485,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
if (ret >= pkt->size || ret < 0) {
if (consumed >= pkt->size || ret < 0) {
av_packet_unref(pkt);
} else {
int consumed = ret;
pkt->data += consumed;
pkt->size -= consumed;
pkt->pts = AV_NOPTS_VALUE;
@ -475,10 +502,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
if (got_frame)
av_assert0(frame->buf[0]);
return ret < 0 ? ret : 0;
return ret;
}
#if CONFIG_LCMS2
@ -529,6 +553,48 @@ static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f)
}
#endif
static int fill_frame_props(const AVCodecContext *avctx, AVFrame *frame)
{
int ret;
if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
frame->color_primaries = avctx->color_primaries;
if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
frame->color_trc = avctx->color_trc;
if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
frame->colorspace = avctx->colorspace;
if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
frame->color_range = avctx->color_range;
if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
frame->chroma_location = avctx->chroma_sample_location;
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
} else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
if (!frame->ch_layout.nb_channels) {
ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
if (ret < 0)
return ret;
}
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
if (!frame->channel_layout)
frame->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
avctx->ch_layout.u.mask : 0;
if (!frame->channels)
frame->channels = avctx->ch_layout.nb_channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (!frame->sample_rate)
frame->sample_rate = avctx->sample_rate;
}
return 0;
}
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
int ret;
@ -556,6 +622,14 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
ret = codec->cb.receive_frame(avctx, frame);
emms_c();
if (!ret) {
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO)
ret = (frame->flags & AV_FRAME_FLAG_DISCARD) ? AVERROR(EAGAIN) : 0;
else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
int64_t discarded_samples = 0;
ret = discard_samples(avctx, frame, &discarded_samples);
}
}
} else
ret = decode_simple_receive_frame(avctx, frame);
@ -570,6 +644,31 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
}
if (!ret) {
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if (!frame->width)
frame->width = avctx->width;
if (!frame->height)
frame->height = avctx->height;
} else
frame->flags |= AV_FRAME_FLAG_KEY;
ret = fill_frame_props(avctx, frame);
if (ret < 0) {
av_frame_unref(frame);
return ret;
}
#if FF_API_FRAME_KEY
FF_DISABLE_DEPRECATION_WARNINGS
frame->key_frame = !!(frame->flags & AV_FRAME_FLAG_KEY);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
frame->interlaced_frame = !!(frame->flags & AV_FRAME_FLAG_INTERLACED);
frame->top_field_first = !!(frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
frame->best_effort_timestamp = guess_correct_pts(avctx,
frame->pts,
frame->pkt_dts);
@ -607,31 +706,28 @@ FF_ENABLE_DEPRECATION_WARNINGS
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
DecodeContext *dc = decode_ctx(avci);
int ret;
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
if (avctx->internal->draining)
if (dc->draining_started)
return AVERROR_EOF;
if (avpkt && !avpkt->size && avpkt->data)
return AVERROR(EINVAL);
av_packet_unref(avci->buffer_pkt);
if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
if (!AVPACKET_IS_EMPTY(avci->buffer_pkt))
return AVERROR(EAGAIN);
ret = av_packet_ref(avci->buffer_pkt, avpkt);
if (ret < 0)
return ret;
}
} else
dc->draining_started = 1;
ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
if (ret < 0) {
av_packet_unref(avci->buffer_pkt);
return ret;
}
if (!avci->buffer_frame->buf[0]) {
if (!avci->buffer_frame->buf[0] && !dc->draining_started) {
ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
@ -697,7 +793,7 @@ fail:
int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret, changed;
int ret;
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
@ -727,6 +823,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_DROPCHANGED
if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
if (avctx->frame_num == 1) {
@ -747,7 +844,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
if (avctx->frame_num > 1) {
changed = avci->initial_format != frame->format;
int changed = avci->initial_format != frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
@ -772,6 +869,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
}
#endif
return 0;
fail:
av_frame_unref(frame);
@ -1062,7 +1160,7 @@ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
{
AVBufferRef *frames_ref = NULL;
const AVCodecHWConfigInternal *hw_config;
const AVHWAccel *hwa;
const FFHWAccel *hwa;
int i, ret;
for (i = 0;; i++) {
@ -1081,6 +1179,15 @@ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
if (!frames_ref)
return AVERROR(ENOMEM);
if (!avctx->internal->hwaccel_priv_data) {
avctx->internal->hwaccel_priv_data =
av_mallocz(hwa->priv_data_size);
if (!avctx->internal->hwaccel_priv_data) {
av_buffer_unref(&frames_ref);
return AVERROR(ENOMEM);
}
}
ret = hwa->frame_params(avctx, frames_ref);
if (ret >= 0) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
@ -1105,33 +1212,31 @@ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
}
static int hwaccel_init(AVCodecContext *avctx,
const AVCodecHWConfigInternal *hw_config)
const FFHWAccel *hwaccel)
{
const AVHWAccel *hwaccel;
int err;
hwaccel = hw_config->hwaccel;
if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
if (hwaccel->p.capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
hwaccel->name);
hwaccel->p.name);
return AVERROR_PATCHWELCOME;
}
if (hwaccel->priv_data_size) {
if (!avctx->internal->hwaccel_priv_data && hwaccel->priv_data_size) {
avctx->internal->hwaccel_priv_data =
av_mallocz(hwaccel->priv_data_size);
if (!avctx->internal->hwaccel_priv_data)
return AVERROR(ENOMEM);
}
avctx->hwaccel = hwaccel;
avctx->hwaccel = &hwaccel->p;
if (hwaccel->init) {
err = hwaccel->init(avctx);
if (err < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
"hwaccel initialisation returned error.\n",
av_get_pix_fmt_name(hw_config->public.pix_fmt));
av_get_pix_fmt_name(hwaccel->p.pix_fmt));
av_freep(&avctx->internal->hwaccel_priv_data);
avctx->hwaccel = NULL;
return err;
@ -1141,10 +1246,10 @@ static int hwaccel_init(AVCodecContext *avctx,
return 0;
}
static void hwaccel_uninit(AVCodecContext *avctx)
void ff_hwaccel_uninit(AVCodecContext *avctx)
{
if (avctx->hwaccel && avctx->hwaccel->uninit)
avctx->hwaccel->uninit(avctx);
if (FF_HW_HAS_CB(avctx, uninit))
FF_HW_SIMPLE_CALL(avctx, uninit);
av_freep(&avctx->internal->hwaccel_priv_data);
@ -1180,7 +1285,7 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
for (;;) {
// Remove the previous hwaccel, if there was one.
hwaccel_uninit(avctx);
ff_hwaccel_uninit(avctx);
user_choice = avctx->get_format(avctx, choices);
if (user_choice == AV_PIX_FMT_NONE) {
@ -1265,7 +1370,7 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
if (hw_config->hwaccel) {
av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
"initialisation.\n", desc->name);
err = hwaccel_init(avctx, hw_config);
err = hwaccel_init(avctx, hw_config->hwaccel);
if (err < 0)
goto try_again;
}
@ -1284,6 +1389,9 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
--n;
}
if (ret < 0)
ff_hwaccel_uninit(avctx);
av_freep(&choices);
return ret;
}
@ -1315,9 +1423,11 @@ int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
{ AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
{ AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
{ AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
{ AV_PKT_DATA_AFD, AV_FRAME_DATA_AFD },
{ AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
{ AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
{ AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
{ AV_PKT_DATA_SKIP_SAMPLES, AV_FRAME_DATA_SKIP_SAMPLES },
};
frame->pts = pkt->pts;
@ -1363,9 +1473,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
const AVPacket *pkt = avctx->internal->last_pkt_props;
int ret;
if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
if (ret < 0)
return ret;
#if FF_API_FRAME_PKT
@ -1380,23 +1491,12 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
frame->color_primaries = avctx->color_primaries;
if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
frame->color_trc = avctx->color_trc;
if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
frame->colorspace = avctx->colorspace;
if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
frame->color_range = avctx->color_range;
if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
frame->chroma_location = avctx->chroma_sample_location;
ret = fill_frame_props(avctx, frame);
if (ret < 0)
return ret;
switch (avctx->codec->type) {
case AVMEDIA_TYPE_VIDEO:
frame->format = avctx->pix_fmt;
if (!frame->sample_aspect_ratio.num)
frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (frame->width && frame->height &&
av_image_check_sar(frame->width, frame->height,
frame->sample_aspect_ratio) < 0) {
@ -1405,25 +1505,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
frame->sample_aspect_ratio.den);
frame->sample_aspect_ratio = (AVRational){ 0, 1 };
}
break;
case AVMEDIA_TYPE_AUDIO:
if (!frame->sample_rate)
frame->sample_rate = avctx->sample_rate;
if (frame->format < 0)
frame->format = avctx->sample_fmt;
if (!frame->ch_layout.nb_channels) {
int ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
if (ret < 0)
return ret;
}
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
frame->channels = frame->ch_layout.nb_channels;
frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
frame->ch_layout.u.mask : 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
break;
}
return 0;
@ -1489,7 +1570,7 @@ int ff_attach_decode_data(AVFrame *frame)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
const AVHWAccel *hwaccel = avctx->hwaccel;
const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
int override_dimensions = 1;
int ret;
@ -1685,6 +1766,11 @@ int ff_decode_preinit(AVCodecContext *avctx)
if (ret < 0)
return ret;
#if FF_API_DROPCHANGED
if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED)
av_log(avctx, AV_LOG_WARNING, "The dropchanged flag is deprecated.\n");
#endif
return 0;
}
@ -1702,3 +1788,57 @@ int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
}
return 0;
}
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private,
AVBufferRef **hwaccel_priv_buf)
{
const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
AVBufferRef *ref;
AVHWFramesContext *frames_ctx;
uint8_t *data;
if (!hwaccel || !hwaccel->frame_priv_data_size)
return 0;
av_assert0(!*hwaccel_picture_private);
data = av_mallocz(hwaccel->frame_priv_data_size);
if (!data)
return AVERROR(ENOMEM);
frames_ctx = (AVHWFramesContext *)avctx->hw_frames_ctx->data;
ref = av_buffer_create(data, hwaccel->frame_priv_data_size,
hwaccel->free_frame_priv,
frames_ctx->device_ctx, 0);
if (!ref) {
av_free(data);
return AVERROR(ENOMEM);
}
*hwaccel_priv_buf = ref;
*hwaccel_picture_private = ref->data;
return 0;
}
void ff_decode_flush_buffers(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
DecodeContext *dc = decode_ctx(avci);
av_packet_unref(avci->last_pkt_props);
av_packet_unref(avci->in_pkt);
avctx->pts_correction_last_pts =
avctx->pts_correction_last_dts = INT64_MIN;
av_bsf_flush(avci->bsf);
dc->nb_draining_errors = 0;
dc->draining_started = 0;
}
AVCodecInternal *ff_decode_internal_alloc(void)
{
return av_mallocz(sizeof(DecodeContext));
}

Просмотреть файл

@ -53,11 +53,6 @@ typedef struct FrameDecodeData {
void (*hwaccel_priv_free)(void *priv);
} FrameDecodeData;
/**
* avcodec_receive_frame() implementation for decoders.
*/
int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
/**
* Called by decoders to get the next packet for decoding.
*
@ -99,12 +94,6 @@ int ff_attach_decode_data(AVFrame *frame);
*/
int ff_copy_palette(void *dst, const AVPacket *src, void *logctx);
/**
* Perform decoder initialization and validation.
* Called when opening the decoder, before the FFCodec.init() call.
*/
int ff_decode_preinit(AVCodecContext *avctx);
/**
* Check that the provided frame dimensions are valid and set them on the codec
* context.
@ -150,4 +139,18 @@ int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
int ff_side_data_update_matrix_encoding(AVFrame *frame,
enum AVMatrixEncoding matrix_encoding);
/**
* Allocate a hwaccel frame private data if the provided avctx
* uses a hwaccel method that needs it. The private data will
* be refcounted via the AVBuffer API (if allocated).
*
* @param avctx The codec context
* @param hwaccel_picture_private Pointer to return hwaccel_picture_private
* @param hwaccel_priv_buf Pointer to return the AVBufferRef owning
* hwaccel_picture_private
* @return 0 on success, < 0 on error
*/
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private,
AVBufferRef **hwaccel_priv_buf);
#endif /* AVCODEC_DECODE_H */

Просмотреть файл

@ -24,14 +24,38 @@
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/samplefmt.h"
#include "avcodec.h"
#include "avcodec_internal.h"
#include "codec_internal.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "internal.h"
typedef struct EncodeContext {
AVCodecInternal avci;
/**
* This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only
* formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set).
* This is used to set said flag generically for said encoders.
*/
int intra_only_flag;
/**
* An audio frame with less than required samples has been submitted (and
* potentially padded with silence). Reject all subsequent frames.
*/
int last_audio_frame;
} EncodeContext;
static EncodeContext *encode_ctx(AVCodecInternal *avci)
{
return (EncodeContext*)avci;
}
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
{
if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
@ -157,7 +181,7 @@ static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src,
fail:
av_frame_unref(frame);
s->internal->last_audio_frame = 0;
encode_ctx(s->internal)->last_audio_frame = 0;
return ret;
}
@ -192,6 +216,21 @@ int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
av_frame_move_ref(frame, avci->buffer_frame);
#if FF_API_FRAME_KEY
FF_DISABLE_DEPRECATION_WARNINGS
if (frame->key_frame)
frame->flags |= AV_FRAME_FLAG_KEY;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
if (frame->interlaced_frame)
frame->flags |= AV_FRAME_FLAG_INTERLACED;
if (frame->top_field_first)
frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
return 0;
}
@ -356,7 +395,7 @@ static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt
} else
ret = encode_simple_receive_packet(avctx, avpkt);
if (ret >= 0)
avpkt->flags |= avci->intra_only_flag;
avpkt->flags |= encode_ctx(avci)->intra_only_flag;
if (ret == AVERROR_EOF)
avci->draining_done = 1;
@ -414,6 +453,7 @@ static int encode_generate_icc_profile(av_unused AVCodecContext *c, av_unused AV
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
AVCodecInternal *avci = avctx->internal;
EncodeContext *ec = encode_ctx(avci);
AVFrame *dst = avci->buffer_frame;
int ret;
@ -426,7 +466,7 @@ static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
/* check for valid frame size */
if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
if (ec->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
return AVERROR(EINVAL);
}
@ -435,7 +475,7 @@ static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
return AVERROR(EINVAL);
}
if (src->nb_samples < avctx->frame_size) {
avctx->internal->last_audio_frame = 1;
ec->last_audio_frame = 1;
if (!(avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)) {
int pad_samples = avci->pad_samples ? avci->pad_samples : avctx->frame_size;
int out_samples = (src->nb_samples + pad_samples - 1) / pad_samples * pad_samples;
@ -457,13 +497,6 @@ static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
finish:
#if FF_API_PKT_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
if (dst->pkt_duration && dst->pkt_duration != dst->duration)
dst->duration = dst->pkt_duration;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
ret = encode_generate_icc_profile(avctx, dst);
if (ret < 0)
@ -541,25 +574,38 @@ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *
static int encode_preinit_video(AVCodecContext *avctx)
{
const AVCodec *c = avctx->codec;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
int i;
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
if (!av_get_pix_fmt_name(avctx->pix_fmt)) {
av_log(avctx, AV_LOG_ERROR, "Invalid video pixel format: %d\n",
avctx->pix_fmt);
return AVERROR(EINVAL);
}
if (c->pix_fmts) {
for (i = 0; c->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == c->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
if (c->pix_fmts[i] == AV_PIX_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR,
"Specified pixel format %s is not supported by the %s encoder.\n",
av_get_pix_fmt_name(avctx->pix_fmt), c->name);
av_log(avctx, AV_LOG_ERROR, "Supported pixel formats:\n");
for (int p = 0; c->pix_fmts[p] != AV_PIX_FMT_NONE; p++) {
av_log(avctx, AV_LOG_ERROR, " %s\n",
av_get_pix_fmt_name(c->pix_fmts[p]));
}
return AVERROR(EINVAL);
}
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
if (c->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
c->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
c->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
c->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
c->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
avctx->color_range = AVCOL_RANGE_JPEG;
}
@ -574,6 +620,8 @@ static int encode_preinit_video(AVCodecContext *avctx)
return AVERROR(EINVAL);
}
#if FF_API_TICKS_PER_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->ticks_per_frame && avctx->time_base.num &&
avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
av_log(avctx, AV_LOG_ERROR,
@ -583,6 +631,8 @@ static int encode_preinit_video(AVCodecContext *avctx)
avctx->time_base.den);
return AVERROR(EINVAL);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->hw_frames_ctx) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
@ -608,52 +658,78 @@ static int encode_preinit_video(AVCodecContext *avctx)
static int encode_preinit_audio(AVCodecContext *avctx)
{
const AVCodec *c = avctx->codec;
int i;
if (avctx->codec->sample_fmts) {
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
if (!av_get_sample_fmt_name(avctx->sample_fmt)) {
av_log(avctx, AV_LOG_ERROR, "Invalid audio sample format: %d\n",
avctx->sample_fmt);
return AVERROR(EINVAL);
}
if (avctx->sample_rate <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid audio sample rate: %d\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
if (c->sample_fmts) {
for (i = 0; c->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
if (avctx->sample_fmt == c->sample_fmts[i])
break;
if (avctx->ch_layout.nb_channels == 1 &&
av_get_planar_sample_fmt(avctx->sample_fmt) ==
av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
avctx->sample_fmt = avctx->codec->sample_fmts[i];
av_get_planar_sample_fmt(c->sample_fmts[i])) {
avctx->sample_fmt = c->sample_fmts[i];
break;
}
}
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
if (c->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR,
"Specified sample format %s is not supported by the %s encoder\n",
av_get_sample_fmt_name(avctx->sample_fmt), c->name);
av_log(avctx, AV_LOG_ERROR, "Supported sample formats:\n");
for (int p = 0; c->sample_fmts[p] != AV_SAMPLE_FMT_NONE; p++) {
av_log(avctx, AV_LOG_ERROR, " %s\n",
av_get_sample_fmt_name(c->sample_fmts[p]));
}
return AVERROR(EINVAL);
}
}
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
if (c->supported_samplerates) {
for (i = 0; c->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == c->supported_samplerates[i])
break;
if (avctx->codec->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
if (c->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR,
"Specified sample rate %d is not supported by the %s encoder\n",
avctx->sample_rate, c->name);
av_log(avctx, AV_LOG_ERROR, "Supported sample rates:\n");
for (int p = 0; c->supported_samplerates[p]; p++)
av_log(avctx, AV_LOG_ERROR, " %d\n", c->supported_samplerates[p]);
return AVERROR(EINVAL);
}
}
if (avctx->sample_rate < 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
if (avctx->codec->ch_layouts) {
for (i = 0; avctx->codec->ch_layouts[i].nb_channels; i++) {
if (!av_channel_layout_compare(&avctx->ch_layout, &avctx->codec->ch_layouts[i]))
if (c->ch_layouts) {
for (i = 0; c->ch_layouts[i].nb_channels; i++) {
if (!av_channel_layout_compare(&avctx->ch_layout, &c->ch_layouts[i]))
break;
}
if (!avctx->codec->ch_layouts[i].nb_channels) {
if (!c->ch_layouts[i].nb_channels) {
char buf[512];
int ret = av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf));
if (ret > 0)
av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
av_log(avctx, AV_LOG_ERROR,
"Specified channel layout '%s' is not supported by the %s encoder\n",
ret > 0 ? buf : "?", c->name);
av_log(avctx, AV_LOG_ERROR, "Supported channel layouts:\n");
for (int p = 0; c->ch_layouts[p].nb_channels; p++) {
ret = av_channel_layout_describe(&c->ch_layouts[p], buf, sizeof(buf));
av_log(avctx, AV_LOG_ERROR, " %s\n", ret > 0 ? buf : "?");
}
return AVERROR(EINVAL);
}
}
@ -667,6 +743,7 @@ static int encode_preinit_audio(AVCodecContext *avctx)
int ff_encode_preinit(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
EncodeContext *ec = encode_ctx(avci);
int ret = 0;
if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
@ -697,7 +774,7 @@ int ff_encode_preinit(AVCodecContext *avctx)
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY)
avctx->internal->intra_only_flag = AV_PKT_FLAG_KEY;
ec->intra_only_flag = AV_PKT_FLAG_KEY;
if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_ENCODE) {
avci->in_frame = av_frame_alloc();
@ -772,3 +849,18 @@ int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
av_frame_move_ref(frame, avci->recon_frame);
return 0;
}
/**
 * Reset the frames buffered by the generic encoding layer so that a
 * flushed encoder starts from a clean slate.
 */
void ff_encode_flush_buffers(AVCodecContext *avctx)
{
    AVCodecInternal *priv = avctx->internal;

    /* Drop any input frame held for a pending encode call. */
    if (priv->in_frame)
        av_frame_unref(priv->in_frame);
    /* Drop any reconstructed frame awaiting retrieval by the caller. */
    if (priv->recon_frame)
        av_frame_unref(priv->recon_frame);
}
/**
 * Allocate the codec-internal context used on the encoding side.
 *
 * NOTE(review): an EncodeContext is allocated but returned as
 * AVCodecInternal*, which is only valid if EncodeContext begins with an
 * embedded AVCodecInternal — presumably it does (encode_ctx() at L3006
 * converts in the other direction); confirm against the EncodeContext
 * definition in encode.c.
 */
AVCodecInternal *ff_encode_internal_alloc(void)
{
    return av_mallocz(sizeof(EncodeContext));
}

Просмотреть файл

@ -26,11 +26,6 @@
#include "avcodec.h"
#include "packet.h"
/**
* avcodec_receive_frame() implementation for encoders.
*/
int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
/**
* Called by encoders to get the next frame for encoding.
*
@ -75,12 +70,6 @@ int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
int ff_encode_reordered_opaque(AVCodecContext *avctx,
AVPacket *pkt, const AVFrame *frame);
/*
* Perform encoder initialization and validation.
* Called when opening the encoder, before the FFCodec.init() call.
*/
int ff_encode_preinit(AVCodecContext *avctx);
int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
AVFrame *frame, int *got_packet);

Просмотреть файл

@ -18,7 +18,6 @@
#include "libavutil/attributes.h"
#include "avcodec.h"
#include "dct.h"
#include "faandct.h"
#include "fdctdsp.h"
#include "config.h"

Просмотреть файл

@ -21,17 +21,28 @@
#include <stdint.h>
#include "avcodec.h"
#include "libavutil/attributes_internal.h"
struct AVCodecContext;
typedef struct FDCTDSPContext {
void (*fdct)(int16_t *block /* align 16 */);
void (*fdct248)(int16_t *block /* align 16 */);
} FDCTDSPContext;
void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx);
void ff_fdctdsp_init_ppc(FDCTDSPContext *c, AVCodecContext *avctx,
FF_VISIBILITY_PUSH_HIDDEN
void ff_fdctdsp_init(FDCTDSPContext *c, struct AVCodecContext *avctx);
void ff_fdctdsp_init_ppc(FDCTDSPContext *c, struct AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
void ff_fdctdsp_init_x86(FDCTDSPContext *c, struct AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_fdct_ifast(int16_t *data);
void ff_fdct_ifast248(int16_t *data);
void ff_jpeg_fdct_islow_8(int16_t *data);
void ff_jpeg_fdct_islow_10(int16_t *data);
void ff_fdct248_islow_8(int16_t *data);
void ff_fdct248_islow_10(int16_t *data);
FF_VISIBILITY_POP_HIDDEN
#endif /* AVCODEC_FDCTDSP_H */

Просмотреть файл

@ -513,7 +513,7 @@ static int decode_subframe_lpc_33bps(FLACContext *s, int64_t *decoded,
for (i = pred_order; i < s->blocksize; i++, decoded++) {
int64_t sum = 0;
for (j = 0; j < pred_order; j++)
sum += (int64_t)coeffs[j] * decoded[j];
sum += (int64_t)coeffs[j] * (uint64_t)decoded[j];
decoded[j] = residual[i] + (sum >> qlevel);
}

Просмотреть файл

@ -37,5 +37,6 @@ void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_mips(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_loongarch(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_riscv(H264ChromaContext *c, int bit_depth);
#endif /* AVCODEC_H264CHROMA_H */

Просмотреть файл

@ -0,0 +1,179 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* Header providing the internals of AVHWAccel.
*/
#ifndef AVCODEC_HWACCEL_INTERNAL_H
#define AVCODEC_HWACCEL_INTERNAL_H
#include <stdint.h>
#include "avcodec.h"
#define HWACCEL_CAP_ASYNC_SAFE (1 << 0)
#define HWACCEL_CAP_THREAD_SAFE (1 << 1)
/**
 * FFmpeg-internal extension of the public AVHWAccel: the public struct is
 * embedded as the first member (p), so a const AVHWAccel * pointing at one
 * of these objects can be converted back to the internal type.
 */
typedef struct FFHWAccel {
    /**
     * The public AVHWAccel. See avcodec.h for it.
     */
    AVHWAccel p;
    /**
     * Allocate a custom buffer
     */
    int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
    /**
     * Called at the beginning of each frame or field picture.
     *
     * Meaningful frame information (codec specific) is guaranteed to
     * be parsed at this point. This function is mandatory.
     *
     * Note that buf can be NULL along with buf_size set to 0.
     * Otherwise, this means the whole frame is available at this point.
     *
     * @param avctx the codec context
     * @param buf the frame data buffer base
     * @param buf_size the size of the frame in bytes
     * @return zero if successful, a negative value otherwise
     */
    int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
    /**
     * Callback for parameter data (SPS/PPS/VPS etc).
     *
     * Useful for hardware decoders which keep persistent state about the
     * video parameters, and need to receive any changes to update that state.
     *
     * @param avctx the codec context
     * @param type the nal unit type
     * @param buf the nal unit data buffer
     * @param buf_size the size of the nal unit in bytes
     * @return zero if successful, a negative value otherwise
     */
    int (*decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size);
    /**
     * Callback for each slice.
     *
     * Meaningful slice information (codec specific) is guaranteed to
     * be parsed at this point. This function is mandatory.
     *
     * @param avctx the codec context
     * @param buf the slice data buffer base
     * @param buf_size the size of the slice in bytes
     * @return zero if successful, a negative value otherwise
     */
    int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
    /**
     * Called at the end of each frame or field picture.
     *
     * The whole picture is parsed at this point and can now be sent
     * to the hardware accelerator. This function is mandatory.
     *
     * @param avctx the codec context
     * @return zero if successful, a negative value otherwise
     */
    int (*end_frame)(AVCodecContext *avctx);
    /**
     * Size of per-frame hardware accelerator private data.
     *
     * Private data is allocated with av_mallocz() before
     * AVCodecContext.get_buffer() and deallocated after
     * AVCodecContext.release_buffer().
     */
    int frame_priv_data_size;
    /**
     * Size of the private data to allocate in
     * AVCodecInternal.hwaccel_priv_data.
     */
    int priv_data_size;
    /**
     * Internal hwaccel capabilities.
     */
    int caps_internal;
    /**
     * Initialize the hwaccel private data.
     *
     * This will be called from ff_get_format(), after hwaccel and
     * hwaccel_context are set and the hwaccel private data in AVCodecInternal
     * is allocated.
     */
    int (*init)(AVCodecContext *avctx);
    /**
     * Uninitialize the hwaccel private data.
     *
     * This will be called from get_format() or avcodec_close(), after hwaccel
     * and hwaccel_context are already uninitialized.
     */
    int (*uninit)(AVCodecContext *avctx);
    /**
     * Fill the given hw_frames context with current codec parameters. Called
     * from get_format. Refer to avcodec_get_hw_frames_parameters() for
     * details.
     *
     * This CAN be called before AVHWAccel.init is called, and you must assume
     * that avctx->hwaccel_priv_data is invalid.
     */
    int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx);
    /**
     * Copy necessary context variables from a previous thread context to the current one.
     * For thread-safe hwaccels only.
     */
    int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
    /**
     * Callback to free the hwaccel-specific frame data.
     *
     * @param hwctx a pointer to an AVHWDeviceContext.
     * @param data the per-frame hardware accelerator private data to be freed.
     */
    void (*free_frame_priv)(void *hwctx, uint8_t *data);
    /**
     * Callback to flush the hwaccel state.
     */
    void (*flush)(AVCodecContext *avctx);
} FFHWAccel;
/* Narrow a public AVHWAccel pointer back to the internal FFHWAccel.
 * Valid because AVHWAccel is the first member (p) of FFHWAccel. */
static inline const FFHWAccel *ffhwaccel(const AVHWAccel *codec)
{
    return (const FFHWAccel*)codec;
}

/* Invoke an FFHWAccel callback that takes (avctx, ...) arguments. */
#define FF_HW_CALL(avctx, function, ...) \
    (ffhwaccel((avctx)->hwaccel)->function((avctx), __VA_ARGS__))

/* Invoke an FFHWAccel callback that takes only the codec context. */
#define FF_HW_SIMPLE_CALL(avctx, function) \
    (ffhwaccel((avctx)->hwaccel)->function(avctx))

/* True if a hwaccel is active and provides the given callback. */
#define FF_HW_HAS_CB(avctx, function) \
    ((avctx)->hwaccel && ffhwaccel((avctx)->hwaccel)->function)

#endif /* AVCODEC_HWACCEL_INTERNAL_H */

Просмотреть файл

@ -19,67 +19,68 @@
#ifndef AVCODEC_HWACCELS_H
#define AVCODEC_HWACCELS_H
#include "avcodec.h"
extern const AVHWAccel ff_av1_d3d11va_hwaccel;
extern const AVHWAccel ff_av1_d3d11va2_hwaccel;
extern const AVHWAccel ff_av1_dxva2_hwaccel;
extern const AVHWAccel ff_av1_nvdec_hwaccel;
extern const AVHWAccel ff_av1_vaapi_hwaccel;
extern const AVHWAccel ff_av1_vdpau_hwaccel;
extern const AVHWAccel ff_h263_vaapi_hwaccel;
extern const AVHWAccel ff_h263_videotoolbox_hwaccel;
extern const AVHWAccel ff_h264_d3d11va_hwaccel;
extern const AVHWAccel ff_h264_d3d11va2_hwaccel;
extern const AVHWAccel ff_h264_dxva2_hwaccel;
extern const AVHWAccel ff_h264_nvdec_hwaccel;
extern const AVHWAccel ff_h264_vaapi_hwaccel;
extern const AVHWAccel ff_h264_vdpau_hwaccel;
extern const AVHWAccel ff_h264_videotoolbox_hwaccel;
extern const AVHWAccel ff_hevc_d3d11va_hwaccel;
extern const AVHWAccel ff_hevc_d3d11va2_hwaccel;
extern const AVHWAccel ff_hevc_dxva2_hwaccel;
extern const AVHWAccel ff_hevc_nvdec_hwaccel;
extern const AVHWAccel ff_hevc_vaapi_hwaccel;
extern const AVHWAccel ff_hevc_vdpau_hwaccel;
extern const AVHWAccel ff_hevc_videotoolbox_hwaccel;
extern const AVHWAccel ff_mjpeg_nvdec_hwaccel;
extern const AVHWAccel ff_mjpeg_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg1_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg1_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg1_videotoolbox_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel;
extern const AVHWAccel ff_mpeg2_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg2_dxva2_hwaccel;
extern const AVHWAccel ff_mpeg2_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg2_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel;
extern const AVHWAccel ff_mpeg4_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg4_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg4_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg4_videotoolbox_hwaccel;
extern const AVHWAccel ff_prores_videotoolbox_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va2_hwaccel;
extern const AVHWAccel ff_vc1_dxva2_hwaccel;
extern const AVHWAccel ff_vc1_nvdec_hwaccel;
extern const AVHWAccel ff_vc1_vaapi_hwaccel;
extern const AVHWAccel ff_vc1_vdpau_hwaccel;
extern const AVHWAccel ff_vp8_nvdec_hwaccel;
extern const AVHWAccel ff_vp8_vaapi_hwaccel;
extern const AVHWAccel ff_vp9_d3d11va_hwaccel;
extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
extern const AVHWAccel ff_vp9_dxva2_hwaccel;
extern const AVHWAccel ff_vp9_nvdec_hwaccel;
extern const AVHWAccel ff_vp9_vaapi_hwaccel;
extern const AVHWAccel ff_vp9_vdpau_hwaccel;
extern const AVHWAccel ff_vp9_videotoolbox_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va2_hwaccel;
extern const AVHWAccel ff_wmv3_dxva2_hwaccel;
extern const AVHWAccel ff_wmv3_nvdec_hwaccel;
extern const AVHWAccel ff_wmv3_vaapi_hwaccel;
extern const AVHWAccel ff_wmv3_vdpau_hwaccel;
extern const struct FFHWAccel ff_av1_d3d11va_hwaccel;
extern const struct FFHWAccel ff_av1_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_av1_dxva2_hwaccel;
extern const struct FFHWAccel ff_av1_nvdec_hwaccel;
extern const struct FFHWAccel ff_av1_vaapi_hwaccel;
extern const struct FFHWAccel ff_av1_vdpau_hwaccel;
extern const struct FFHWAccel ff_av1_vulkan_hwaccel;
extern const struct FFHWAccel ff_h263_vaapi_hwaccel;
extern const struct FFHWAccel ff_h263_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_h264_d3d11va_hwaccel;
extern const struct FFHWAccel ff_h264_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_h264_dxva2_hwaccel;
extern const struct FFHWAccel ff_h264_nvdec_hwaccel;
extern const struct FFHWAccel ff_h264_vaapi_hwaccel;
extern const struct FFHWAccel ff_h264_vdpau_hwaccel;
extern const struct FFHWAccel ff_h264_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_h264_vulkan_hwaccel;
extern const struct FFHWAccel ff_hevc_d3d11va_hwaccel;
extern const struct FFHWAccel ff_hevc_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_hevc_dxva2_hwaccel;
extern const struct FFHWAccel ff_hevc_nvdec_hwaccel;
extern const struct FFHWAccel ff_hevc_vaapi_hwaccel;
extern const struct FFHWAccel ff_hevc_vdpau_hwaccel;
extern const struct FFHWAccel ff_hevc_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_hevc_vulkan_hwaccel;
extern const struct FFHWAccel ff_mjpeg_nvdec_hwaccel;
extern const struct FFHWAccel ff_mjpeg_vaapi_hwaccel;
extern const struct FFHWAccel ff_mpeg1_nvdec_hwaccel;
extern const struct FFHWAccel ff_mpeg1_vdpau_hwaccel;
extern const struct FFHWAccel ff_mpeg1_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_mpeg2_d3d11va_hwaccel;
extern const struct FFHWAccel ff_mpeg2_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_mpeg2_dxva2_hwaccel;
extern const struct FFHWAccel ff_mpeg2_nvdec_hwaccel;
extern const struct FFHWAccel ff_mpeg2_vaapi_hwaccel;
extern const struct FFHWAccel ff_mpeg2_vdpau_hwaccel;
extern const struct FFHWAccel ff_mpeg2_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_mpeg4_nvdec_hwaccel;
extern const struct FFHWAccel ff_mpeg4_vaapi_hwaccel;
extern const struct FFHWAccel ff_mpeg4_vdpau_hwaccel;
extern const struct FFHWAccel ff_mpeg4_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_prores_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_vc1_d3d11va_hwaccel;
extern const struct FFHWAccel ff_vc1_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_vc1_dxva2_hwaccel;
extern const struct FFHWAccel ff_vc1_nvdec_hwaccel;
extern const struct FFHWAccel ff_vc1_vaapi_hwaccel;
extern const struct FFHWAccel ff_vc1_vdpau_hwaccel;
extern const struct FFHWAccel ff_vp8_nvdec_hwaccel;
extern const struct FFHWAccel ff_vp8_vaapi_hwaccel;
extern const struct FFHWAccel ff_vp9_d3d11va_hwaccel;
extern const struct FFHWAccel ff_vp9_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_vp9_dxva2_hwaccel;
extern const struct FFHWAccel ff_vp9_nvdec_hwaccel;
extern const struct FFHWAccel ff_vp9_vaapi_hwaccel;
extern const struct FFHWAccel ff_vp9_vdpau_hwaccel;
extern const struct FFHWAccel ff_vp9_videotoolbox_hwaccel;
extern const struct FFHWAccel ff_wmv3_d3d11va_hwaccel;
extern const struct FFHWAccel ff_wmv3_d3d11va2_hwaccel;
extern const struct FFHWAccel ff_wmv3_dxva2_hwaccel;
extern const struct FFHWAccel ff_wmv3_nvdec_hwaccel;
extern const struct FFHWAccel ff_wmv3_vaapi_hwaccel;
extern const struct FFHWAccel ff_wmv3_vdpau_hwaccel;
#endif /* AVCODEC_HWACCELS_H */

Просмотреть файл

@ -22,10 +22,6 @@
#include "avcodec.h"
#include "hwaccels.h"
#define HWACCEL_CAP_ASYNC_SAFE (1 << 0)
typedef struct AVCodecHWConfigInternal {
/**
* This is the structure which will be returned to the user by
@ -36,9 +32,10 @@ typedef struct AVCodecHWConfigInternal {
* If this configuration uses a hwaccel, a pointer to it.
* If not, NULL.
*/
const AVHWAccel *hwaccel;
const struct FFHWAccel *hwaccel;
} AVCodecHWConfigInternal;
void ff_hwaccel_uninit(AVCodecContext *avctx);
// These macros are used to simplify AVCodecHWConfigInternal definitions.
@ -76,6 +73,8 @@ typedef struct AVCodecHWConfigInternal {
HW_CONFIG_HWACCEL(1, 1, 1, VDPAU, VDPAU, ff_ ## codec ## _vdpau_hwaccel)
#define HWACCEL_VIDEOTOOLBOX(codec) \
HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
#define HWACCEL_VULKAN(codec) \
HW_CONFIG_HWACCEL(1, 1, 1, VULKAN, VULKAN, ff_ ## codec ## _vulkan_hwaccel)
#define HWACCEL_D3D11VA(codec) \
HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)

Просмотреть файл

@ -56,12 +56,6 @@ typedef struct AVCodecInternal {
*/
int is_copy;
/**
* An audio frame with less than required samples has been submitted (and
* potentially padded with silence). Reject all subsequent frames.
*/
int last_audio_frame;
/**
* Audio encoders can set this flag during init to indicate that they
* want the small last frame to be padded to a multiple of pad_samples.
@ -95,13 +89,6 @@ typedef struct AVCodecInternal {
uint8_t *byte_buffer;
unsigned int byte_buffer_size;
/**
* This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only
* formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set).
* This is used to set said flag generically for said encoders.
*/
int intra_only_flag;
void *frame_thread_encoder;
/**
@ -148,17 +135,14 @@ typedef struct AVCodecInternal {
AVFrame *buffer_frame;
int draining_done;
int showed_multi_packet_warning;
/* to prevent infinite loop on errors when draining */
int nb_draining_errors;
#if FF_API_DROPCHANGED
/* used when avctx flag AV_CODEC_FLAG_DROPCHANGED is set */
int changed_frames_dropped;
int initial_format;
int initial_width, initial_height;
int initial_sample_rate;
AVChannelLayout initial_ch_layout;
#endif
#if CONFIG_LCMS2
FFIccContext icc; /* used to read and write embedded ICC profiles */
@ -173,15 +157,6 @@ int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
unsigned int ff_toupper4(unsigned int x);
void ff_color_frame(AVFrame *frame, const int color[4]);
/**
* Maximum size in bytes of extradata.
* This value was chosen such that every bit of the buffer is
* addressable by a 32-bit signed integer as used by get_bits.
*/
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
/**
* 2^(x) for integer x
* @return correctly rounded float
@ -232,16 +207,4 @@ int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_l
*/
int64_t ff_guess_coded_bitrate(AVCodecContext *avctx);
/**
* Check if a value is in the list. If not, return the default value
*
* @param ctx Context for the log msg
* @param val_name Name of the checked value, for log msg
* @param array_valid_values Array of valid int, ended with INT_MAX
* @param default_value Value return if checked value is not in the array
* @return Value or default_value.
*/
int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
const int * array_valid_values, int default_value);
#endif /* AVCODEC_INTERNAL_H */

Просмотреть файл

@ -68,7 +68,7 @@
#include <stdint.h>
#include "libavutil/attributes.h"
#include "dct.h"
#include "fdctdsp.h"
#define DCTSIZE 8
#define GLOBAL(x) x

Просмотреть файл

@ -60,7 +60,7 @@
*/
#include "libavutil/common.h"
#include "dct.h"
#include "fdctdsp.h"
#include "bit_depth_template.c"

Просмотреть файл

@ -30,6 +30,7 @@
#include "libavutil/opt.h"
#include "atsc_a53.h"
#include "av1_parse.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
@ -154,12 +155,9 @@ static void libdav1d_init_params(AVCodecContext *c, const Dav1dSequenceHeader *s
else
c->pix_fmt = pix_fmt[seq->layout][seq->hbd];
if (seq->num_units_in_tick && seq->time_scale) {
av_reduce(&c->framerate.den, &c->framerate.num,
seq->num_units_in_tick, seq->time_scale, INT_MAX);
if (seq->equal_picture_interval)
c->ticks_per_frame = seq->num_ticks_per_picture;
}
c->framerate = ff_av1_framerate(seq->num_ticks_per_picture,
(unsigned)seq->num_units_in_tick,
(unsigned)seq->time_scale);
if (seq->film_grain_present)
c->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
@ -278,6 +276,15 @@ static av_cold int libdav1d_init(AVCodecContext *c)
if (res < 0)
return AVERROR(ENOMEM);
#if FF_DAV1D_VERSION_AT_LEAST(6,7)
res = dav1d_get_frame_delay(&s);
if (res < 0) // Should not happen
return AVERROR_EXTERNAL;
// When dav1d_get_frame_delay() returns 1, there's no delay whatsoever
c->delay = res > 1 ? res : 0;
#endif
return 0;
}
@ -309,20 +316,14 @@ static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
av_packet_free(&pkt);
}
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
static int libdav1d_receive_frame_internal(AVCodecContext *c, Dav1dPicture *p)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dData *data = &dav1d->data;
Dav1dPicture pic = { 0 }, *p = &pic;
AVPacket *pkt;
OpaqueData *od = NULL;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
enum Dav1dEventFlags event_flags = 0;
#endif
int res;
if (!data->sz) {
pkt = av_packet_alloc();
AVPacket *pkt = av_packet_alloc();
if (!pkt)
return AVERROR(ENOMEM);
@ -334,6 +335,8 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
}
if (pkt->size) {
OpaqueData *od = NULL;
res = dav1d_data_wrap(data, pkt->data, pkt->size,
libdav1d_data_free, pkt->buf);
if (res < 0) {
@ -393,12 +396,31 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (res < 0) {
if (res == AVERROR(EINVAL))
res = AVERROR_INVALIDDATA;
else if (res == AVERROR(EAGAIN) && c->internal->draining)
res = AVERROR_EOF;
return res;
else if (res == AVERROR(EAGAIN))
res = c->internal->draining ? AVERROR_EOF : 1;
}
return res;
}
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dPicture pic = { 0 }, *p = &pic;
AVPacket *pkt;
OpaqueData *od = NULL;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
enum Dav1dEventFlags event_flags = 0;
#endif
int res;
do {
res = libdav1d_receive_frame_internal(c, p);
} while (res > 0);
if (res < 0)
return res;
av_assert0(p->data[0] && p->allocator_data);
// This requires the custom allocator above
@ -463,7 +485,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
goto fail;
frame->pkt_dts = pkt->pts;
frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
if (p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY)
frame->flags |= AV_FRAME_FLAG_KEY;
else
frame->flags &= ~AV_FRAME_FLAG_KEY;
switch (p->frame_hdr->frame_type) {
case DAV1D_FRAME_TYPE_KEY:
@ -511,10 +536,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
light->MaxFALL = p->content_light->max_frame_average_light_level;
}
if (p->itut_t35) {
#if FF_DAV1D_VERSION_AT_LEAST(6,9)
for (size_t i = 0; i < p->n_itut_t35; i++) {
const Dav1dITUTT35 *itut_t35 = &p->itut_t35[i];
#else
const Dav1dITUTT35 *itut_t35 = p->itut_t35;
#endif
GetByteContext gb;
int provider_code;
bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
bytestream2_init(&gb, itut_t35->payload, itut_t35->payload_size);
provider_code = bytestream2_get_be16(&gb);
switch (provider_code) {
@ -546,7 +577,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
int provider_oriented_code = bytestream2_get_be16(&gb);
int application_identifier = bytestream2_get_byte(&gb);
if (p->itut_t35->country_code != 0xB5 ||
if (itut_t35->country_code != 0xB5 ||
provider_oriented_code != 1 || application_identifier != 4)
break;
@ -565,6 +596,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
default: // ignore unsupported provider codes
break;
}
#if FF_DAV1D_VERSION_AT_LEAST(6,9)
}
#endif
}
if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
(c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))) {
@ -664,7 +698,7 @@ const FFCodec ff_libdav1d_decoder = {
.flush = libdav1d_flush,
FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_SETS_FRAME_PROPS |
.caps_internal = FF_CODEC_CAP_SETS_FRAME_PROPS |
FF_CODEC_CAP_AUTO_THREADS,
.p.priv_class = &libdav1d_class,
.p.wrapper_name = "libdav1d",

Просмотреть файл

@ -89,7 +89,7 @@ void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx);
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
void ff_dsputil_init_dwt(MECmpContext *c);

Просмотреть файл

@ -86,6 +86,7 @@ if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
SOURCES += [
'atsc_a53.c',
'av1_frame_split_bsf.c',
'av1_parse.c',
'av1dec.c',
'avpicture.c',
'cbs.c',
@ -100,6 +101,7 @@ if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
'videodsp.c',
'vp8.c',
'vp8_parser.c',
'vp8data.c',
'vp8dsp.c',
'vp9.c',
'vp9_parser.c',

Просмотреть файл

@ -50,7 +50,7 @@ static const AVOption avcodec_options[] = {
{"bt", "Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate tolerance specifies how far "
"ratecontrol is willing to deviate from the target average bitrate value. This is not related "
"to minimum/maximum bitrate. Lowering tolerance too much has an adverse effect on quality.",
OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.i64 = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.i64 = AV_CODEC_DEFAULT_BITRATE*20 }, 0, INT_MAX, A|V|E},
{"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, UINT_MAX, V|A|S|E|D, "flags"},
{"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = AV_CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
{"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
@ -72,7 +72,9 @@ static const AVOption avcodec_options[] = {
{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
{"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
{"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
{"drop_changed", "Drop frames whose parameters differ from first decoded frame", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_DROPCHANGED }, INT_MIN, INT_MAX, A|V|D, "flags"},
#if FF_API_DROPCHANGED
{"drop_changed", "Drop frames whose parameters differ from first decoded frame", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_DROPCHANGED }, INT_MIN, INT_MAX, A|V|D | AV_OPT_FLAG_DEPRECATED, "flags"},
#endif
{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, V|A|E|D|S, "flags2"},
{"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
@ -259,8 +261,8 @@ static const AVOption avcodec_options[] = {
{"default" , "discard useless frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"noref" , "discard all non-reference frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONREF }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"bidir" , "discard all bidirectional frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_BIDIR }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"nokey" , "discard all frames except keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONKEY }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"nointra" , "discard all frames except I frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONINTRA}, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"nokey" , "discard all frames except keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONKEY }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"all" , "discard all frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_ALL }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 4, V|E},
{"keyint_min", "minimum interval between IDR-frames", OFFSET(keyint_min), AV_OPT_TYPE_INT, {.i64 = 25 }, INT_MIN, INT_MAX, V|E},
@ -276,7 +278,9 @@ static const AVOption avcodec_options[] = {
#endif
{"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, 0.0, FLT_MAX, V|E},
{"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), AV_OPT_TYPE_FLOAT, {.dbl = 3 }, 0.0, FLT_MAX, V|E},
#if FF_API_TICKS_PER_FRAME
{"ticks_per_frame", NULL, OFFSET(ticks_per_frame), AV_OPT_TYPE_INT, {.i64 = 1 }, 1, INT_MAX, A|V|E|D},
#endif
{"color_primaries", "color primaries", OFFSET(color_primaries), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_UNSPECIFIED }, 1, INT_MAX, V|E|D, "color_primaries_type"},
{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},

Просмотреть файл

@ -23,6 +23,8 @@
#include "packet.h"
#define AVPACKET_IS_EMPTY(pkt) (!(pkt)->data && !(pkt)->side_data_elems)
typedef struct PacketListEntry {
struct PacketListEntry *next;
AVPacket pkt;

Просмотреть файл

@ -166,6 +166,10 @@ int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
#define FILL(name) if(s->name > 0 && avctx->name <= 0) avctx->name = s->name
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
FILL(field_order);
FILL(coded_width);
FILL(coded_height);
FILL(width);
FILL(height);
}
/* update the file pointer */

Просмотреть файл

@ -41,6 +41,7 @@ extern const AVCodecParser ff_dvaudio_parser;
extern const AVCodecParser ff_dvbsub_parser;
extern const AVCodecParser ff_dvdsub_parser;
extern const AVCodecParser ff_dvd_nav_parser;
extern const AVCodecParser ff_evc_parser;
extern const AVCodecParser ff_flac_parser;
extern const AVCodecParser ff_ftr_parser;
extern const AVCodecParser ff_g723_1_parser;
@ -74,6 +75,7 @@ extern const AVCodecParser ff_vorbis_parser;
extern const AVCodecParser ff_vp3_parser;
extern const AVCodecParser ff_vp8_parser;
extern const AVCodecParser ff_vp9_parser;
extern const AVCodecParser ff_vvc_parser;
extern const AVCodecParser ff_webp_parser;
extern const AVCodecParser ff_xbm_parser;
extern const AVCodecParser ff_xma_parser;

Просмотреть файл

@ -578,7 +578,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \
FF_CODEC_DECODE_CB(pcm_decode_frame), \
.p.capabilities = AV_CODEC_CAP_DR1, \
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE, \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
}

Просмотреть файл

@ -194,4 +194,10 @@ const AVProfile ff_arib_caption_profiles[] = {
{ FF_PROFILE_UNKNOWN }
};
// Profile table for EVC (Essential Video Coding), terminated by the
// FF_PROFILE_UNKNOWN sentinel entry as required by the AVProfile API.
const AVProfile ff_evc_profiles[] = {
    { FF_PROFILE_EVC_BASELINE, "Baseline" },
    { FF_PROFILE_EVC_MAIN, "Main" },
    { FF_PROFILE_UNKNOWN },
};
#endif /* !CONFIG_SMALL */

Просмотреть файл

@ -74,5 +74,6 @@ extern const AVProfile ff_sbc_profiles[];
extern const AVProfile ff_prores_profiles[];
extern const AVProfile ff_mjpeg_profiles[];
extern const AVProfile ff_arib_caption_profiles[];
extern const AVProfile ff_evc_profiles[];
#endif /* AVCODEC_PROFILES_H */

Просмотреть файл

@ -28,8 +28,10 @@
#include <stdint.h>
#include "avcodec.h"
#include "avcodec_internal.h"
#include "codec_internal.h"
#include "decode.h"
#include "hwaccel_internal.h"
#include "hwconfig.h"
#include "internal.h"
#include "pthread_internal.h"
@ -104,6 +106,12 @@ typedef struct PerThreadContext {
int hwaccel_serializing;
int async_serializing;
// set to 1 in ff_thread_finish_setup() when a threadsafe hwaccel is used;
// cannot check hwaccel caps directly, because
// worked threads clear hwaccel state for thread-unsafe hwaccels
// after each decode call
int hwaccel_threadsafe;
atomic_int debug_threads; ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;
@ -117,8 +125,8 @@ typedef struct FrameThreadContext {
unsigned pthread_init_cnt; ///< Number of successfully initialized mutexes/conditions
pthread_mutex_t buffer_mutex; ///< Mutex used to protect get/release_buffer().
/**
* This lock is used for ensuring threads run in serial when hwaccel
* is used.
* This lock is used for ensuring threads run in serial when thread-unsafe
* hwaccel is used.
*/
pthread_mutex_t hwaccel_mutex;
pthread_mutex_t async_mutex;
@ -133,13 +141,19 @@ typedef struct FrameThreadContext {
* While it is set, ff_thread_en/decode_frame won't return any results.
*/
/* hwaccel state is temporarily stored here in order to transfer its ownership
* to the next decoding thread without the need for extra synchronization */
/* hwaccel state for thread-unsafe hwaccels is temporarily stored here in
* order to transfer its ownership to the next decoding thread without the
* need for extra synchronization */
const AVHWAccel *stash_hwaccel;
void *stash_hwaccel_context;
void *stash_hwaccel_priv;
} FrameThreadContext;
/**
 * Report whether the hwaccel attached to avctx requires serialized
 * execution of the frame-worker threads.
 *
 * @return 1 when a hwaccel is in use and it is NOT flagged
 *         HWACCEL_CAP_THREAD_SAFE, 0 otherwise.
 */
static int hwaccel_serial(const AVCodecContext *avctx)
{
    if (!avctx->hwaccel)
        return 0;
    return !(ffhwaccel(avctx->hwaccel)->caps_internal & HWACCEL_CAP_THREAD_SAFE);
}
static void async_lock(FrameThreadContext *fctx)
{
pthread_mutex_lock(&fctx->async_mutex);
@ -202,9 +216,9 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
* cannot be true here. */
av_assert0(!p->hwaccel_serializing);
/* if the previous thread uses hwaccel then we take the lock to ensure
* the threads don't run concurrently */
if (avctx->hwaccel) {
/* if the previous thread uses thread-unsafe hwaccel then we take the
* lock to ensure the threads don't run concurrently */
if (hwaccel_serial(avctx)) {
pthread_mutex_lock(&p->parent->hwaccel_mutex);
p->hwaccel_serializing = 1;
}
@ -220,7 +234,8 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
ff_thread_finish_setup(avctx);
if (p->hwaccel_serializing) {
/* wipe hwaccel state to avoid stale pointers lying around;
/* wipe hwaccel state for thread-unsafe hwaccels to avoid stale
* pointers lying around;
* the state was transferred to FrameThreadContext in
* ff_thread_finish_setup(), so nothing is leaked */
avctx->hwaccel = NULL;
@ -230,7 +245,8 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
p->hwaccel_serializing = 0;
pthread_mutex_unlock(&p->parent->hwaccel_mutex);
}
av_assert0(!avctx->hwaccel);
av_assert0(!avctx->hwaccel ||
(ffhwaccel(avctx->hwaccel)->caps_internal & HWACCEL_CAP_THREAD_SAFE));
if (p->async_serializing) {
p->async_serializing = 0;
@ -286,7 +302,11 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
dst->level = src->level;
dst->bits_per_raw_sample = src->bits_per_raw_sample;
#if FF_API_TICKS_PER_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
dst->ticks_per_frame = src->ticks_per_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dst->color_primaries = src->color_primaries;
dst->color_trc = src->color_trc;
@ -328,8 +348,50 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (codec->update_thread_context_for_user)
err = codec->update_thread_context_for_user(dst, src);
} else {
if (codec->update_thread_context)
const PerThreadContext *p_src = src->internal->thread_ctx;
PerThreadContext *p_dst = dst->internal->thread_ctx;
if (codec->update_thread_context) {
err = codec->update_thread_context(dst, src);
if (err < 0)
return err;
}
// reset dst hwaccel state if needed
av_assert0(p_dst->hwaccel_threadsafe ||
(!dst->hwaccel && !dst->internal->hwaccel_priv_data));
if (p_dst->hwaccel_threadsafe &&
(!p_src->hwaccel_threadsafe || dst->hwaccel != src->hwaccel)) {
ff_hwaccel_uninit(dst);
p_dst->hwaccel_threadsafe = 0;
}
// propagate hwaccel state for threadsafe hwaccels
if (p_src->hwaccel_threadsafe) {
const FFHWAccel *hwaccel = ffhwaccel(src->hwaccel);
if (!dst->hwaccel) {
if (hwaccel->priv_data_size) {
av_assert0(hwaccel->update_thread_context);
dst->internal->hwaccel_priv_data =
av_mallocz(hwaccel->priv_data_size);
if (!dst->internal->hwaccel_priv_data)
return AVERROR(ENOMEM);
}
dst->hwaccel = src->hwaccel;
}
av_assert0(dst->hwaccel == src->hwaccel);
if (hwaccel->update_thread_context) {
err = hwaccel->update_thread_context(dst, src);
if (err < 0) {
av_log(dst, AV_LOG_ERROR, "Error propagating hwaccel state\n");
ff_hwaccel_uninit(dst);
return err;
}
}
p_dst->hwaccel_threadsafe = 1;
}
}
return err;
@ -437,10 +499,12 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
}
/* transfer the stashed hwaccel state, if any */
av_assert0(!p->avctx->hwaccel);
FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
av_assert0(!p->avctx->hwaccel || p->hwaccel_threadsafe);
if (!p->hwaccel_threadsafe) {
FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
}
av_packet_unref(p->avpkt);
ret = av_packet_ref(p->avpkt, avpkt);
@ -594,26 +658,32 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;
if (avctx->hwaccel && !p->hwaccel_serializing) {
p->hwaccel_threadsafe = avctx->hwaccel &&
(ffhwaccel(avctx->hwaccel)->caps_internal & HWACCEL_CAP_THREAD_SAFE);
if (hwaccel_serial(avctx) && !p->hwaccel_serializing) {
pthread_mutex_lock(&p->parent->hwaccel_mutex);
p->hwaccel_serializing = 1;
}
/* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
if (avctx->hwaccel &&
!(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
!(ffhwaccel(avctx->hwaccel)->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
p->async_serializing = 1;
async_lock(p->parent);
}
/* save hwaccel state for passing to the next thread;
/* thread-unsafe hwaccels share a single private data instance, so we
* save hwaccel state for passing to the next thread;
* this is done here so that this worker thread can wipe its own hwaccel
* state after decoding, without requiring synchronization */
av_assert0(!p->parent->stash_hwaccel);
p->parent->stash_hwaccel = avctx->hwaccel;
p->parent->stash_hwaccel_context = avctx->hwaccel_context;
p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
if (hwaccel_serial(avctx)) {
p->parent->stash_hwaccel = avctx->hwaccel;
p->parent->stash_hwaccel_context = avctx->hwaccel_context;
p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
}
pthread_mutex_lock(&p->progress_mutex);
if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
@ -684,6 +754,10 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
if (codec->close && p->thread_init != UNINITIALIZED)
codec->close(ctx);
/* When using a threadsafe hwaccel, this is where
* each thread's context is uninit'd and freed. */
ff_hwaccel_uninit(ctx);
if (ctx->priv_data) {
if (codec->p.priv_class)
av_opt_free(ctx->priv_data);
@ -744,7 +818,7 @@ static av_cold int init_thread(PerThreadContext *p, int *threads_to_free,
p->parent = fctx;
p->avctx = copy;
copy->internal = av_mallocz(sizeof(*copy->internal));
copy->internal = ff_decode_internal_alloc();
if (!copy->internal)
return AVERROR(ENOMEM);
copy->internal->thread_ctx = p;

Просмотреть файл

@ -84,6 +84,9 @@ void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
int ff_thread_replace_frame(AVCodecContext *avctx, ThreadFrame *dst,
const ThreadFrame *src);
int ff_thread_can_start_frame(AVCodecContext *avctx);
#endif

Просмотреть файл

@ -317,7 +317,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
}
if (s->codec_id == AV_CODEC_ID_IFF_ILBM) {
w_align = FFMAX(w_align, 8);
w_align = FFMAX(w_align, 16);
}
*width = FFALIGN(*width, w_align);
@ -406,34 +406,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
return ret;
}
/**
 * Fill every plane of a frame with a solid per-plane value.
 *
 * @param frame target frame; must use a planar pixel format (asserted
 *              below) with valid data pointers and linesizes
 * @param c     fill value for each plane, indexed by plane number
 *
 * 8-bit planes are filled row by row with memset(). For component
 * depths >= 9 the first sample is written, replicated across the
 * first row with av_memcpy_backptr(), and the first row is then
 * copied down the rest of the plane.
 */
void ff_color_frame(AVFrame *frame, const int c[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int p, y;

    av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR);

    for (p = 0; p<desc->nb_components; p++) {
        uint8_t *dst = frame->data[p];
        // planes 1 and 2 are chroma and may be subsampled
        int is_chroma = p == 1 || p == 2;
        // 'bytes' is a sample count, not a byte count: the 16-bit
        // branch below scales it by 2 when copying whole rows
        int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
        int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
        if (desc->comp[0].depth >= 9) {
            // write first sample, replicate it across the row, then
            // duplicate the row down the plane
            // NOTE(review): the backptr call replicates only 'bytes'
            // bytes while each row holds 2*bytes bytes -- looks like
            // only half of the first row is initialized; verify
            // against upstream before relying on this path
            ((uint16_t*)dst)[0] = c[p];
            av_memcpy_backptr(dst + 2, 2, bytes - 2);
            dst += frame->linesize[p];
            for (y = 1; y < height; y++) {
                memcpy(dst, frame->data[p], 2*bytes);
                dst += frame->linesize[p];
            }
        } else {
            for (y = 0; y < height; y++) {
                memset(dst, c[p], bytes);
                dst += frame->linesize[p];
            }
        }
    }
}
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){
return !!(ffcodec(codec)->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
@ -641,9 +613,9 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
if (sr > 0) {
/* calc from sample rate */
if (id == AV_CODEC_ID_TTA)
return 256 * sr / 245;
return 256ll * sr / 245;
else if (id == AV_CODEC_ID_DST)
return 588 * sr / 44100;
return 588ll * sr / 44100;
else if (id == AV_CODEC_ID_BINKAUDIO_DCT) {
if (sr / 22050 > 22)
return 0;
@ -915,6 +887,27 @@ int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
return 0;
}
/**
 * Make dst refer to the same frame and progress state as src.
 *
 * The owner contexts are copied first; the frame reference and the
 * progress buffer reference are then replaced. If replacing the
 * progress buffer fails, the just-installed frame reference is
 * released again so dst is left in a consistent state.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_thread_replace_frame(AVCodecContext *avctx, ThreadFrame *dst,
                            const ThreadFrame *src)
{
    int err;

    dst->owner[0] = src->owner[0];
    dst->owner[1] = src->owner[1];

    err = av_frame_replace(dst->f, src->f);
    if (err < 0)
        return err;

    err = av_buffer_replace(&dst->progress, src->progress);
    if (err >= 0)
        return 0;

    /* undo: drop the frame reference installed above */
    ff_thread_release_ext_buffer(dst->owner[0], dst);
    return err;
}
#if !HAVE_THREADS
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
@ -1149,22 +1142,3 @@ int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
return bitrate;
}
/**
 * Validate val against a list of accepted values.
 *
 * @param ctx                logging context passed to av_log()
 * @param val_name           human-readable option name used in the
 *                           debug message
 * @param val                value to validate
 * @param array_valid_values accepted values, terminated by INT_MAX
 * @param default_value      returned when val is not in the list
 * @return val if it appears in the list, default_value otherwise
 */
int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
                                const int * array_valid_values, int default_value)
{
    for (const int *p = array_valid_values; *p != INT_MAX; p++)
        if (*p == val)
            return val;

    /* val is not a valid value */
    av_log(ctx, AV_LOG_DEBUG,
           "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value);
    return default_value;
}

Просмотреть файл

@ -19,8 +19,7 @@
*/
#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
@ -434,11 +433,11 @@ static int vaapi_av1_decode_slice(AVCodecContext *avctx,
return 0;
}
const AVHWAccel ff_av1_vaapi_hwaccel = {
.name = "av1_vaapi",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AV1,
.pix_fmt = AV_PIX_FMT_VAAPI,
const FFHWAccel ff_av1_vaapi_hwaccel = {
.p.name = "av1_vaapi",
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AV1,
.p.pix_fmt = AV_PIX_FMT_VAAPI,
.start_frame = vaapi_av1_start_frame,
.end_frame = vaapi_av1_end_frame,
.decode_slice = vaapi_av1_decode_slice,

Просмотреть файл

@ -398,6 +398,11 @@ static const struct {
MAP(MPEG4, MPEG4_ADVANCED_SIMPLE,
MPEG4AdvancedSimple),
MAP(MPEG4, MPEG4_MAIN, MPEG4Main ),
#if VA_CHECK_VERSION(1, 18, 0)
MAP(H264, H264_HIGH_10_INTRA,
H264High10 ),
MAP(H264, H264_HIGH_10, H264High10 ),
#endif
MAP(H264, H264_CONSTRAINED_BASELINE,
H264ConstrainedBaseline),
MAP(H264, H264_MAIN, H264Main ),

Просмотреть файл

@ -19,7 +19,7 @@
#include <va/va.h>
#include <va/va_dec_vp8.h>
#include "hwconfig.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "vp8.h"
@ -220,11 +220,11 @@ fail:
return err;
}
const AVHWAccel ff_vp8_vaapi_hwaccel = {
.name = "vp8_vaapi",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_VP8,
.pix_fmt = AV_PIX_FMT_VAAPI,
const FFHWAccel ff_vp8_vaapi_hwaccel = {
.p.name = "vp8_vaapi",
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_VP8,
.p.pix_fmt = AV_PIX_FMT_VAAPI,
.start_frame = &vaapi_vp8_start_frame,
.end_frame = &vaapi_vp8_end_frame,
.decode_slice = &vaapi_vp8_decode_slice,

Просмотреть файл

@ -22,7 +22,7 @@
#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "vp9shared.h"
@ -168,11 +168,11 @@ static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
return 0;
}
const AVHWAccel ff_vp9_vaapi_hwaccel = {
.name = "vp9_vaapi",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_VP9,
.pix_fmt = AV_PIX_FMT_VAAPI,
const FFHWAccel ff_vp9_vaapi_hwaccel = {
.p.name = "vp9_vaapi",
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_VP9,
.p.pix_fmt = AV_PIX_FMT_VAAPI,
.start_frame = vaapi_vp9_start_frame,
.end_frame = vaapi_vp9_end_frame,
.decode_slice = vaapi_vp9_decode_slice,

Просмотреть файл

@ -29,8 +29,8 @@
#include "version_major.h"
#define LIBAVCODEC_VERSION_MINOR 6
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_MINOR 23
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

Просмотреть файл

@ -46,6 +46,9 @@
#define FF_API_VT_HWACCEL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 61)
#define FF_API_AVCTX_FRAME_NUMBER (LIBAVCODEC_VERSION_MAJOR < 61)
#define FF_API_SLICE_OFFSET (LIBAVCODEC_VERSION_MAJOR < 61)
#define FF_API_SUBFRAMES (LIBAVCODEC_VERSION_MAJOR < 61)
#define FF_API_TICKS_PER_FRAME (LIBAVCODEC_VERSION_MAJOR < 61)
#define FF_API_DROPCHANGED (LIBAVCODEC_VERSION_MAJOR < 61)
// reminder to remove CrystalHD decoders on next major bump
#define FF_CODEC_CRYSTAL_HD (LIBAVCODEC_VERSION_MAJOR < 61)

Просмотреть файл

@ -234,7 +234,8 @@ int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,
else if (buf[0] == 5)
*flags |= VORBIS_FLAG_SETUP;
else
goto bad_packet;
av_log(s, AV_LOG_VERBOSE, "Ignoring packet with unknown type %u\n",
buf[0]);
/* Special packets have no duration. */
return 0;

Просмотреть файл

@ -31,6 +31,7 @@
#include "avcodec.h"
#include "codec_internal.h"
#include "decode.h"
#include "hwaccel_internal.h"
#include "hwconfig.h"
#include "mathops.h"
#include "thread.h"
@ -104,23 +105,21 @@ static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
if ((ret = ff_thread_get_ext_buffer(s->avctx, &f->tf,
ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
ret = AVERROR(ENOMEM);
goto fail;
if (s->avctx->hwaccel) {
const AVHWAccel *hwaccel = s->avctx->hwaccel;
if (hwaccel->frame_priv_data_size) {
f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
if (!f->hwaccel_priv_buf)
goto fail;
f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
}
}
ret = ff_hwaccel_frame_priv_alloc(s->avctx, &f->hwaccel_picture_private,
&f->hwaccel_priv_buf);
if (ret < 0)
goto fail;
return 0;
fail:
av_buffer_unref(&f->seg_map);
ff_thread_release_ext_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
return ret;
}
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
@ -167,6 +166,9 @@ static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
if (free_mem)
free_buffers(s);
if (FF_HW_HAS_CB(avctx, flush))
FF_HW_SIMPLE_CALL(avctx, flush);
}
static void vp8_decode_flush(AVCodecContext *avctx)
@ -2732,7 +2734,10 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
goto err;
}
curframe->tf.f->key_frame = s->keyframe;
if (s->keyframe)
curframe->tf.f->flags |= AV_FRAME_FLAG_KEY;
else
curframe->tf.f->flags &= ~AV_FRAME_FLAG_KEY;
curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
: AV_PICTURE_TYPE_P;
if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
@ -2760,15 +2765,16 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
ff_thread_finish_setup(avctx);
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
ret = hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
if (ret < 0)
goto err;
ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
ret = hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
if (ret < 0)
goto err;
ret = avctx->hwaccel->end_frame(avctx);
ret = hwaccel->end_frame(avctx);
if (ret < 0)
goto err;

Просмотреть файл

@ -0,0 +1,42 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "stdint.h"
// cat 1 and 2 are defined in vp8data.h
// Zero-terminated probability strings for the higher VP8 DCT token
// categories, consumed by the boolean decoder.
// NOTE(review): values presumably match the fixed tables of the VP8
// bitstream specification (RFC 6386) -- verify against the spec.
static const uint8_t vp8_dct_cat3_prob[] = {
    173, 148, 140, 0
};
static const uint8_t vp8_dct_cat4_prob[] = {
    176, 155, 140, 135, 0
};
static const uint8_t vp8_dct_cat5_prob[] = {
    180, 157, 141, 134, 130, 0
};
static const uint8_t vp8_dct_cat6_prob[] = {
    254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
};

// only used for cat3 and above; cat 1 and 2 are referenced directly.
const uint8_t *const ff_vp8_dct_cat_prob[] = {
    vp8_dct_cat3_prob,
    vp8_dct_cat4_prob,
    vp8_dct_cat5_prob,
    vp8_dct_cat6_prob,
};

Просмотреть файл

@ -339,26 +339,8 @@ static const uint8_t vp8_dct_cat1_prob[] = {
static const uint8_t vp8_dct_cat2_prob[] = {
165, 145, 0
};
static const uint8_t vp8_dct_cat3_prob[] = {
173, 148, 140, 0
};
static const uint8_t vp8_dct_cat4_prob[] = {
176, 155, 140, 135, 0
};
static const uint8_t vp8_dct_cat5_prob[] = {
180, 157, 141, 134, 130, 0
};
static const uint8_t vp8_dct_cat6_prob[] = {
254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
};
// only used for cat3 and above; cat 1 and 2 are referenced directly
const uint8_t *const ff_vp8_dct_cat_prob[] = {
vp8_dct_cat3_prob,
vp8_dct_cat4_prob,
vp8_dct_cat5_prob,
vp8_dct_cat6_prob,
};
extern const uint8_t *const ff_vp8_dct_cat_prob[];
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
{

Просмотреть файл

@ -27,6 +27,7 @@
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"
#include "hwaccel_internal.h"
#include "hwconfig.h"
#include "profiles.h"
#include "thread.h"
@ -119,12 +120,14 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
if (!s->frame_extradata_pool) {
s->frame_extradata_pool_size = 0;
ret = AVERROR(ENOMEM);
goto fail;
}
s->frame_extradata_pool_size = sz;
}
f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
if (!f->extradata) {
ret = AVERROR(ENOMEM);
goto fail;
}
memset(f->extradata->data, 0, f->extradata->size);
@ -132,22 +135,16 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
f->segmentation_map = f->extradata->data;
f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
if (avctx->hwaccel) {
const AVHWAccel *hwaccel = avctx->hwaccel;
av_assert0(!f->hwaccel_picture_private);
if (hwaccel->frame_priv_data_size) {
f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
if (!f->hwaccel_priv_buf)
goto fail;
f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
}
}
ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private,
&f->hwaccel_priv_buf);
if (ret < 0)
goto fail;
return 0;
fail:
vp9_frame_unref(avctx, f);
return AVERROR(ENOMEM);
return ret;
}
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
@ -239,6 +236,13 @@ static int update_size(AVCodecContext *avctx, int w, int h)
case AV_PIX_FMT_YUV444P12:
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
#endif
break;
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
#endif
break;
}
@ -1606,7 +1610,10 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
return ret;
f = s->s.frames[CUR_FRAME].tf.f;
f->key_frame = s->s.h.keyframe;
if (s->s.h.keyframe)
f->flags |= AV_FRAME_FLAG_KEY;
else
f->flags &= ~AV_FRAME_FLAG_KEY;
f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
@ -1629,13 +1636,14 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
ret = hwaccel->start_frame(avctx, NULL, 0);
if (ret < 0)
return ret;
ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
if (ret < 0)
return ret;
ret = avctx->hwaccel->end_frame(avctx);
ret = hwaccel->end_frame(avctx);
if (ret < 0)
return ret;
goto finish;
@ -1791,6 +1799,9 @@ static void vp9_decode_flush(AVCodecContext *avctx)
vp9_frame_unref(avctx, &s->s.frames[i]);
for (i = 0; i < 8; i++)
ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
if (FF_HW_HAS_CB(avctx, flush))
FF_HW_SIMPLE_CALL(avctx, flush);
}
static av_cold int vp9_decode_init(AVCodecContext *avctx)

Просмотреть файл

@ -35,12 +35,20 @@
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
int rt, dummy;
if (__builtin_constant_p(shift))
__asm__ (
"imull %3 \n\t"
"shrdl %4, %%edx, %%eax \n\t"
:"=a"(rt), "=d"(dummy)
:"a"(a), "rm"(b), "ci"((uint8_t)shift)
:"a"(a), "rm"(b), "i"(shift & 0x1F)
);
else
__asm__ (
"imull %3 \n\t"
"shrdl %4, %%edx, %%eax \n\t"
:"=a"(rt), "=d"(dummy)
:"a"(a), "rm"(b), "c"((uint8_t)shift)
);
return rt;
}
@ -113,19 +121,31 @@ __asm__ volatile(\
// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
if (__builtin_constant_p(s))
__asm__ ("sarl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
: "i" (-s & 0x1F)
);
else
__asm__ ("sarl %1, %0\n\t"
: "+r" (a)
: "c" ((uint8_t)(-s))
);
return a;
}
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
if (__builtin_constant_p(s))
__asm__ ("shrl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
: "i" (-s & 0x1F)
);
else
__asm__ ("shrl %1, %0\n\t"
: "+r" (a)
: "c" ((uint8_t)(-s))
);
return a;
}

Просмотреть файл

@ -36,6 +36,17 @@
# define __has_feature(x) 0
#endif
#if HAVE_AS_ARCH_DIRECTIVE
.arch AS_ARCH_LEVEL
#endif
#if HAVE_AS_ARCHEXT_DOTPROD_DIRECTIVE
.arch_extension dotprod
#endif
#if HAVE_AS_ARCHEXT_I8MM_DIRECTIVE
.arch_extension i8mm
#endif
/* Support macros for
* - Armv8.3-A Pointer Authentication and

Просмотреть файл

@ -20,11 +20,105 @@
#include "libavutil/cpu_internal.h"
#include "config.h"
#if (defined(__linux__) || defined(__ANDROID__)) && HAVE_GETAUXVAL && HAVE_ASM_HWCAP_H
#include <stdint.h>
#include <asm/hwcap.h>
#include <sys/auxv.h>
#define get_cpu_feature_reg(reg, val) \
__asm__("mrs %0, " #reg : "=r" (val))
/**
 * Runtime detection of optional AArch64 extensions on Linux/Android.
 *
 * Reads the ID_AA64ISAR0/1_EL1 feature registers (kernel-emulated via
 * the HWCAP_CPUID mechanism) and returns a combination of
 * AV_CPU_FLAG_DOTPROD and AV_CPU_FLAG_I8MM, or 0 if nothing can be
 * detected.
 */
static int detect_flags(void)
{
    int flags = 0;
    unsigned long hwcap;

    hwcap = getauxval(AT_HWCAP);
#if defined(HWCAP_CPUID)
    // We can check for DOTPROD and I8MM using HWCAP_ASIMDDP and
    // HWCAP2_I8MM too, avoiding to read the CPUID registers (which triggers
    // a trap, handled by the kernel). However the HWCAP_* defines for these
    // extensions are added much later than HWCAP_CPUID, so the userland
    // headers might lack support for them even if the binary later is run
    // on hardware that does support it (and where the kernel might support
    // HWCAP_CPUID).
    // See https://www.kernel.org/doc/html/latest/arm64/cpu-feature-registers.html
    if (hwcap & HWCAP_CPUID) {
        uint64_t tmp;
        // NOTE(review): bit ranges assumed to be the DP field
        // (ISAR0[47:44]) and the I8MM field (ISAR1[55:52]) of the ARM
        // feature registers -- confirm against the ARM ARM
        get_cpu_feature_reg(ID_AA64ISAR0_EL1, tmp);
        if (((tmp >> 44) & 0xf) == 0x1)
            flags |= AV_CPU_FLAG_DOTPROD;
        get_cpu_feature_reg(ID_AA64ISAR1_EL1, tmp);
        if (((tmp >> 52) & 0xf) == 0x1)
            flags |= AV_CPU_FLAG_I8MM;
    }
#else
    // headers too old to expose HWCAP_CPUID: nothing to detect
    (void)hwcap;
#endif

    return flags;
}
#elif defined(__APPLE__) && HAVE_SYSCTLBYNAME
#include <sys/sysctl.h>
/**
 * Runtime detection of optional AArch64 extensions on Apple platforms.
 *
 * Queries the hw.optional.arm.FEAT_* sysctls; a missing sysctl (call
 * fails) is treated the same as the feature being absent.
 *
 * @return combination of AV_CPU_FLAG_DOTPROD / AV_CPU_FLAG_I8MM
 */
static int detect_flags(void)
{
    uint32_t value = 0;
    size_t size;
    int flags = 0;

    // size must be reset before each query: sysctlbyname updates it
    size = sizeof(value);
    if (!sysctlbyname("hw.optional.arm.FEAT_DotProd", &value, &size, NULL, 0)) {
        if (value)
            flags |= AV_CPU_FLAG_DOTPROD;
    }
    size = sizeof(value);
    if (!sysctlbyname("hw.optional.arm.FEAT_I8MM", &value, &size, NULL, 0)) {
        if (value)
            flags |= AV_CPU_FLAG_I8MM;
    }

    return flags;
}
#elif defined(_WIN32)
#include <windows.h>
/**
 * Runtime detection of optional AArch64 extensions on Windows.
 *
 * Only DOTPROD is detectable here (guarded because older SDK headers
 * may lack the processor-feature define); I8MM has no corresponding
 * IsProcessorFeaturePresent() query in this code.
 */
static int detect_flags(void)
{
    int flags = 0;
#ifdef PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE
    if (IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE))
        flags |= AV_CPU_FLAG_DOTPROD;
#endif
    return flags;
}
#else
// Fallback for platforms without a runtime detection mechanism:
// report no optional extensions.
static int detect_flags(void)
{
    return 0;
}
#endif
int ff_get_cpu_flags_aarch64(void)
{
return AV_CPU_FLAG_ARMV8 * HAVE_ARMV8 |
AV_CPU_FLAG_NEON * HAVE_NEON |
AV_CPU_FLAG_VFP * HAVE_VFP;
int flags = AV_CPU_FLAG_ARMV8 * HAVE_ARMV8 |
AV_CPU_FLAG_NEON * HAVE_NEON;
#ifdef __ARM_FEATURE_DOTPROD
flags |= AV_CPU_FLAG_DOTPROD;
#endif
#ifdef __ARM_FEATURE_MATMUL_INT8
flags |= AV_CPU_FLAG_I8MM;
#endif
flags |= detect_flags();
return flags;
}
size_t ff_get_cpu_max_align_aarch64(void)

Просмотреть файл

@ -25,5 +25,7 @@
#define have_armv8(flags) CPUEXT(flags, ARMV8)
#define have_neon(flags) CPUEXT(flags, NEON)
#define have_vfp(flags) CPUEXT(flags, VFP)
#define have_dotprod(flags) CPUEXT(flags, DOTPROD)
#define have_i8mm(flags) CPUEXT(flags, I8MM)
#endif /* AVUTIL_AARCH64_CPU_H */

Просмотреть файл

@ -28,6 +28,9 @@
#define AVUTIL_AVASSERT_H
#include <stdlib.h>
#ifdef HAVE_AV_CONFIG_H
# include "config.h"
#endif
#include "log.h"
#include "macros.h"

Просмотреть файл

@ -124,6 +124,7 @@ av_frame_new_side_data
av_frame_new_side_data_from_buf
av_frame_ref
av_frame_remove_side_data
av_frame_replace
av_frame_side_data_name
av_frame_unref
av_free

Просмотреть файл

@ -71,7 +71,7 @@ void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
unsigned size_auto = (char *)buf + sizeof(*buf) -
buf->reserved_internal_buffer;
if (size_max == 1)
if (size_max == AV_BPRINT_SIZE_AUTOMATIC)
size_max = size_auto;
buf->str = buf->reserved_internal_buffer;
buf->len = 0;
@ -84,6 +84,11 @@ void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size)
{
if (size == 0) {
av_bprint_init(buf, 0, AV_BPRINT_SIZE_COUNT_ONLY);
return;
}
buf->str = buffer;
buf->len = 0;
buf->size = size;

Просмотреть файл

@ -144,6 +144,9 @@ void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
* Init a print buffer using a pre-existing buffer.
*
* The buffer will not be reallocated.
* In case size equals zero, the AVBPrint will be initialized to use
* the internal buffer as if using AV_BPRINT_SIZE_COUNT_ONLY with
* av_bprint_init().
*
* @param buf buffer structure to init
* @param buffer byte buffer to use for the string data

Просмотреть файл

@ -108,7 +108,9 @@ int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel_id)
av_bprint_init_for_buffer(&bp, buf, buf_size);
av_channel_name_bprint(&bp, channel_id);
return bp.len;
if (bp.len >= INT_MAX)
return AVERROR(ERANGE);
return bp.len + 1;
}
void av_channel_description_bprint(AVBPrint *bp, enum AVChannel channel_id)
@ -135,7 +137,9 @@ int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel_id
av_bprint_init_for_buffer(&bp, buf, buf_size);
av_channel_description_bprint(&bp, channel_id);
return bp.len;
if (bp.len >= INT_MAX)
return AVERROR(ERANGE);
return bp.len + 1;
}
enum AVChannel av_channel_from_string(const char *str)
@ -789,7 +793,9 @@ int av_channel_layout_describe(const AVChannelLayout *channel_layout,
if (ret < 0)
return ret;
return bp.len;
if (bp.len >= INT_MAX)
return AVERROR(ERANGE);
return bp.len + 1;
}
enum AVChannel

Просмотреть файл

@ -174,6 +174,8 @@ int av_parse_cpu_caps(unsigned *flags, const char *s)
{ "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" },
{ "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
{ "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
{ "dotprod", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_DOTPROD }, .unit = "flags" },
{ "i8mm", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_I8MM }, .unit = "flags" },
#elif ARCH_MIPS
{ "mmi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMI }, .unit = "flags" },
{ "msa", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MSA }, .unit = "flags" },
@ -188,6 +190,7 @@ int av_parse_cpu_caps(unsigned *flags, const char *s)
{ "rvv-f32", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F32 }, .unit = "flags" },
{ "rvv-i64", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I64 }, .unit = "flags" },
{ "rvv", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F64 }, .unit = "flags" },
{ "rvb-addr",NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_ADDR }, .unit = "flags" },
{ "rvb-basic",NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_BASIC }, .unit = "flags" },
#endif
{ NULL },

Просмотреть файл

@ -69,6 +69,8 @@
#define AV_CPU_FLAG_NEON (1 << 5)
#define AV_CPU_FLAG_ARMV8 (1 << 6)
#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations
#define AV_CPU_FLAG_DOTPROD (1 << 8)
#define AV_CPU_FLAG_I8MM (1 << 9)
#define AV_CPU_FLAG_SETEND (1 <<16)
#define AV_CPU_FLAG_MMI (1 << 0)
@ -87,6 +89,7 @@
#define AV_CPU_FLAG_RVV_I64 (1 << 5) ///< Vectors of 64-bit int's */
#define AV_CPU_FLAG_RVV_F64 (1 << 6) ///< Vectors of double's
#define AV_CPU_FLAG_RVB_BASIC (1 << 7) ///< Basic bit-manipulations
#define AV_CPU_FLAG_RVB_ADDR (1 << 8) ///< Address bit-manipulations
/**
* Return the flags which specify extensions supported by the CPU.

Просмотреть файл

@ -1,5 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H
#define FFMPEG_VERSION "N-109117-g6a3e174ad1"
#define FFMPEG_VERSION "N-107213-gfed07efcde"
#endif /* AVUTIL_FFVERSION_H */

Просмотреть файл

@ -55,7 +55,6 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
frame->time_base = (AVRational){ 0, 1 };
frame->key_frame = 1;
frame->sample_aspect_ratio = (AVRational){ 0, 1 };
frame->format = -1; /* unknown */
frame->extended_data = frame->data;
@ -78,9 +77,7 @@ static void free_side_data(AVFrameSideData **ptr_sd)
static void wipe_side_data(AVFrame *frame)
{
int i;
for (i = 0; i < frame->nb_side_data; i++) {
for (int i = 0; i < frame->nb_side_data; i++) {
free_side_data(&frame->side_data[i]);
}
frame->nb_side_data = 0;
@ -112,7 +109,7 @@ void av_frame_free(AVFrame **frame)
static int get_video_buffer(AVFrame *frame, int align)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
int ret, i, padded_height, total_size;
int ret, padded_height, total_size;
int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
ptrdiff_t linesizes[4];
size_t sizes[4];
@ -127,7 +124,7 @@ static int get_video_buffer(AVFrame *frame, int align)
if (align <= 0)
align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
for(i=1; i<=align; i+=i) {
for (int i = 1; i <= align; i += i) {
ret = av_image_fill_linesizes(frame->linesize, frame->format,
FFALIGN(frame->width, i));
if (ret < 0)
@ -136,11 +133,11 @@ static int get_video_buffer(AVFrame *frame, int align)
break;
}
for (i = 0; i < 4 && frame->linesize[i]; i++)
for (int i = 0; i < 4 && frame->linesize[i]; i++)
frame->linesize[i] = FFALIGN(frame->linesize[i], align);
}
for (i = 0; i < 4; i++)
for (int i = 0; i < 4; i++)
linesizes[i] = frame->linesize[i];
padded_height = FFALIGN(frame->height, 32);
@ -149,7 +146,7 @@ static int get_video_buffer(AVFrame *frame, int align)
return ret;
total_size = 4*plane_padding;
for (i = 0; i < 4; i++) {
for (int i = 0; i < 4; i++) {
if (sizes[i] > INT_MAX - total_size)
return AVERROR(EINVAL);
total_size += sizes[i];
@ -165,7 +162,7 @@ static int get_video_buffer(AVFrame *frame, int align)
frame->buf[0]->data, frame->linesize)) < 0)
goto fail;
for (i = 1; i < 4; i++) {
for (int i = 1; i < 4; i++) {
if (frame->data[i])
frame->data[i] += i * plane_padding;
}
@ -182,7 +179,7 @@ static int get_audio_buffer(AVFrame *frame, int align)
{
int planar = av_sample_fmt_is_planar(frame->format);
int channels, planes;
int ret, i;
int ret;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
@ -223,7 +220,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
} else
frame->extended_data = frame->data;
for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
for (int i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
if (!frame->buf[i]) {
av_frame_unref(frame);
@ -231,7 +228,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
}
for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
for (int i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
if (!frame->extended_buf[i]) {
av_frame_unref(frame);
@ -265,9 +262,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
int ret, i;
int ret;
#if FF_API_FRAME_KEY
FF_DISABLE_DEPRECATION_WARNINGS
dst->key_frame = src->key_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dst->pict_type = src->pict_type;
dst->sample_aspect_ratio = src->sample_aspect_ratio;
dst->crop_top = src->crop_top;
@ -277,9 +278,17 @@ static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
dst->pts = src->pts;
dst->duration = src->duration;
dst->repeat_pict = src->repeat_pict;
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
dst->interlaced_frame = src->interlaced_frame;
dst->top_field_first = src->top_field_first;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_PALETTE_HAS_CHANGED
FF_DISABLE_DEPRECATION_WARNINGS
dst->palette_has_changed = src->palette_has_changed;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dst->sample_rate = src->sample_rate;
dst->opaque = src->opaque;
dst->pkt_dts = src->pkt_dts;
@ -318,7 +327,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_dict_copy(&dst->metadata, src->metadata, 0);
for (i = 0; i < src->nb_side_data; i++) {
for (int i = 0; i < src->nb_side_data; i++) {
const AVFrameSideData *sd_src = src->side_data[i];
AVFrameSideData *sd_dst;
if ( sd_src->type == AV_FRAME_DATA_PANSCAN
@ -351,7 +360,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
int i, ret = 0;
int ret = 0;
av_assert1(dst->width == 0 && dst->height == 0);
#if FF_API_OLD_CHANNEL_LAYOUT
@ -406,7 +415,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
/* ref the buffers */
for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
if (!src->buf[i])
continue;
dst->buf[i] = av_buffer_ref(src->buf[i]);
@ -425,7 +434,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
dst->nb_extended_buf = src->nb_extended_buf;
for (i = 0; i < src->nb_extended_buf; i++) {
for (int i = 0; i < src->nb_extended_buf; i++) {
dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
if (!dst->extended_buf[i]) {
ret = AVERROR(ENOMEM);
@ -470,6 +479,133 @@ fail:
return ret;
}
int av_frame_replace(AVFrame *dst, const AVFrame *src)
{
int ret = 0;
if (dst == src)
return AVERROR(EINVAL);
if (!src->buf[0]) {
av_frame_unref(dst);
/* duplicate the frame data if it's not refcounted */
if ( src->data[0] || src->data[1]
|| src->data[2] || src->data[3])
return av_frame_ref(dst, src);
ret = frame_copy_props(dst, src, 0);
if (ret < 0)
goto fail;
}
dst->format = src->format;
dst->width = src->width;
dst->height = src->height;
dst->nb_samples = src->nb_samples;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
dst->channels = src->channels;
dst->channel_layout = src->channel_layout;
if (!av_channel_layout_check(&src->ch_layout)) {
av_channel_layout_uninit(&dst->ch_layout);
if (src->channel_layout)
av_channel_layout_from_mask(&dst->ch_layout, src->channel_layout);
else {
dst->ch_layout.nb_channels = src->channels;
dst->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
}
} else {
#endif
ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
if (ret < 0)
goto fail;
#if FF_API_OLD_CHANNEL_LAYOUT
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
wipe_side_data(dst);
av_dict_free(&dst->metadata);
ret = frame_copy_props(dst, src, 0);
if (ret < 0)
goto fail;
/* replace the buffers */
for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
ret = av_buffer_replace(&dst->buf[i], src->buf[i]);
if (ret < 0)
goto fail;
}
if (src->extended_buf) {
if (dst->nb_extended_buf != src->nb_extended_buf) {
int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
void *tmp;
for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
av_buffer_unref(&dst->extended_buf[i]);
tmp = av_realloc_array(dst->extended_buf, sizeof(*dst->extended_buf),
src->nb_extended_buf);
if (!tmp) {
ret = AVERROR(ENOMEM);
goto fail;
}
dst->extended_buf = tmp;
dst->nb_extended_buf = src->nb_extended_buf;
memset(&dst->extended_buf[nb_extended_buf], 0,
(src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
}
for (int i = 0; i < src->nb_extended_buf; i++) {
ret = av_buffer_replace(&dst->extended_buf[i], src->extended_buf[i]);
if (ret < 0)
goto fail;
}
} else if (dst->extended_buf) {
for (int i = 0; i < dst->nb_extended_buf; i++)
av_buffer_unref(&dst->extended_buf[i]);
av_freep(&dst->extended_buf);
}
ret = av_buffer_replace(&dst->hw_frames_ctx, src->hw_frames_ctx);
if (ret < 0)
goto fail;
if (dst->extended_data != dst->data)
av_freep(&dst->extended_data);
if (src->extended_data != src->data) {
int ch = dst->ch_layout.nb_channels;
if (!ch) {
ret = AVERROR(EINVAL);
goto fail;
}
if (ch > SIZE_MAX / sizeof(*dst->extended_data))
goto fail;
dst->extended_data = av_memdup(src->extended_data, sizeof(*dst->extended_data) * ch);
if (!dst->extended_data) {
ret = AVERROR(ENOMEM);
goto fail;
}
} else
dst->extended_data = dst->data;
memcpy(dst->data, src->data, sizeof(src->data));
memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
return 0;
fail:
av_frame_unref(dst);
return ret;
}
AVFrame *av_frame_clone(const AVFrame *src)
{
AVFrame *ret = av_frame_alloc();
@ -485,16 +621,14 @@ AVFrame *av_frame_clone(const AVFrame *src)
void av_frame_unref(AVFrame *frame)
{
int i;
if (!frame)
return;
wipe_side_data(frame);
for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
av_buffer_unref(&frame->buf[i]);
for (i = 0; i < frame->nb_extended_buf; i++)
for (int i = 0; i < frame->nb_extended_buf; i++)
av_buffer_unref(&frame->extended_buf[i]);
av_freep(&frame->extended_buf);
av_dict_free(&frame->metadata);
@ -531,16 +665,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
int av_frame_is_writable(AVFrame *frame)
{
int i, ret = 1;
int ret = 1;
/* assume non-refcounted frames are not writable */
if (!frame->buf[0])
return 0;
for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
if (frame->buf[i])
ret &= !!av_buffer_is_writable(frame->buf[i]);
for (i = 0; i < frame->nb_extended_buf; i++)
for (int i = 0; i < frame->nb_extended_buf; i++)
ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
return ret;
@ -604,10 +738,10 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
return frame_copy_props(dst, src, 1);
}
AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane)
{
uint8_t *data;
int planes, i;
int planes;
if (frame->nb_samples) {
int channels = frame->ch_layout.nb_channels;
@ -630,12 +764,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
return NULL;
data = frame->extended_data[plane];
for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
AVBufferRef *buf = frame->buf[i];
if (data >= buf->data && data < buf->data + buf->size)
return buf;
}
for (i = 0; i < frame->nb_extended_buf; i++) {
for (int i = 0; i < frame->nb_extended_buf; i++) {
AVBufferRef *buf = frame->extended_buf[i];
if (data >= buf->data && data < buf->data + buf->size)
return buf;
@ -690,9 +824,7 @@ AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
enum AVFrameSideDataType type)
{
int i;
for (i = 0; i < frame->nb_side_data; i++) {
for (int i = 0; i < frame->nb_side_data; i++) {
if (frame->side_data[i]->type == type)
return frame->side_data[i];
}
@ -702,7 +834,7 @@ AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
const uint8_t *src_data[4];
int i, planes;
int planes;
if (dst->width < src->width ||
dst->height < src->height)
@ -712,7 +844,7 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
return av_hwframe_transfer_data(dst, src, 0);
planes = av_pix_fmt_count_planes(dst->format);
for (i = 0; i < planes; i++)
for (int i = 0; i < planes; i++)
if (!dst->data[i] || !src->data[i])
return AVERROR(EINVAL);
@ -729,7 +861,6 @@ static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
int planar = av_sample_fmt_is_planar(dst->format);
int channels = dst->ch_layout.nb_channels;
int planes = planar ? channels : 1;
int i;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
@ -757,7 +888,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
#endif
return AVERROR(EINVAL);
for (i = 0; i < planes; i++)
for (int i = 0; i < planes; i++)
if (!dst->extended_data[i] || !src->extended_data[i])
return AVERROR(EINVAL);
@ -789,9 +920,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
int i;
for (i = frame->nb_side_data - 1; i >= 0; i--) {
for (int i = frame->nb_side_data - 1; i >= 0; i--) {
AVFrameSideData *sd = frame->side_data[i];
if (sd->type == type) {
free_side_data(&frame->side_data[i]);
@ -838,9 +967,7 @@ const char *av_frame_side_data_name(enum AVFrameSideDataType type)
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
const AVPixFmtDescriptor *desc)
{
int i, j;
for (i = 0; frame->data[i]; i++) {
for (int i = 0; frame->data[i]; i++) {
const AVComponentDescriptor *comp = NULL;
int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
@ -851,7 +978,7 @@ static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
}
/* find any component descriptor for this plane */
for (j = 0; j < desc->nb_components; j++) {
for (int j = 0; j < desc->nb_components; j++) {
if (desc->comp[j].plane == i) {
comp = &desc->comp[j];
break;
@ -871,7 +998,6 @@ int av_frame_apply_cropping(AVFrame *frame, int flags)
{
const AVPixFmtDescriptor *desc;
size_t offsets[4];
int i;
if (!(frame->width > 0 && frame->height > 0))
return AVERROR(EINVAL);
@ -906,7 +1032,7 @@ int av_frame_apply_cropping(AVFrame *frame, int flags)
int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
int min_log2_align = INT_MAX;
for (i = 0; frame->data[i]; i++) {
for (int i = 0; frame->data[i]; i++) {
int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
min_log2_align = FFMIN(log2_align, min_log2_align);
}
@ -922,7 +1048,7 @@ int av_frame_apply_cropping(AVFrame *frame, int flags)
}
}
for (i = 0; frame->data[i]; i++)
for (int i = 0; frame->data[i]; i++)
frame->data[i] += offsets[i];
frame->width -= (frame->crop_left + frame->crop_right);

Просмотреть файл

@ -214,6 +214,16 @@ enum AVFrameSideDataType {
* Ambient viewing environment metadata, as defined by H.274.
*/
AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
/**
* Provide encoder-specific hinting information about changed/unchanged
* portions of a frame. It can be used to pass information about which
* macroblocks can be skipped because they didn't change from the
* corresponding ones in the previous frame. This could be useful for
* applications which know this information in advance to speed up
* encoding.
*/
AV_FRAME_DATA_VIDEO_HINT,
};
enum AVActiveFormatDescription {
@ -416,10 +426,15 @@ typedef struct AVFrame {
*/
int format;
#if FF_API_FRAME_KEY
/**
* 1 -> keyframe, 0-> not
*
* @deprecated Use AV_FRAME_FLAG_KEY instead
*/
attribute_deprecated
int key_frame;
#endif
/**
* Picture type of the frame.
@ -486,25 +501,50 @@ typedef struct AVFrame {
void *opaque;
/**
* When decoding, this signals how much the picture must be delayed.
* extra_delay = repeat_pict / (2*fps)
* Number of fields in this frame which should be repeated, i.e. the total
* duration of this frame should be repeat_pict + 2 normal field durations.
*
* For interlaced frames this field may be set to 1, which signals that this
* frame should be presented as 3 fields: beginning with the first field (as
* determined by AV_FRAME_FLAG_TOP_FIELD_FIRST being set or not), followed
* by the second field, and then the first field again.
*
* For progressive frames this field may be set to a multiple of 2, which
* signals that this frame's duration should be (repeat_pict + 2) / 2
* normal frame durations.
*
* @note This field is computed from MPEG2 repeat_first_field flag and its
* associated flags, H.264 pic_struct from picture timing SEI, and
* their analogues in other codecs. Typically it should only be used when
* higher-layer timing information is not available.
*/
int repeat_pict;
#if FF_API_INTERLACED_FRAME
/**
* The content of the picture is interlaced.
*
* @deprecated Use AV_FRAME_FLAG_INTERLACED instead
*/
attribute_deprecated
int interlaced_frame;
/**
* If the content is interlaced, is top field displayed first.
*
* @deprecated Use AV_FRAME_FLAG_TOP_FIELD_FIRST instead
*/
attribute_deprecated
int top_field_first;
#endif
#if FF_API_PALETTE_HAS_CHANGED
/**
* Tell user application that palette has changed from previous frame.
*/
attribute_deprecated
int palette_has_changed;
#endif
#if FF_API_REORDERED_OPAQUE
/**
@ -582,10 +622,23 @@ typedef struct AVFrame {
* The frame data may be corrupted, e.g. due to decoding errors.
*/
#define AV_FRAME_FLAG_CORRUPT (1 << 0)
/**
* A flag to mark frames that are keyframes.
*/
#define AV_FRAME_FLAG_KEY (1 << 1)
/**
* A flag to mark the frames which need to be decoded, but shouldn't be output.
*/
#define AV_FRAME_FLAG_DISCARD (1 << 2)
/**
* A flag to mark frames whose content is interlaced.
*/
#define AV_FRAME_FLAG_INTERLACED (1 << 3)
/**
* A flag to mark frames where the top field is displayed first if the content
* is interlaced.
*/
#define AV_FRAME_FLAG_TOP_FIELD_FIRST (1 << 4)
/**
* @}
*/
@ -793,6 +846,19 @@ void av_frame_free(AVFrame **frame);
*/
int av_frame_ref(AVFrame *dst, const AVFrame *src);
/**
* Ensure the destination frame refers to the same data described by the source
* frame, either by creating a new reference for each AVBufferRef from src if
* they differ from those in dst, by allocating new buffers and copying data if
* src is not reference counted, or by unrefencing it if src is empty.
*
* Frame properties on dst will be replaced by those from src.
*
* @return 0 on success, a negative AVERROR on error. On error, dst is
* unreferenced.
*/
int av_frame_replace(AVFrame *dst, const AVFrame *src);
/**
* Create a new frame that references the same data as src.
*
@ -901,7 +967,7 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
* @return the buffer reference that contains the plane or NULL if the input
* frame is not valid.
*/
AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane);
/**
* Add a new side data to a frame.

Просмотреть файл

@ -18,14 +18,13 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avassert.h"
#include "hdr_dynamic_metadata.h"
#include "mem.h"
#include "libavcodec/defs.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
#define T35_PAYLOAD_MAX_SIZE 907
static const int64_t luminance_den = 1;
static const int32_t peak_luminance_den = 15;
static const int64_t rgb_den = 100000;
@ -62,14 +61,14 @@ AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame)
int av_dynamic_hdr_plus_from_t35(AVDynamicHDRPlus *s, const uint8_t *data,
size_t size)
{
uint8_t padded_buf[T35_PAYLOAD_MAX_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
uint8_t padded_buf[AV_HDR_PLUS_MAX_PAYLOAD_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
GetBitContext gbc, *gb = &gbc;
int ret;
if (!s)
return AVERROR(ENOMEM);
if (size > T35_PAYLOAD_MAX_SIZE)
if (size > AV_HDR_PLUS_MAX_PAYLOAD_SIZE)
return AVERROR(EINVAL);
memcpy(padded_buf, data, size);
@ -243,8 +242,10 @@ int av_dynamic_hdr_plus_to_t35(const AVDynamicHDRPlus *s, uint8_t **data, size_t
size_t size_bits, size_bytes;
PutBitContext pbc, *pb = &pbc;
if (!s || !data)
if (!s)
return AVERROR(EINVAL);
if ((!data || *data) && !size)
return AVERROR(EINVAL);
/**
* Buffer size per CTA-861-H p.253-254:
@ -296,9 +297,20 @@ int av_dynamic_hdr_plus_to_t35(const AVDynamicHDRPlus *s, uint8_t **data, size_t
size_bytes = (size_bits + 7) / 8;
buf = av_mallocz(size_bytes);
if (!buf)
return AVERROR(ENOMEM);
av_assert0(size_bytes <= AV_HDR_PLUS_MAX_PAYLOAD_SIZE);
if (!data) {
*size = size_bytes;
return 0;
} else if (*data) {
if (*size < size_bytes)
return AVERROR_BUFFER_TOO_SMALL;
buf = *data;
} else {
buf = av_malloc(size_bytes);
if (!buf)
return AVERROR(ENOMEM);
}
init_put_bits(pb, buf, size_bytes);

Просмотреть файл

@ -353,13 +353,21 @@ AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame);
int av_dynamic_hdr_plus_from_t35(AVDynamicHDRPlus *s, const uint8_t *data,
size_t size);
#define AV_HDR_PLUS_MAX_PAYLOAD_SIZE 907
/**
* Serialize dynamic HDR10+ metadata to a user data registered ITU-T T.35 buffer,
* excluding the first 48 bytes of the header, and beginning with the application mode.
* @param s A pointer containing the decoded AVDynamicHDRPlus structure.
* @param data A pointer to a byte buffer to be allocated and filled
* with the serialized metadata.
* @param size A pointer to a size to be set to the returned buffer's size (optional).
* @param data[in,out] A pointer to pointer to a byte buffer to be filled with the
* serialized metadata.
* If *data is NULL, a buffer be will be allocated and a pointer to
* it stored in its place. The caller assumes ownership of the buffer.
* May be NULL, in which case the function will only store the
* required buffer size in *size.
* @param size[in,out] A pointer to a size to be set to the returned buffer's size.
* If *data is not NULL, *size must contain the size of the input
* buffer. May be NULL only if *data is NULL.
*
* @return >= 0 on success. Otherwise, returns the appropriate AVERROR.
*/

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше