Bug 1765480 - Update ffvpx to a recent ffmpeg version, reapply the in-tree patch, fix moz.build for the new files, fix the symbol files. r=alwu

Depends on D150972

Differential Revision: https://phabricator.services.mozilla.com/D150973
This commit is contained in:
Paul Adenot 2022-08-17 16:29:33 +00:00
Родитель e53a91d5e3
Коммит 12e3f0e9a9
293 изменённых файлов: 11845 добавлений и 11194 удалений

Просмотреть файл

@ -96,7 +96,7 @@ do { \
atomic_load(object)
#define atomic_exchange(object, desired) \
InterlockedExchangePointer(object, desired);
InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
#define atomic_exchange_explicit(object, desired, order) \
atomic_exchange(object, desired)

Просмотреть файл

@ -38,11 +38,13 @@
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
#include <time.h>
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/fftime.h"
typedef struct pthread_t {
void *handle;
@ -61,6 +63,9 @@ typedef CONDITION_VARIABLE pthread_cond_t;
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
#define PTHREAD_CANCEL_ENABLE 1
#define PTHREAD_CANCEL_DISABLE 0
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
{
pthread_t *h = (pthread_t*)arg;
@ -156,10 +161,31 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
return 0;
}
/* Emulates POSIX pthread_cond_timedwait() on top of a Win32 SRW-lock
 * condition variable.  `abstime` is an absolute deadline, which is
 * converted to a relative timeout in milliseconds for
 * SleepConditionVariableSRW().
 * Returns 0 on success, ETIMEDOUT if the deadline expired, or EINVAL
 * for any other wait failure. */
static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime)
{
/* Absolute deadline in milliseconds; 64-bit math so tv_sec * 1000 cannot overflow. */
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
/* Relative timeout: av_gettime() is in microseconds, hence the /1000.
 * Clamped to [0, UINT32_MAX] so an already-passed deadline polls once. */
DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
DWORD err = GetLastError();
if (err == ERROR_TIMEOUT)
return ETIMEDOUT;
else
return EINVAL;
}
return 0;
}
/* Wakes one thread waiting on the condition variable.  Always succeeds
 * (WakeConditionVariable has no failure mode). */
static inline int pthread_cond_signal(pthread_cond_t *cond)
{
WakeConditionVariable(cond);
return 0;
}
/* Stub: thread cancellation is not supported by this w32threads shim,
 * so the requested state is ignored.
 * NOTE(review): `oldstate` is never written here — callers must not
 * rely on it being filled in. */
static inline int pthread_setcancelstate(int state, int *oldstate)
{
return 0;
}
#endif /* COMPAT_W32PTHREADS_H */

Просмотреть файл

@ -36,6 +36,7 @@
function fft4_neon
AARCH64_VALID_JUMP_TARGET
ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1
@ -58,6 +59,7 @@ function fft4_neon
endfunc
function fft8_neon
AARCH64_VALID_JUMP_TARGET
mov x1, x0
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
@ -108,6 +110,7 @@ function fft8_neon
endfunc
function fft16_neon
AARCH64_VALID_JUMP_TARGET
mov x1, x0
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32
@ -337,6 +340,8 @@ endfunc
.macro def_fft n, n2, n4
function fft\n\()_neon, align=6
AARCH64_VALID_JUMP_TARGET
AARCH64_SIGN_LINK_REGISTER
sub sp, sp, #16
stp x28, x30, [sp]
add x28, x0, #\n4*2*8
@ -347,6 +352,7 @@ function fft\n\()_neon, align=6
bl fft\n4\()_neon
sub x0, x28, #\n4*2*8
ldp x28, x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
movrel x4, X(ff_cos_\n)
mov x2, #\n4>>1
b fft_pass_neon

Просмотреть файл

@ -19,6 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config_components.h"
#include "libavutil/aarch64/asm.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y) */

Просмотреть файл

@ -69,19 +69,42 @@ void ff_h264_idct_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_add16_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
const uint8_t nnzc[5 * 8]);
void ff_h264_idct_add16intra_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
const uint8_t nnzc[5 * 8]);
void ff_h264_idct_add8_neon(uint8_t **dest, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
const uint8_t nnzc[15 * 8]);
void ff_h264_idct8_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
const uint8_t nnzc[5 * 8]);
void ff_h264_v_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_h_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_v_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta);
void ff_h264_h_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta);
void ff_h264_v_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_h_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_h_loop_filter_chroma422_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_v_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
int alpha, int beta);
void ff_h264_h_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
int alpha, int beta);
void ff_h264_h_loop_filter_chroma422_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
int alpha, int beta);
void ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
int alpha, int beta);
av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc)
@ -125,5 +148,19 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
c->h264_idct8_add = ff_h264_idct8_add_neon;
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
c->h264_idct8_add4 = ff_h264_idct8_add4_neon;
} else if (have_neon(cpu_flags) && bit_depth == 10) {
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon_10;
c->h264_v_loop_filter_chroma_intra = ff_h264_v_loop_filter_chroma_intra_neon_10;
if (chroma_format_idc <= 1) {
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon_10;
c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10;
} else {
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma422_neon_10;
c->h264_h_loop_filter_chroma_mbaff = ff_h264_h_loop_filter_chroma_neon_10;
c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma422_intra_neon_10;
c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
}
}
}

Просмотреть файл

@ -110,7 +110,6 @@
function ff_h264_v_loop_filter_luma_neon, export=1
h264_loop_filter_start
sxtw x1, w1
ld1 {v0.16B}, [x0], x1
ld1 {v2.16B}, [x0], x1
@ -134,7 +133,6 @@ endfunc
function ff_h264_h_loop_filter_luma_neon, export=1
h264_loop_filter_start
sxtw x1, w1
sub x0, x0, #4
ld1 {v6.8B}, [x0], x1
@ -184,199 +182,198 @@ endfunc
.macro h264_loop_filter_start_intra
orr w4, w2, w3
cbnz w4, 1f
ret
orr w4, w2, w3
cbnz w4, 1f
ret
1:
sxtw x1, w1
dup v30.16b, w2 // alpha
dup v31.16b, w3 // beta
dup v30.16b, w2 // alpha
dup v31.16b, w3 // beta
.endm
.macro h264_loop_filter_luma_intra
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
cmhi v19.16b, v30.16b, v16.16b // < alpha
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
cmhi v19.16b, v30.16b, v16.16b // < alpha
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
movi v29.16b, #2
ushr v30.16b, v30.16b, #2 // alpha >> 2
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
movi v29.16b, #2
ushr v30.16b, v30.16b, #2 // alpha >> 2
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
and v19.16b, v19.16b, v17.16b
and v19.16b, v19.16b, v18.16b
shrn v20.8b, v19.8h, #4
mov x4, v20.d[0]
cbz x4, 9f
and v19.16b, v19.16b, v17.16b
and v19.16b, v19.16b, v18.16b
shrn v20.8b, v19.8h, #4
mov x4, v20.d[0]
cbz x4, 9f
ushll v20.8h, v6.8b, #1
ushll v22.8h, v1.8b, #1
ushll2 v21.8h, v6.16b, #1
ushll2 v23.8h, v1.16b, #1
uaddw v20.8h, v20.8h, v7.8b
uaddw v22.8h, v22.8h, v0.8b
uaddw2 v21.8h, v21.8h, v7.16b
uaddw2 v23.8h, v23.8h, v0.16b
uaddw v20.8h, v20.8h, v1.8b
uaddw v22.8h, v22.8h, v6.8b
uaddw2 v21.8h, v21.8h, v1.16b
uaddw2 v23.8h, v23.8h, v6.16b
ushll v20.8h, v6.8b, #1
ushll v22.8h, v1.8b, #1
ushll2 v21.8h, v6.16b, #1
ushll2 v23.8h, v1.16b, #1
uaddw v20.8h, v20.8h, v7.8b
uaddw v22.8h, v22.8h, v0.8b
uaddw2 v21.8h, v21.8h, v7.16b
uaddw2 v23.8h, v23.8h, v0.16b
uaddw v20.8h, v20.8h, v1.8b
uaddw v22.8h, v22.8h, v6.8b
uaddw2 v21.8h, v21.8h, v1.16b
uaddw2 v23.8h, v23.8h, v6.16b
rshrn v24.8b, v20.8h, #2 // p0'_1
rshrn v25.8b, v22.8h, #2 // q0'_1
rshrn2 v24.16b, v21.8h, #2 // p0'_1
rshrn2 v25.16b, v23.8h, #2 // q0'_1
rshrn v24.8b, v20.8h, #2 // p0'_1
rshrn v25.8b, v22.8h, #2 // q0'_1
rshrn2 v24.16b, v21.8h, #2 // p0'_1
rshrn2 v25.16b, v23.8h, #2 // q0'_1
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
and v17.16b, v16.16b, v17.16b // if_2 && if_3
and v18.16b, v16.16b, v18.16b // if_2 && if_4
and v17.16b, v16.16b, v17.16b // if_2 && if_3
and v18.16b, v16.16b, v18.16b // if_2 && if_4
not v30.16b, v17.16b
not v31.16b, v18.16b
not v30.16b, v17.16b
not v31.16b, v18.16b
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
uaddl v26.8h, v5.8b, v7.8b
uaddl2 v27.8h, v5.16b, v7.16b
uaddw v26.8h, v26.8h, v0.8b
uaddw2 v27.8h, v27.8h, v0.16b
add v20.8h, v20.8h, v26.8h
add v21.8h, v21.8h, v27.8h
uaddw v20.8h, v20.8h, v0.8b
uaddw2 v21.8h, v21.8h, v0.16b
rshrn v20.8b, v20.8h, #3 // p0'_2
rshrn2 v20.16b, v21.8h, #3 // p0'_2
uaddw v26.8h, v26.8h, v6.8b
uaddw2 v27.8h, v27.8h, v6.16b
rshrn v21.8b, v26.8h, #2 // p1'_2
rshrn2 v21.16b, v27.8h, #2 // p1'_2
uaddl v28.8h, v4.8b, v5.8b
uaddl2 v29.8h, v4.16b, v5.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v19.8b, v28.8h, #3 // p2'_2
rshrn2 v19.16b, v29.8h, #3 // p2'_2
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
uaddl v26.8h, v5.8b, v7.8b
uaddl2 v27.8h, v5.16b, v7.16b
uaddw v26.8h, v26.8h, v0.8b
uaddw2 v27.8h, v27.8h, v0.16b
add v20.8h, v20.8h, v26.8h
add v21.8h, v21.8h, v27.8h
uaddw v20.8h, v20.8h, v0.8b
uaddw2 v21.8h, v21.8h, v0.16b
rshrn v20.8b, v20.8h, #3 // p0'_2
rshrn2 v20.16b, v21.8h, #3 // p0'_2
uaddw v26.8h, v26.8h, v6.8b
uaddw2 v27.8h, v27.8h, v6.16b
rshrn v21.8b, v26.8h, #2 // p1'_2
rshrn2 v21.16b, v27.8h, #2 // p1'_2
uaddl v28.8h, v4.8b, v5.8b
uaddl2 v29.8h, v4.16b, v5.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v19.8b, v28.8h, #3 // p2'_2
rshrn2 v19.16b, v29.8h, #3 // p2'_2
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
uaddl v26.8h, v2.8b, v0.8b
uaddl2 v27.8h, v2.16b, v0.16b
uaddw v26.8h, v26.8h, v7.8b
uaddw2 v27.8h, v27.8h, v7.16b
add v22.8h, v22.8h, v26.8h
add v23.8h, v23.8h, v27.8h
uaddw v22.8h, v22.8h, v7.8b
uaddw2 v23.8h, v23.8h, v7.16b
rshrn v22.8b, v22.8h, #3 // q0'_2
rshrn2 v22.16b, v23.8h, #3 // q0'_2
uaddw v26.8h, v26.8h, v1.8b
uaddw2 v27.8h, v27.8h, v1.16b
rshrn v23.8b, v26.8h, #2 // q1'_2
rshrn2 v23.16b, v27.8h, #2 // q1'_2
uaddl v28.8h, v2.8b, v3.8b
uaddl2 v29.8h, v2.16b, v3.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v26.8b, v28.8h, #3 // q2'_2
rshrn2 v26.16b, v29.8h, #3 // q2'_2
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
uaddl v26.8h, v2.8b, v0.8b
uaddl2 v27.8h, v2.16b, v0.16b
uaddw v26.8h, v26.8h, v7.8b
uaddw2 v27.8h, v27.8h, v7.16b
add v22.8h, v22.8h, v26.8h
add v23.8h, v23.8h, v27.8h
uaddw v22.8h, v22.8h, v7.8b
uaddw2 v23.8h, v23.8h, v7.16b
rshrn v22.8b, v22.8h, #3 // q0'_2
rshrn2 v22.16b, v23.8h, #3 // q0'_2
uaddw v26.8h, v26.8h, v1.8b
uaddw2 v27.8h, v27.8h, v1.16b
rshrn v23.8b, v26.8h, #2 // q1'_2
rshrn2 v23.16b, v27.8h, #2 // q1'_2
uaddl v28.8h, v2.8b, v3.8b
uaddl2 v29.8h, v2.16b, v3.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v26.8b, v28.8h, #3 // q2'_2
rshrn2 v26.16b, v29.8h, #3 // q2'_2
bit v7.16b, v24.16b, v30.16b // p0'_1
bit v0.16b, v25.16b, v31.16b // q0'_1
bit v7.16b, v20.16b, v17.16b // p0'_2
bit v6.16b, v21.16b, v17.16b // p1'_2
bit v5.16b, v19.16b, v17.16b // p2'_2
bit v0.16b, v22.16b, v18.16b // q0'_2
bit v1.16b, v23.16b, v18.16b // q1'_2
bit v2.16b, v26.16b, v18.16b // q2'_2
bit v7.16b, v24.16b, v30.16b // p0'_1
bit v0.16b, v25.16b, v31.16b // q0'_1
bit v7.16b, v20.16b, v17.16b // p0'_2
bit v6.16b, v21.16b, v17.16b // p1'_2
bit v5.16b, v19.16b, v17.16b // p2'_2
bit v0.16b, v22.16b, v18.16b // q0'_2
bit v1.16b, v23.16b, v18.16b // q1'_2
bit v2.16b, v26.16b, v18.16b // q2'_2
.endm
function ff_h264_v_loop_filter_luma_intra_neon, export=1
h264_loop_filter_start_intra
h264_loop_filter_start_intra
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v1.16b}, [x0], x1 // q1
ld1 {v2.16b}, [x0], x1 // q2
ld1 {v3.16b}, [x0], x1 // q3
sub x0, x0, x1, lsl #3
ld1 {v4.16b}, [x0], x1 // p3
ld1 {v5.16b}, [x0], x1 // p2
ld1 {v6.16b}, [x0], x1 // p1
ld1 {v7.16b}, [x0] // p0
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v1.16b}, [x0], x1 // q1
ld1 {v2.16b}, [x0], x1 // q2
ld1 {v3.16b}, [x0], x1 // q3
sub x0, x0, x1, lsl #3
ld1 {v4.16b}, [x0], x1 // p3
ld1 {v5.16b}, [x0], x1 // p2
ld1 {v6.16b}, [x0], x1 // p1
ld1 {v7.16b}, [x0] // p0
h264_loop_filter_luma_intra
h264_loop_filter_luma_intra
sub x0, x0, x1, lsl #1
st1 {v5.16b}, [x0], x1 // p2
st1 {v6.16b}, [x0], x1 // p1
st1 {v7.16b}, [x0], x1 // p0
st1 {v0.16b}, [x0], x1 // q0
st1 {v1.16b}, [x0], x1 // q1
st1 {v2.16b}, [x0] // q2
sub x0, x0, x1, lsl #1
st1 {v5.16b}, [x0], x1 // p2
st1 {v6.16b}, [x0], x1 // p1
st1 {v7.16b}, [x0], x1 // p0
st1 {v0.16b}, [x0], x1 // q0
st1 {v1.16b}, [x0], x1 // q1
st1 {v2.16b}, [x0] // q2
9:
ret
ret
endfunc
function ff_h264_h_loop_filter_luma_intra_neon, export=1
h264_loop_filter_start_intra
h264_loop_filter_start_intra
sub x0, x0, #4
ld1 {v4.8b}, [x0], x1
ld1 {v5.8b}, [x0], x1
ld1 {v6.8b}, [x0], x1
ld1 {v7.8b}, [x0], x1
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x0], x1
ld1 {v2.8b}, [x0], x1
ld1 {v3.8b}, [x0], x1
ld1 {v4.d}[1], [x0], x1
ld1 {v5.d}[1], [x0], x1
ld1 {v6.d}[1], [x0], x1
ld1 {v7.d}[1], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v1.d}[1], [x0], x1
ld1 {v2.d}[1], [x0], x1
ld1 {v3.d}[1], [x0], x1
sub x0, x0, #4
ld1 {v4.8b}, [x0], x1
ld1 {v5.8b}, [x0], x1
ld1 {v6.8b}, [x0], x1
ld1 {v7.8b}, [x0], x1
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x0], x1
ld1 {v2.8b}, [x0], x1
ld1 {v3.8b}, [x0], x1
ld1 {v4.d}[1], [x0], x1
ld1 {v5.d}[1], [x0], x1
ld1 {v6.d}[1], [x0], x1
ld1 {v7.d}[1], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v1.d}[1], [x0], x1
ld1 {v2.d}[1], [x0], x1
ld1 {v3.d}[1], [x0], x1
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
h264_loop_filter_luma_intra
h264_loop_filter_luma_intra
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
sub x0, x0, x1, lsl #4
st1 {v4.8b}, [x0], x1
st1 {v5.8b}, [x0], x1
st1 {v6.8b}, [x0], x1
st1 {v7.8b}, [x0], x1
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
st1 {v4.d}[1], [x0], x1
st1 {v5.d}[1], [x0], x1
st1 {v6.d}[1], [x0], x1
st1 {v7.d}[1], [x0], x1
st1 {v0.d}[1], [x0], x1
st1 {v1.d}[1], [x0], x1
st1 {v2.d}[1], [x0], x1
st1 {v3.d}[1], [x0], x1
sub x0, x0, x1, lsl #4
st1 {v4.8b}, [x0], x1
st1 {v5.8b}, [x0], x1
st1 {v6.8b}, [x0], x1
st1 {v7.8b}, [x0], x1
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
st1 {v4.d}[1], [x0], x1
st1 {v5.d}[1], [x0], x1
st1 {v6.d}[1], [x0], x1
st1 {v7.d}[1], [x0], x1
st1 {v0.d}[1], [x0], x1
st1 {v1.d}[1], [x0], x1
st1 {v2.d}[1], [x0], x1
st1 {v3.d}[1], [x0], x1
9:
ret
ret
endfunc
.macro h264_loop_filter_chroma
@ -414,7 +411,6 @@ endfunc
function ff_h264_v_loop_filter_chroma_neon, export=1
h264_loop_filter_start
sxtw x1, w1
sub x0, x0, x1, lsl #1
ld1 {v18.8B}, [x0], x1
@ -433,7 +429,6 @@ endfunc
function ff_h264_h_loop_filter_chroma_neon, export=1
h264_loop_filter_start
sxtw x1, w1
sub x0, x0, #2
h_loop_filter_chroma420:
@ -466,7 +461,6 @@ h_loop_filter_chroma420:
endfunc
function ff_h264_h_loop_filter_chroma422_neon, export=1
sxtw x1, w1
h264_loop_filter_start
add x5, x0, x1
sub x0, x0, #2
@ -480,113 +474,113 @@ function ff_h264_h_loop_filter_chroma422_neon, export=1
endfunc
.macro h264_loop_filter_chroma_intra
uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
uabd v27.8b, v18.8b, v16.8b // abs(p1 - p0)
uabd v28.8b, v19.8b, v17.8b // abs(q1 - q0)
cmhi v26.8b, v30.8b, v26.8b // < alpha
cmhi v27.8b, v31.8b, v27.8b // < beta
cmhi v28.8b, v31.8b, v28.8b // < beta
and v26.8b, v26.8b, v27.8b
and v26.8b, v26.8b, v28.8b
mov x2, v26.d[0]
uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
uabd v27.8b, v18.8b, v16.8b // abs(p1 - p0)
uabd v28.8b, v19.8b, v17.8b // abs(q1 - q0)
cmhi v26.8b, v30.8b, v26.8b // < alpha
cmhi v27.8b, v31.8b, v27.8b // < beta
cmhi v28.8b, v31.8b, v28.8b // < beta
and v26.8b, v26.8b, v27.8b
and v26.8b, v26.8b, v28.8b
mov x2, v26.d[0]
ushll v4.8h, v18.8b, #1
ushll v6.8h, v19.8b, #1
cbz x2, 9f
uaddl v20.8h, v16.8b, v19.8b
uaddl v22.8h, v17.8b, v18.8b
add v20.8h, v20.8h, v4.8h
add v22.8h, v22.8h, v6.8h
uqrshrn v24.8b, v20.8h, #2
uqrshrn v25.8b, v22.8h, #2
bit v16.8b, v24.8b, v26.8b
bit v17.8b, v25.8b, v26.8b
ushll v4.8h, v18.8b, #1
ushll v6.8h, v19.8b, #1
cbz x2, 9f
uaddl v20.8h, v16.8b, v19.8b
uaddl v22.8h, v17.8b, v18.8b
add v20.8h, v20.8h, v4.8h
add v22.8h, v22.8h, v6.8h
uqrshrn v24.8b, v20.8h, #2
uqrshrn v25.8b, v22.8h, #2
bit v16.8b, v24.8b, v26.8b
bit v17.8b, v25.8b, v26.8b
.endm
function ff_h264_v_loop_filter_chroma_intra_neon, export=1
h264_loop_filter_start_intra
h264_loop_filter_start_intra
sub x0, x0, x1, lsl #1
ld1 {v18.8b}, [x0], x1
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
ld1 {v19.8b}, [x0]
sub x0, x0, x1, lsl #1
ld1 {v18.8b}, [x0], x1
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
ld1 {v19.8b}, [x0]
h264_loop_filter_chroma_intra
h264_loop_filter_chroma_intra
sub x0, x0, x1, lsl #1
st1 {v16.8b}, [x0], x1
st1 {v17.8b}, [x0], x1
sub x0, x0, x1, lsl #1
st1 {v16.8b}, [x0], x1
st1 {v17.8b}, [x0], x1
9:
ret
ret
endfunc
function ff_h264_h_loop_filter_chroma_mbaff_intra_neon, export=1
h264_loop_filter_start_intra
h264_loop_filter_start_intra
sub x4, x0, #2
sub x0, x0, #1
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4], x1
sub x4, x0, #2
sub x0, x0, #1
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4], x1
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra
h264_loop_filter_chroma_intra
st2 {v16.b,v17.b}[0], [x0], x1
st2 {v16.b,v17.b}[1], [x0], x1
st2 {v16.b,v17.b}[2], [x0], x1
st2 {v16.b,v17.b}[3], [x0], x1
st2 {v16.b,v17.b}[0], [x0], x1
st2 {v16.b,v17.b}[1], [x0], x1
st2 {v16.b,v17.b}[2], [x0], x1
st2 {v16.b,v17.b}[3], [x0], x1
9:
ret
ret
endfunc
function ff_h264_h_loop_filter_chroma_intra_neon, export=1
h264_loop_filter_start_intra
h264_loop_filter_start_intra
sub x4, x0, #2
sub x0, x0, #1
sub x4, x0, #2
sub x0, x0, #1
h_loop_filter_chroma420_intra:
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4], x1
ld1 {v18.s}[1], [x4], x1
ld1 {v16.s}[1], [x4], x1
ld1 {v17.s}[1], [x4], x1
ld1 {v19.s}[1], [x4], x1
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4], x1
ld1 {v18.s}[1], [x4], x1
ld1 {v16.s}[1], [x4], x1
ld1 {v17.s}[1], [x4], x1
ld1 {v19.s}[1], [x4], x1
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra
h264_loop_filter_chroma_intra
st2 {v16.b,v17.b}[0], [x0], x1
st2 {v16.b,v17.b}[1], [x0], x1
st2 {v16.b,v17.b}[2], [x0], x1
st2 {v16.b,v17.b}[3], [x0], x1
st2 {v16.b,v17.b}[4], [x0], x1
st2 {v16.b,v17.b}[5], [x0], x1
st2 {v16.b,v17.b}[6], [x0], x1
st2 {v16.b,v17.b}[7], [x0], x1
st2 {v16.b,v17.b}[0], [x0], x1
st2 {v16.b,v17.b}[1], [x0], x1
st2 {v16.b,v17.b}[2], [x0], x1
st2 {v16.b,v17.b}[3], [x0], x1
st2 {v16.b,v17.b}[4], [x0], x1
st2 {v16.b,v17.b}[5], [x0], x1
st2 {v16.b,v17.b}[6], [x0], x1
st2 {v16.b,v17.b}[7], [x0], x1
9:
ret
ret
endfunc
function ff_h264_h_loop_filter_chroma422_intra_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #2
add x5, x0, x1, lsl #3
sub x0, x0, #1
mov x7, x30
bl h_loop_filter_chroma420_intra
sub x0, x5, #1
mov x30, x7
b h_loop_filter_chroma420_intra
h264_loop_filter_start_intra
sub x4, x0, #2
add x5, x0, x1, lsl #3
sub x0, x0, #1
mov x7, x30
bl h_loop_filter_chroma420_intra
sub x0, x5, #1
mov x30, x7
b h_loop_filter_chroma420_intra
endfunc
.macro biweight_16 macs, macd
@ -691,7 +685,6 @@ endfunc
.macro biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
sxtw x2, w2
lsr w8, w5, #31
add w7, w7, #1
eor w8, w8, w6, lsr #30
@ -800,7 +793,6 @@ endfunc
.macro weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
sxtw x1, w1
cmp w3, #1
mov w6, #1
lsl w5, w5, w3
@ -827,3 +819,258 @@ endfunc
weight_func 16
weight_func 8
weight_func 4
// Shared prologue for the 10-bit (non-intra) loop filters.
// w2 = alpha, w3 = beta, x4 -> packed int8_t tc0[4].
// Loads tc0 into v24.s[0] and scales alpha/beta by << 2 (BIT_DEPTH-8)
// for 10-bit samples.  Returns from the *calling* function early when
// alpha or beta is zero, or when all four tc0 bytes are negative
// (nothing to filter).  NOTE(review): the sign test is inferred from
// the and/ands chain; confirm against the 8-bit h264_loop_filter_start.
.macro h264_loop_filter_start_10
cmp w2, #0
ldr w6, [x4] // w6 = four packed tc0 bytes
ccmp w3, #0, #0, ne
lsl w2, w2, #2 // alpha <<= (BIT_DEPTH - 8)
mov v24.S[0], w6
lsl w3, w3, #2 // beta  <<= (BIT_DEPTH - 8)
and w8, w6, w6, lsl #16
b.eq 1f // alpha == 0 || beta == 0
ands w8, w8, w8, lsl #8 // combine the sign bits of all tc0 bytes
b.ge 2f // at least one tc0 >= 0: do filter
1:
ret
2:
.endm
// Shared prologue for the 10-bit intra loop filters: returns from the
// calling function when both alpha (w2) and beta (w3) are zero,
// otherwise scales them by << 2 (10-bit range) and broadcasts them
// into v30/v31 for the filter macros.
.macro h264_loop_filter_start_intra_10
orr w4, w2, w3
cbnz w4, 1f
ret
1:
lsl w2, w2, #2
lsl w3, w3, #2
dup v30.8h, w2 // alpha
dup v31.8h, w3 // beta
.endm
// 10-bit chroma deblock core (non-intra).
// Inputs: v18 = p1, v16 = p0, v0 = q0, v2 = q1 (8 pixels, .8h),
// w2/w3 = alpha/beta (pre-scaled << 2), v24.s[0] = packed tc0 bytes.
// Applies the tc-clipped H.264 chroma delta to p0/q0; results are
// clamped to [0, 1023] and left in v16 (p0') and v0 (q0').  Branches
// to the caller's label 9: when the alpha/beta tests reject all pixels.
.macro h264_loop_filter_chroma_10
dup v22.8h, w2 // alpha
dup v23.8h, w3 // beta
uxtl v24.8h, v24.8b // tc0
uabd v26.8h, v16.8h, v0.8h // abs(p0 - q0)
uabd v28.8h, v18.8h, v16.8h // abs(p1 - p0)
uabd v30.8h, v2.8h, v0.8h // abs(q1 - q0)
cmhi v26.8h, v22.8h, v26.8h // < alpha
cmhi v28.8h, v23.8h, v28.8h // < beta
cmhi v30.8h, v23.8h, v30.8h // < beta
and v26.16b, v26.16b, v28.16b
mov v4.16b, v0.16b
sub v4.8h, v4.8h, v16.8h // q0 - p0
and v26.16b, v26.16b, v30.16b // v26 = per-pixel filter mask
shl v4.8h, v4.8h, #2 // (q0 - p0) << 2
mov x8, v26.d[0]
mov x9, v26.d[1]
sli v24.8h, v24.8h, #8 // duplicate each tc0 byte per pixel pair
uxtl v24.8h, v24.8b
add v4.8h, v4.8h, v18.8h
adds x8, x8, x9
shl v24.8h, v24.8h, #2 // tc0 << (BIT_DEPTH - 8)
b.eq 9f // mask all zero: nothing to filter
movi v31.8h, #3 // (tc0 - 1) << (BIT_DEPTH - 8)) + 1
uqsub v24.8h, v24.8h, v31.8h // tc = clip bound
sub v4.8h, v4.8h, v2.8h
srshr v4.8h, v4.8h, #3 // delta = ((q0-p0)*4 + p1 - q1 + 4) >> 3
smin v4.8h, v4.8h, v24.8h // clip delta to +/-tc
neg v25.8h, v24.8h
smax v4.8h, v4.8h, v25.8h
and v4.16b, v4.16b, v26.16b // zero delta where mask rejected
add v16.8h, v16.8h, v4.8h // p0' = p0 + delta
sub v0.8h, v0.8h, v4.8h // q0' = q0 - delta
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
movi v5.8h, #0
smin v0.8h, v0.8h, v4.8h
smin v16.8h, v16.8h, v4.8h
smax v0.8h, v0.8h, v5.8h
smax v16.8h, v16.8h, v5.8h
.endm
// void ff_h264_v_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride,
//                                           int alpha, int beta, int8_t *tc0)
// 10-bit chroma deblocking across a horizontal edge: loads the two rows
// on each side of *pix, filters, and writes p0'/q0' back.
function ff_h264_v_loop_filter_chroma_neon_10, export=1
h264_loop_filter_start_10
mov x10, x0 // x10 -> first row below the edge
sub x0, x0, x1, lsl #1 // x0  -> two rows above the edge
ld1 {v18.8h}, [x0 ], x1 // p1
ld1 {v0.8h}, [x10], x1 // q0
ld1 {v16.8h}, [x0 ], x1 // p0
ld1 {v2.8h}, [x10] // q1
h264_loop_filter_chroma_10
sub x0, x10, x1, lsl #1
st1 {v16.8h}, [x0], x1 // p0'
st1 {v0.8h}, [x0], x1 // q0'
9:
ret
endfunc
// void ff_h264_h_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride,
//                                           int alpha, int beta, int8_t *tc0)
// 10-bit chroma deblocking across a vertical edge (4:2:0): loads an
// 8-row x 4-pixel block straddling the edge, transposes so columns
// become p1/p0/q0/q1, filters, transposes back and stores.  The inner
// label is reused by the 4:2:2 variant below.
function ff_h264_h_loop_filter_chroma_neon_10, export=1
h264_loop_filter_start_10
sub x0, x0, #4 // access the 2nd left pixel
h_loop_filter_chroma420_10:
add x10, x0, x1, lsl #2 // second half of the rows
ld1 {v18.d}[0], [x0 ], x1
ld1 {v18.d}[1], [x10], x1
ld1 {v16.d}[0], [x0 ], x1
ld1 {v16.d}[1], [x10], x1
ld1 {v0.d}[0], [x0 ], x1
ld1 {v0.d}[1], [x10], x1
ld1 {v2.d}[0], [x0 ], x1
ld1 {v2.d}[1], [x10], x1
transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
h264_loop_filter_chroma_10
transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
sub x0, x10, x1, lsl #3 // rewind to the first row
st1 {v18.d}[0], [x0], x1
st1 {v16.d}[0], [x0], x1
st1 {v0.d}[0], [x0], x1
st1 {v2.d}[0], [x0], x1
st1 {v18.d}[1], [x0], x1
st1 {v16.d}[1], [x0], x1
st1 {v0.d}[1], [x0], x1
st1 {v2.d}[1], [x0], x1
9:
ret
endfunc
// 4:2:2 variant of the horizontal 10-bit chroma filter: runs the
// 4:2:0 body twice with doubled stride — once from the original row,
// once from the next row (x5).  x7 preserves the return address across
// the first call; v24 is reloaded from w6 because the filter macro
// clobbers it.
function ff_h264_h_loop_filter_chroma422_neon_10, export=1
h264_loop_filter_start_10
add x5, x0, x1
sub x0, x0, #4
add x1, x1, x1 // double the stride
mov x7, x30 // save lr across the bl
bl h_loop_filter_chroma420_10
mov x30, x7
sub x0, x5, #4
mov v24.s[0], w6 // reload packed tc0 for the second pass
b h_loop_filter_chroma420_10
endfunc
// 10-bit chroma deblock core (intra).
// Inputs: v18 = p1, v16 = p0, v17 = q0, v19 = q1 (.8h),
// v30/v31 = alpha/beta (set by h264_loop_filter_start_intra_10).
// Computes p0' = (2*p1 + p0 + q1 + 2) >> 2 and
// q0' = (2*q1 + q0 + p1 + 2) >> 2 for pixels that pass the alpha/beta
// tests; results merged into v16/v17.  Branches to the caller's
// label 9: when no pixel qualifies.
.macro h264_loop_filter_chroma_intra_10
uabd v26.8h, v16.8h, v17.8h // abs(p0 - q0)
uabd v27.8h, v18.8h, v16.8h // abs(p1 - p0)
uabd v28.8h, v19.8h, v17.8h // abs(q1 - q0)
cmhi v26.8h, v30.8h, v26.8h // < alpha
cmhi v27.8h, v31.8h, v27.8h // < beta
cmhi v28.8h, v31.8h, v28.8h // < beta
and v26.16b, v26.16b, v27.16b
and v26.16b, v26.16b, v28.16b // v26 = per-pixel filter mask
mov x2, v26.d[0]
mov x3, v26.d[1]
shl v4.8h, v18.8h, #1 // 2*p1
shl v6.8h, v19.8h, #1 // 2*q1
adds x2, x2, x3
b.eq 9f // mask all zero: nothing to filter
add v20.8h, v16.8h, v19.8h // p0 + q1
add v22.8h, v17.8h, v18.8h // q0 + p1
add v20.8h, v20.8h, v4.8h
add v22.8h, v22.8h, v6.8h
urshr v24.8h, v20.8h, #2 // rounded >> 2
urshr v25.8h, v22.8h, #2
bit v16.16b, v24.16b, v26.16b // p0' where mask set
bit v17.16b, v25.16b, v26.16b // q0' where mask set
.endm
// void ff_h264_v_loop_filter_chroma_intra_neon_10(uint8_t *pix,
//     ptrdiff_t stride, int alpha, int beta)
// Intra 10-bit chroma deblocking across a horizontal edge.
function ff_h264_v_loop_filter_chroma_intra_neon_10, export=1
h264_loop_filter_start_intra_10
mov x9, x0 // x9 -> rows below the edge
sub x0, x0, x1, lsl #1 // x0 -> two rows above
ld1 {v18.8h}, [x0], x1 // p1
ld1 {v17.8h}, [x9], x1 // q0
ld1 {v16.8h}, [x0], x1 // p0
ld1 {v19.8h}, [x9] // q1
h264_loop_filter_chroma_intra_10
sub x0, x9, x1, lsl #1
st1 {v16.8h}, [x0], x1 // p0'
st1 {v17.8h}, [x0], x1 // q0'
9:
ret
endfunc
// Intra horizontal chroma filter for the MBAFF case (4 rows), 10-bit.
// Loads 4 rows of 8 pixels starting two pixels left of the edge,
// transposes so columns become p1/p0/q0/q1, filters, then stores the
// two filtered columns interleaved with st2.
function ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10, export=1
h264_loop_filter_start_intra_10
sub x4, x0, #4 // read base: 2 pixels left (10-bit)
sub x0, x0, #2 // write base: 1 pixel left
add x9, x4, x1, lsl #1 // rows 2..3
ld1 {v18.8h}, [x4], x1
ld1 {v17.8h}, [x9], x1
ld1 {v16.8h}, [x4], x1
ld1 {v19.8h}, [x9], x1
transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra_10
st2 {v16.h,v17.h}[0], [x0], x1
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
9:
ret
endfunc
// Intra horizontal chroma filter (4:2:0, 8 rows), 10-bit.  Loads,
// transposes, filters and stores the two columns adjacent to the
// edge.  The inner label is reused by the 4:2:2 variant below.
function ff_h264_h_loop_filter_chroma_intra_neon_10, export=1
h264_loop_filter_start_intra_10
sub x4, x0, #4 // read base: 2 pixels left (10-bit)
sub x0, x0, #2 // write base: 1 pixel left
h_loop_filter_chroma420_intra_10:
add x9, x4, x1, lsl #2 // second half of the rows
ld1 {v18.4h}, [x4], x1
ld1 {v18.d}[1], [x9], x1
ld1 {v16.4h}, [x4], x1
ld1 {v16.d}[1], [x9], x1
ld1 {v17.4h}, [x4], x1
ld1 {v17.d}[1], [x9], x1
ld1 {v19.4h}, [x4], x1
ld1 {v19.d}[1], [x9], x1
transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra_10
st2 {v16.h,v17.h}[0], [x0], x1
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
st2 {v16.h,v17.h}[4], [x0], x1
st2 {v16.h,v17.h}[5], [x0], x1
st2 {v16.h,v17.h}[6], [x0], x1
st2 {v16.h,v17.h}[7], [x0], x1
9:
ret
endfunc
// Intra horizontal chroma filter (4:2:2, 16 rows), 10-bit: runs the
// 8-row 4:2:0 body twice, the second pass starting 8 rows down (x5).
// x7 preserves the return address across the first bl.
function ff_h264_h_loop_filter_chroma422_intra_neon_10, export=1
h264_loop_filter_start_intra_10
sub x4, x0, #4
add x5, x0, x1, lsl #3 // x5 -> row 8
sub x0, x0, #2
mov x7, x30 // save lr across the bl
bl h_loop_filter_chroma420_intra_10
mov x4, x9 // resume reading where the first pass stopped
sub x0, x5, #2
mov x30, x7
b h_loop_filter_chroma420_intra_10
endfunc

Просмотреть файл

@ -24,6 +24,7 @@
function ff_h264_idct_add_neon, export=1
.L_ff_h264_idct_add_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.4H, v1.4H, v2.4H, v3.4H}, [x1]
sxtw x2, w2
movi v30.8H, #0
@ -79,6 +80,7 @@ endfunc
function ff_h264_idct_dc_add_neon, export=1
.L_ff_h264_idct_dc_add_neon:
AARCH64_VALID_CALL_TARGET
sxtw x2, w2
mov w3, #0
ld1r {v2.8H}, [x1]
@ -266,6 +268,7 @@ endfunc
function ff_h264_idct8_add_neon, export=1
.L_ff_h264_idct8_add_neon:
AARCH64_VALID_CALL_TARGET
movi v19.8H, #0
sxtw x2, w2
ld1 {v24.8H, v25.8H}, [x1]
@ -330,6 +333,7 @@ endfunc
function ff_h264_idct8_dc_add_neon, export=1
.L_ff_h264_idct8_dc_add_neon:
AARCH64_VALID_CALL_TARGET
mov w3, #0
sxtw x2, w2
ld1r {v31.8H}, [x1]

Просмотреть файл

@ -45,42 +45,84 @@ void ff_pred8x8_0lt_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_l00_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_0l0_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_vert_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_hor_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_plane_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred16x16_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_vert_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_hor_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_plane_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_128_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_left_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_l0t_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_0lt_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_l00_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_0l0_dc_neon_10(uint8_t *src, ptrdiff_t stride);
static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
const int bit_depth,
const int chroma_format_idc)
{
const int high_depth = bit_depth > 8;
if (high_depth)
return;
if (chroma_format_idc <= 1) {
h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
codec_id != AV_CODEC_ID_VP8) {
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
if (bit_depth == 8) {
if (chroma_format_idc <= 1) {
h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
codec_id != AV_CODEC_ID_VP8) {
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
}
}
}
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon;
h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon;
h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
}
if (bit_depth == 10) {
if (chroma_format_idc <= 1) {
h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon_10;
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon_10;
if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon_10;
h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon_10;
if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
codec_id != AV_CODEC_ID_VP8) {
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon_10;
h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon_10;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon_10;
h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon_10;
h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon_10;
h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon_10;
h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon_10;
}
}
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon_10;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon_10;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon_10;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon_10;
if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon_10;
}
}
av_cold void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,

Просмотреть файл

@ -81,8 +81,8 @@ function ff_pred16x16_dc_neon, export=1
.L_pred16x16_dc_end:
mov w3, #8
6: st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x0], x1
subs w3, w3, #1
st1 {v0.16b}, [x0], x1
b.ne 6b
ret
endfunc
@ -91,8 +91,8 @@ function ff_pred16x16_hor_neon, export=1
sub x2, x0, #1
mov w3, #16
1: ld1r {v0.16b}, [x2], x1
st1 {v0.16b}, [x0], x1
subs w3, w3, #1
st1 {v0.16b}, [x0], x1
b.ne 1b
ret
endfunc
@ -102,9 +102,9 @@ function ff_pred16x16_vert_neon, export=1
add x1, x1, x1
ld1 {v0.16b}, [x2], x1
mov w3, #8
1: st1 {v0.16b}, [x0], x1
1: subs w3, w3, #1
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x2], x1
subs w3, w3, #1
b.ne 1b
ret
endfunc
@ -158,8 +158,8 @@ function ff_pred16x16_plane_neon, export=1
add v1.8h, v1.8h, v2.8h
sqshrun2 v0.16b, v1.8h, #5
add v1.8h, v1.8h, v3.8h
st1 {v0.16b}, [x0], x1
subs w3, w3, #1
st1 {v0.16b}, [x0], x1
b.ne 1b
ret
endfunc
@ -175,8 +175,8 @@ function ff_pred8x8_hor_neon, export=1
sub x2, x0, #1
mov w3, #8
1: ld1r {v0.8b}, [x2], x1
st1 {v0.8b}, [x0], x1
subs w3, w3, #1
st1 {v0.8b}, [x0], x1
b.ne 1b
ret
endfunc
@ -186,9 +186,9 @@ function ff_pred8x8_vert_neon, export=1
lsl x1, x1, #1
ld1 {v0.8b}, [x2], x1
mov w3, #4
1: st1 {v0.8b}, [x0], x1
1: subs w3, w3, #1
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x2], x1
subs w3, w3, #1
b.ne 1b
ret
endfunc
@ -232,9 +232,9 @@ function ff_pred8x8_plane_neon, export=1
mov w3, #8
1:
sqshrun v0.8b, v1.8h, #5
subs w3, w3, #1
add v1.8h, v1.8h, v2.8h
st1 {v0.8b}, [x0], x1
subs w3, w3, #1
b.ne 1b
ret
endfunc
@ -290,9 +290,9 @@ function ff_pred8x8_dc_neon, export=1
.L_pred8x8_dc_end:
mov w3, #4
add x2, x0, x1, lsl #2
6: st1 {v0.8b}, [x0], x1
6: subs w3, w3, #1
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x2], x1
subs w3, w3, #1
b.ne 6b
ret
endfunc
@ -359,3 +359,407 @@ function ff_pred8x8_0l0_dc_neon, export=1
dup v1.8b, v1.b[0]
b .L_pred8x8_dc_end
endfunc
// Load a column of 16-bit pixels into vector register \rd.
//   rd: destination vector register (written one .h lane at a time)
//   rs: source address register, advanced by \rt after each load
//   rt: row stride in bytes
//   n : number of lanes to fill (4 or 8); hi=1 fills lanes 4-7 only
.macro ldcol.16 rd, rs, rt, n=4, hi=0
.if \n >= 4 && \hi == 0
ld1 {\rd\().h}[0], [\rs], \rt
ld1 {\rd\().h}[1], [\rs], \rt
ld1 {\rd\().h}[2], [\rs], \rt
ld1 {\rd\().h}[3], [\rs], \rt
.endif
.if \n == 8 || \hi == 1
ld1 {\rd\().h}[4], [\rs], \rt
ld1 {\rd\().h}[5], [\rs], \rt
ld1 {\rd\().h}[6], [\rs], \rt
ld1 {\rd\().h}[7], [\rs], \rt
.endif
.endm
// slower than C
/*
function ff_pred16x16_128_dc_neon_10, export=1
movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
b .L_pred16x16_dc_10_end
endfunc
*/
// 16x16 TOP_DC prediction, 10-bit: fill the block with the rounded
// average of the 16 pixels directly above it.
// x0 -> dest (16-bit pixels), x1 = row stride in bytes.
function ff_pred16x16_top_dc_neon_10, export=1
sub x2, x0, x1
ld1 {v0.8h, v1.8h}, [x2]
add v0.8h, v0.8h, v1.8h
// horizontal sum of 16 values, then rounded divide by 16
addv h0, v0.8h
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
// shared store loop lives in ff_pred16x16_dc_neon_10
b .L_pred16x16_dc_10_end
endfunc
// slower than C
/*
function ff_pred16x16_left_dc_neon_10, export=1
sub x2, x0, #2 // access to the "left" column
ldcol.16 v0, x2, x1, 8
ldcol.16 v1, x2, x1, 8 // load "left" column
add v0.8h, v0.8h, v1.8h
addv h0, v0.8h
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
b .L_pred16x16_dc_10_end
endfunc
*/
// 16x16 DC prediction, 10-bit: fill the block with the rounded average
// of the 16 "top" and 16 "left" neighbour pixels (32 samples, >> 5).
// x0 -> dest (16-bit pixels), x1 = row stride in bytes.
// Also provides the .L_pred16x16_dc_10_end store loop used by the
// top-dc variant above.
function ff_pred16x16_dc_neon_10, export=1
sub x2, x0, x1 // access to the "top" row
sub x3, x0, #2 // access to the "left" column
ld1 {v0.8h, v1.8h}, [x2]
ldcol.16 v2, x3, x1, 8
ldcol.16 v3, x3, x1, 8 // load pixels in "top" row and "left" col
add v0.8h, v0.8h, v1.8h
add v2.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v2.8h
addv h0, v0.8h
urshr v0.4h, v0.4h, #5
dup v0.8h, v0.h[0]
.L_pred16x16_dc_10_end:
// v0/v1 hold one full 16-pixel row; store 16 rows, two per iteration
mov v1.16b, v0.16b
mov w3, #8
6: st1 {v0.8h, v1.8h}, [x0], x1
subs w3, w3, #1
st1 {v0.8h, v1.8h}, [x0], x1
b.ne 6b
ret
endfunc
// 16x16 horizontal prediction, 10-bit: replicate each "left" neighbour
// pixel across its row. x0 -> dest, x1 = row stride in bytes.
function ff_pred16x16_hor_neon_10, export=1
sub x2, x0, #2
add x3, x0, #16 // second half (pixels 8-15) of each row
mov w4, #16
1: ld1r {v0.8h}, [x2], x1
subs w4, w4, #1
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x3], x1
b.ne 1b
ret
endfunc
// 16x16 vertical prediction, 10-bit: copy the "top" row into all 16
// rows. x0 -> dest, x1 = row stride in bytes.
// Uses two write pointers (x0 and x2) with a doubled stride so each
// loop iteration fills two rows.
function ff_pred16x16_vert_neon_10, export=1
sub x2, x0, x1
add x1, x1, x1
ld1 {v0.8h, v1.8h}, [x2], x1
mov w3, #8
1: subs w3, w3, #1
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x2], x1
b.ne 1b
ret
endfunc
// 16x16 plane (gradient) prediction, 10-bit.
// x0 -> dest, x1 = row stride in bytes.
// Computes the H.264 plane-mode gradients from the top row and left
// column (weighted by the p16weight table), then iterates the linear
// surface a = base + b*x + c*y per row, clamping results to [0, 1023].
function ff_pred16x16_plane_neon_10, export=1
sub x3, x0, x1
movrel x4, p16weight
add x2, x3, #16
sub x3, x3, #2
ld1 {v0.8h}, [x3]
ld1 {v2.8h}, [x2], x1
ldcol.16 v1, x3, x1, 8
add x3, x3, x1
ldcol.16 v3, x3, x1, 8
// reverse the first-half neighbours so differences pair up symmetrically
rev64 v16.8h, v0.8h
rev64 v17.8h, v1.8h
ext v0.16b, v16.16b, v16.16b, #8
ext v1.16b, v17.16b, v17.16b, #8
add v7.8h, v2.8h, v3.8h
sub v2.8h, v2.8h, v0.8h
sub v3.8h, v3.8h, v1.8h
ld1 {v0.8h}, [x4]
mul v2.8h, v2.8h, v0.8h
mul v3.8h, v3.8h, v0.8h
addp v2.8h, v2.8h, v3.8h
addp v2.8h, v2.8h, v2.8h
addp v2.4h, v2.4h, v2.4h
// (sum * 5 + 32) >> 6: the standard plane b/c scaling
sshll v3.4s, v2.4h, #2
saddw v2.4s, v3.4s, v2.4h
rshrn v4.4h, v2.4s, #6
trn2 v5.4h, v4.4h, v4.4h
add v2.4h, v4.4h, v5.4h
shl v3.4h, v2.4h, #3
ext v7.16b, v7.16b, v7.16b, #14
sub v3.4h, v3.4h, v2.4h // 7 * (b + c)
add v7.4h, v7.4h, v0.4h
shl v2.4h, v7.4h, #4
ssubl v2.4s, v2.4h, v3.4h
shl v3.4h, v4.4h, #4
ext v0.16b, v0.16b, v0.16b, #14
ssubl v6.4s, v5.4h, v3.4h
mov v0.h[0], wzr
mul v0.8h, v0.8h, v4.h[0]
dup v16.4s, v2.s[0]
dup v17.4s, v2.s[0]
dup v2.8h, v4.h[0]
dup v3.4s, v6.s[0]
shl v2.8h, v2.8h, #3
saddw v16.4s, v16.4s, v0.4h
saddw2 v17.4s, v17.4s, v0.8h
saddw v3.4s, v3.4s, v2.4h
mov w3, #16
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
1:
// emit one 16-pixel row (two 8-lane halves), advance accumulators
sqshrun v0.4h, v16.4s, #5
sqshrun2 v0.8h, v17.4s, #5
saddw v16.4s, v16.4s, v2.4h
saddw v17.4s, v17.4s, v2.4h
sqshrun v1.4h, v16.4s, #5
sqshrun2 v1.8h, v17.4s, #5
add v16.4s, v16.4s, v3.4s
add v17.4s, v17.4s, v3.4s
subs w3, w3, #1
// clamp to the 10-bit maximum (sqshrun already clamped at 0)
smin v0.8h, v0.8h, v4.8h
smin v1.8h, v1.8h, v4.8h
st1 {v0.8h, v1.8h}, [x0], x1
b.ne 1b
ret
endfunc
// 8x8 horizontal prediction, 10-bit: replicate each "left" neighbour
// pixel across its row. x0 -> dest, x1 = row stride in bytes.
function ff_pred8x8_hor_neon_10, export=1
sub x2, x0, #2
mov w3, #8
1: ld1r {v0.8h}, [x2], x1
subs w3, w3, #1
st1 {v0.8h}, [x0], x1
b.ne 1b
ret
endfunc
// 8x8 vertical prediction, 10-bit: copy the "top" row into all 8 rows.
// x0 -> dest, x1 = row stride in bytes.
// Two write pointers with a doubled stride store two rows per iteration.
function ff_pred8x8_vert_neon_10, export=1
sub x2, x0, x1
lsl x1, x1, #1
ld1 {v0.8h}, [x2], x1
mov w3, #4
1: subs w3, w3, #1
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x2], x1
b.ne 1b
ret
endfunc
// 8x8 plane (gradient) prediction, 10-bit.
// x0 -> dest, x1 = row stride in bytes.
// Derives the b/c gradients from the top row and left column using the
// p8weight/p16weight tables, then iterates the linear surface per row,
// clamping to [0, 1023].
function ff_pred8x8_plane_neon_10, export=1
sub x3, x0, x1
movrel x4, p8weight
movrel x5, p16weight
add x2, x3, #8
sub x3, x3, #2
ld1 {v0.d}[0], [x3]
ld1 {v2.d}[0], [x2], x1
ldcol.16 v0, x3, x1, hi=1
add x3, x3, x1
ldcol.16 v3, x3, x1, 4
add v7.8h, v2.8h, v3.8h
rev64 v0.8h, v0.8h
trn1 v2.2d, v2.2d, v3.2d
sub v2.8h, v2.8h, v0.8h
ld1 {v6.8h}, [x4]
mul v2.8h, v2.8h, v6.8h
ld1 {v0.8h}, [x5]
saddlp v2.4s, v2.8h
addp v2.4s, v2.4s, v2.4s
// (sum * 17 + 16) >> 5 scaling for the 8x8 plane gradients
shl v3.4s, v2.4s, #4
add v2.4s, v3.4s, v2.4s
rshrn v5.4h, v2.4s, #5
addp v2.4h, v5.4h, v5.4h
shl v3.4h, v2.4h, #1
add v3.4h, v3.4h, v2.4h
rev64 v7.4h, v7.4h
add v7.4h, v7.4h, v0.4h
shl v2.4h, v7.4h, #4
ssubl v2.4s, v2.4h, v3.4h
ext v0.16b, v0.16b, v0.16b, #14
mov v0.h[0], wzr
mul v0.8h, v0.8h, v5.h[0]
dup v1.4s, v2.s[0]
dup v2.4s, v2.s[0]
dup v3.8h, v5.h[1]
saddw v1.4s, v1.4s, v0.4h
saddw2 v2.4s, v2.4s, v0.8h
mov w3, #8
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
1:
// emit one 8-pixel row, then advance the row accumulator by c
sqshrun v0.4h, v1.4s, #5
sqshrun2 v0.8h, v2.4s, #5
saddw v1.4s, v1.4s, v3.4h
saddw v2.4s, v2.4s, v3.4h
subs w3, w3, #1
smin v0.8h, v0.8h, v4.8h
st1 {v0.8h}, [x0], x1
b.ne 1b
ret
endfunc
// 8x8 DC_128 prediction, 10-bit: fill the block with the mid-grey value
// 512 (1 << (bit_depth - 1)). x0 -> dest, x1 = row stride in bytes.
function ff_pred8x8_128_dc_neon_10, export=1
movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
movi v1.8h, #2, lsl #8
// shared store loop lives in ff_pred8x8_dc_neon_10
b .L_pred8x8_dc_10_end
endfunc
// 8x8 TOP_DC prediction, 10-bit: each 4x8 half-column uses the rounded
// average of the 4 "top" pixels above it. x0 -> dest, x1 = stride.
function ff_pred8x8_top_dc_neon_10, export=1
sub x2, x0, x1
ld1 {v0.8h}, [x2]
// pairwise sums reduce each group of 4 top pixels to one sum
addp v0.8h, v0.8h, v0.8h
addp v0.4h, v0.4h, v0.4h
zip1 v0.4h, v0.4h, v0.4h
urshr v2.4h, v0.4h, #2
// replicate the two averages across the left/right 4-pixel halves
zip1 v0.8h, v2.8h, v2.8h
zip1 v1.8h, v2.8h, v2.8h
b .L_pred8x8_dc_10_end
endfunc
// 8x8 LEFT_DC prediction, 10-bit: the top and bottom 8x4 halves each use
// the rounded average of their 4 "left" pixels. x0 -> dest, x1 = stride.
function ff_pred8x8_left_dc_neon_10, export=1
sub x2, x0, #2
ldcol.16 v0, x2, x1, 8
addp v0.8h, v0.8h, v0.8h
addp v0.4h, v0.4h, v0.4h
urshr v2.4h, v0.4h, #2
// v0 = rows 0-3 value, v1 = rows 4-7 value
dup v1.8h, v2.h[1]
dup v0.8h, v2.h[0]
b .L_pred8x8_dc_10_end
endfunc
// 8x8 DC prediction, 10-bit: the block is split into four 4x4 quadrants,
// each filled with the average of its available top/left neighbours
// (8 samples >> 3 for the corner quadrants, 4 samples >> 2 otherwise).
// x0 -> dest, x1 = row stride in bytes.
// Also provides the .L_pred8x8_dc_10_end store loop shared by the other
// 8x8 DC variants.
function ff_pred8x8_dc_neon_10, export=1
sub x2, x0, x1
sub x3, x0, #2
ld1 {v0.8h}, [x2]
ldcol.16 v1, x3, x1, 8
addp v0.8h, v0.8h, v0.8h
addp v1.8h, v1.8h, v1.8h
trn1 v2.2s, v0.2s, v1.2s
trn2 v3.2s, v0.2s, v1.2s
addp v4.4h, v2.4h, v3.4h
addp v5.4h, v4.4h, v4.4h
urshr v6.4h, v5.4h, #3
urshr v7.4h, v4.4h, #2
// assemble per-quadrant DC values: v0 = top row pair, v1 = bottom pair
dup v0.8h, v6.h[0]
dup v2.8h, v7.h[2]
dup v1.8h, v7.h[3]
dup v3.8h, v6.h[1]
zip1 v0.2d, v0.2d, v2.2d
zip1 v1.2d, v1.2d, v3.2d
.L_pred8x8_dc_10_end:
// store rows 0-3 from v0 and rows 4-7 from v1 in parallel
mov w3, #4
add x2, x0, x1, lsl #2
6: st1 {v0.8h}, [x0], x1
subs w3, w3, #1
st1 {v1.8h}, [x2], x1
b.ne 6b
ret
endfunc
// 8x8 DC prediction with only the upper-left neighbours available
// (ALZHEIMER_DC_L0T variant), 10-bit. x0 -> dest, x1 = stride.
function ff_pred8x8_l0t_dc_neon_10, export=1
sub x2, x0, x1
sub x3, x0, #2
ld1 {v0.8h}, [x2]
ldcol.16 v1, x3, x1, 4
addp v0.8h, v0.8h, v0.8h
addp v1.4h, v1.4h, v1.4h
addp v0.4h, v0.4h, v0.4h
addp v1.4h, v1.4h, v1.4h
add v1.4h, v1.4h, v0.4h
urshr v2.4h, v0.4h, #2
urshr v3.4h, v1.4h, #3 // the pred4x4 part
// combine quadrant values; v0 = top row pair, v1 = bottom pair
dup v4.4h, v3.h[0]
dup v5.4h, v2.h[0]
dup v6.4h, v2.h[1]
zip1 v0.2d, v4.2d, v6.2d
zip1 v1.2d, v5.2d, v6.2d
b .L_pred8x8_dc_10_end
endfunc
// 8x8 DC prediction with only the upper-left "left" pixels available
// (ALZHEIMER_DC_L00 variant), 10-bit: top half averages the 4 left
// pixels, bottom half falls back to mid-grey 512. x0 -> dest, x1 = stride.
function ff_pred8x8_l00_dc_neon_10, export=1
sub x2, x0, #2
ldcol.16 v0, x2, x1, 4
addp v0.4h, v0.4h, v0.4h
addp v0.4h, v0.4h, v0.4h
urshr v0.4h, v0.4h, #2
movi v1.8h, #2, lsl #8 // 512
dup v0.8h, v0.h[0]
b .L_pred8x8_dc_10_end
endfunc
// 8x8 DC prediction with the top row and lower-left pixels available
// (ALZHEIMER_DC_0LT variant), 10-bit. x0 -> dest, x1 = stride.
function ff_pred8x8_0lt_dc_neon_10, export=1
add x3, x0, x1, lsl #2
sub x2, x0, x1
sub x3, x3, #2
ld1 {v0.8h}, [x2]
ldcol.16 v1, x3, x1, hi=1
addp v0.8h, v0.8h, v0.8h
addp v1.8h, v1.8h, v1.8h
addp v0.4h, v0.4h, v0.4h
addp v1.4h, v1.4h, v1.4h
zip1 v0.2s, v0.2s, v1.2s
add v1.4h, v0.4h, v1.4h
urshr v2.4h, v0.4h, #2
urshr v3.4h, v1.4h, #3
// combine quadrant values; v0 = top row pair, v1 = bottom pair
dup v4.4h, v2.h[0]
dup v5.4h, v2.h[3]
dup v6.4h, v2.h[1]
dup v7.4h, v3.h[1]
zip1 v0.2d, v4.2d, v6.2d
zip1 v1.2d, v5.2d, v7.2d
b .L_pred8x8_dc_10_end
endfunc
// 8x8 DC prediction with only the lower-left pixels available
// (ALZHEIMER_DC_0L0 variant), 10-bit: bottom half averages the 4
// lower-left pixels, top half uses mid-grey 512. x0 -> dest, x1 = stride.
function ff_pred8x8_0l0_dc_neon_10, export=1
add x2, x0, x1, lsl #2
sub x2, x2, #2
ldcol.16 v1, x2, x1, 4
addp v2.8h, v1.8h, v1.8h
addp v2.4h, v2.4h, v2.4h
urshr v1.4h, v2.4h, #2
movi v0.8h, #2, lsl #8 // 512
dup v1.8h, v1.h[0]
b .L_pred8x8_dc_10_end
endfunc

Просмотреть файл

@ -19,6 +19,7 @@
#ifndef AVCODEC_AARCH64_IDCT_H
#define AVCODEC_AARCH64_IDCT_H
#include <stddef.h>
#include <stdint.h>
void ff_simple_idct_neon(int16_t *data);

Просмотреть файл

@ -27,19 +27,29 @@
#include "libavcodec/idctdsp.h"
#include "idct.h"
void ff_put_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
void ff_put_signed_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
void ff_add_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
av_cold void ff_idctdsp_init_aarch64(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags) && !avctx->lowres && !high_bit_depth) {
if (avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
avctx->idct_algo == FF_IDCT_SIMPLENEON) {
c->idct_put = ff_simple_idct_put_neon;
c->idct_add = ff_simple_idct_add_neon;
c->idct = ff_simple_idct_neon;
c->perm_type = FF_IDCT_PERM_PARTTRANS;
if (have_neon(cpu_flags)) {
if (!avctx->lowres && !high_bit_depth) {
if (avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
avctx->idct_algo == FF_IDCT_SIMPLENEON) {
c->idct_put = ff_simple_idct_put_neon;
c->idct_add = ff_simple_idct_add_neon;
c->idct = ff_simple_idct_neon;
c->perm_type = FF_IDCT_PERM_PARTTRANS;
}
}
c->add_pixels_clamped = ff_add_pixels_clamped_neon;
c->put_pixels_clamped = ff_put_pixels_clamped_neon;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;
}
}

Просмотреть файл

@ -0,0 +1,130 @@
/*
* IDCT AArch64 NEON optimisations
*
* Copyright (c) 2022 Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
// Clamp 16-bit signed block coefficients to unsigned 8-bit
// On entry:
// x0 -> array of 64x 16-bit coefficients
// x1 -> 8-bit results
// x2 = row stride for results, bytes
function ff_put_pixels_clamped_neon, export=1
// load all 64 coefficients (8 rows of 8 int16) in two 64-byte chunks
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x0]
// sqxtun: saturating narrow signed 16-bit -> unsigned 8-bit (clamps to 0..255)
sqxtun v0.8b, v0.8h
sqxtun v1.8b, v1.8h
sqxtun v2.8b, v2.8h
sqxtun v3.8b, v3.8h
sqxtun v4.8b, v4.8h
// stores interleaved with the remaining narrows; v0-v2 are reused for
// the second group of rows once their first-group values are written out
st1 {v0.8b}, [x1], x2
sqxtun v0.8b, v5.8h
st1 {v1.8b}, [x1], x2
sqxtun v1.8b, v6.8h
st1 {v2.8b}, [x1], x2
sqxtun v2.8b, v7.8h
st1 {v3.8b}, [x1], x2
st1 {v4.8b}, [x1], x2
st1 {v0.8b}, [x1], x2
st1 {v1.8b}, [x1], x2
st1 {v2.8b}, [x1]
ret
endfunc
// Clamp 16-bit signed block coefficients to signed 8-bit (biased by 128)
// On entry:
// x0 -> array of 64x 16-bit coefficients
// x1 -> 8-bit results
// x2 = row stride for results, bytes
function ff_put_signed_pixels_clamped_neon, export=1
// load all 64 coefficients (8 rows of 8 int16)
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
movi v4.8b, #128 // bias added after signed narrowing
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0]
// sqxtn: saturating narrow to signed 8-bit (-128..127), then +128
// maps the result into 0..255
sqxtn v0.8b, v0.8h
sqxtn v1.8b, v1.8h
sqxtn v2.8b, v2.8h
sqxtn v3.8b, v3.8h
sqxtn v5.8b, v16.8h
add v0.8b, v0.8b, v4.8b
sqxtn v6.8b, v17.8h
add v1.8b, v1.8b, v4.8b
sqxtn v7.8b, v18.8h
add v2.8b, v2.8b, v4.8b
sqxtn v16.8b, v19.8h
add v3.8b, v3.8b, v4.8b
// stores interleaved with the bias adds for the second group of rows
st1 {v0.8b}, [x1], x2
add v0.8b, v5.8b, v4.8b
st1 {v1.8b}, [x1], x2
add v1.8b, v6.8b, v4.8b
st1 {v2.8b}, [x1], x2
add v2.8b, v7.8b, v4.8b
st1 {v3.8b}, [x1], x2
add v3.8b, v16.8b, v4.8b
st1 {v0.8b}, [x1], x2
st1 {v1.8b}, [x1], x2
st1 {v2.8b}, [x1], x2
st1 {v3.8b}, [x1]
ret
endfunc
// Add 16-bit signed block coefficients to unsigned 8-bit
// On entry:
// x0 -> array of 64x 16-bit coefficients
// x1 -> 8-bit input and results
// x2 = row stride for 8-bit input and results, bytes
function ff_add_pixels_clamped_neon, export=1
// load first 4 rows of coefficients; x3 keeps the original dest pointer
// for writing while x1 runs ahead loading source rows
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
mov x3, x1
ld1 {v4.8b}, [x1], x2
ld1 {v5.8b}, [x1], x2
ld1 {v6.8b}, [x1], x2
ld1 {v7.8b}, [x1], x2
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0]
// uaddw: widen the 8-bit pixels and add to the 16-bit coefficients
uaddw v0.8h, v0.8h, v4.8b
uaddw v1.8h, v1.8h, v5.8b
uaddw v2.8h, v2.8h, v6.8b
ld1 {v4.8b}, [x1], x2
uaddw v3.8h, v3.8h, v7.8b
ld1 {v5.8b}, [x1], x2
// sqxtun clamps the 16-bit sums back to unsigned 8-bit
sqxtun v0.8b, v0.8h
ld1 {v6.8b}, [x1], x2
sqxtun v1.8b, v1.8h
ld1 {v7.8b}, [x1]
sqxtun v2.8b, v2.8h
sqxtun v3.8b, v3.8h
uaddw v4.8h, v16.8h, v4.8b
st1 {v0.8b}, [x3], x2
uaddw v0.8h, v17.8h, v5.8b
st1 {v1.8b}, [x3], x2
uaddw v1.8h, v18.8h, v6.8b
st1 {v2.8b}, [x3], x2
uaddw v2.8h, v19.8h, v7.8b
sqxtun v4.8b, v4.8h
sqxtun v0.8b, v0.8h
st1 {v3.8b}, [x3], x2
sqxtun v1.8b, v1.8h
sqxtun v2.8b, v2.8h
st1 {v4.8b}, [x3], x2
st1 {v0.8b}, [x3], x2
st1 {v1.8b}, [x3], x2
st1 {v2.8b}, [x3]
ret
endfunc

Просмотреть файл

@ -25,6 +25,7 @@
function ff_imdct_half_neon, export=1
sub sp, sp, #32
stp x19, x20, [sp]
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #16]
mov x12, #1
ldr w14, [x0, #28] // mdct_bits
@ -121,6 +122,7 @@ function ff_imdct_half_neon, export=1
ldp x19, x20, [sp]
ldr x30, [sp, #16]
AARCH64_VALIDATE_LINK_REGISTER
add sp, sp, #32
ret
@ -129,6 +131,7 @@ endfunc
function ff_imdct_calc_neon, export=1
sub sp, sp, #32
stp x19, x20, [sp]
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #16]
ldr w3, [x0, #28] // mdct_bits
mov x19, #1
@ -160,8 +163,10 @@ function ff_imdct_calc_neon, export=1
subs x19, x19, #16
b.gt 1b
ldp x19, x20, [sp], #16
ldr x30, [sp], #16
ldp x19, x20, [sp]
ldr x30, [sp, #16]
AARCH64_VALIDATE_LINK_REGISTER
add sp, sp, #32
ret
endfunc
@ -170,6 +175,7 @@ endfunc
function ff_mdct_calc_neon, export=1
sub sp, sp, #32
stp x19, x20, [sp]
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #16]
mov x12, #1
@ -317,7 +323,10 @@ function ff_mdct_calc_neon, export=1
st2 {v4.2s,v5.2s}, [x0]
st2 {v6.2s,v7.2s}, [x8]
ldp x19, x20, [sp], #16
ldr x30, [sp], #16
ldp x19, x20, [sp]
ldr x30, [sp, #16]
AARCH64_VALIDATE_LINK_REGISTER
add sp, sp, #32
ret
endfunc

Просмотреть файл

@ -4,48 +4,56 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at http://mozilla.org/MPL/2.0/.
SOURCES += [
'h264chroma_init_aarch64.c',
'h264cmc_neon.S',
'h264dsp_init_aarch64.c',
'h264dsp_neon.S',
'h264idct_neon.S',
'h264pred_init.c',
'h264pred_neon.S',
'hpeldsp_init_aarch64.c',
'hpeldsp_neon.S',
'idctdsp_init_aarch64.c',
'mdct_neon.S',
'mpegaudiodsp_init.c',
'mpegaudiodsp_neon.S',
'neon.S',
'simple_idct_neon.S',
'videodsp.S',
'videodsp_init.c',
'vp8dsp_init_aarch64.c',
'vp8dsp_neon.S',
'vp9dsp_init_10bpp_aarch64.c',
'vp9dsp_init_12bpp_aarch64.c',
'vp9dsp_init_aarch64.c',
'vp9itxfm_16bpp_neon.S',
'vp9itxfm_neon.S',
'vp9lpf_16bpp_neon.S',
'vp9lpf_neon.S',
'vp9mc_16bpp_neon.S',
'vp9mc_aarch64.S',
'vp9mc_neon.S',
]
if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
SOURCES += [
'fft_init_aarch64.c',
'fft_neon.S',
'h264chroma_init_aarch64.c',
'h264cmc_neon.S',
'h264dsp_init_aarch64.c',
'h264dsp_neon.S',
'h264idct_neon.S',
'h264pred_init.c',
'h264pred_neon.S',
'hpeldsp_init_aarch64.c',
'hpeldsp_neon.S',
'idctdsp_init_aarch64.c',
'idctdsp_neon.S',
'mdct_neon.S',
'mpegaudiodsp_init.c',
'mpegaudiodsp_neon.S',
'neon.S',
'simple_idct_neon.S',
'videodsp.S',
'videodsp_init.c',
'vp8dsp_init_aarch64.c',
'vp8dsp_neon.S',
'vp9dsp_init_10bpp_aarch64.c',
'vp9dsp_init_12bpp_aarch64.c',
'vp9dsp_init_aarch64.c',
'vp9itxfm_16bpp_neon.S',
'vp9itxfm_neon.S',
'vp9lpf_16bpp_neon.S',
'vp9lpf_neon.S',
'vp9mc_16bpp_neon.S',
'vp9mc_aarch64.S',
'vp9mc_neon.S',
]
else:
SOURCES += [
'fft_init_aarch64.c',
'fft_neon.S',
'idctdsp_init_aarch64.c',
'idctdsp_neon.S',
'mpegaudiodsp_init.c',
'mpegaudiodsp_neon.S',
'simple_idct_neon.S',
]
if CONFIG['OS_ARCH'] == 'WINNT':
USE_INTEGRATED_CLANGCL_AS = True
DEFINES['EXTERN_ASM'] = ''
if CONFIG['MOZ_LIBAV_FFT']:
SOURCES += [
'fft_init_aarch64.c',
'fft_neon.S',
]
FINAL_LIBRARY = 'mozavcodec'
include('/media/ffvpx/ffvpxcommon.mozbuild')

Просмотреть файл

@ -109,12 +109,25 @@
trn2 \r5\().4H, \r0\().4H, \r1\().4H
trn1 \r6\().4H, \r2\().4H, \r3\().4H
trn2 \r7\().4H, \r2\().4H, \r3\().4H
trn1 \r0\().2S, \r4\().2S, \r6\().2S
trn2 \r2\().2S, \r4\().2S, \r6\().2S
trn1 \r1\().2S, \r5\().2S, \r7\().2S
trn2 \r3\().2S, \r5\().2S, \r7\().2S
.endm
// Transpose a 4x4 block of 16-bit elements held in the low and high
// halves of four 8h registers (i.e. two 4x4 transposes in parallel).
//   r0-r3: in/out data registers; t4-t7: scratch registers
.macro transpose_4x8H r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().8H, \r0\().8H, \r1\().8H
trn2 \t5\().8H, \r0\().8H, \r1\().8H
trn1 \t6\().8H, \r2\().8H, \r3\().8H
trn2 \t7\().8H, \r2\().8H, \r3\().8H
trn1 \r0\().4S, \t4\().4S, \t6\().4S
trn2 \r2\().4S, \t4\().4S, \t6\().4S
trn1 \r1\().4S, \t5\().4S, \t7\().4S
trn2 \r3\().4S, \t5\().4S, \t7\().4S
.endm
.macro transpose_8x8H r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
trn1 \r8\().8H, \r0\().8H, \r1\().8H
trn2 \r9\().8H, \r0\().8H, \r1\().8H

Просмотреть файл

@ -58,7 +58,7 @@ endconst
.endm
.macro idct_end
br x10
ret x10
.endm
.macro smull1 a, b, c

Просмотреть файл

@ -21,10 +21,28 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/vc1dsp.h"
#include "config.h"
void ff_vc1_inv_trans_8x8_neon(int16_t *block);
void ff_vc1_inv_trans_8x4_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x8_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_8x8_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_8x4_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x8_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_v_loop_filter4_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_vc1_h_loop_filter4_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_vc1_v_loop_filter8_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_vc1_h_loop_filter8_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_vc1_v_loop_filter16_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_vc1_h_loop_filter16_neon(uint8_t *src, ptrdiff_t stride, int pq);
void ff_put_vc1_chroma_mc8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
int h, int x, int y);
void ff_avg_vc1_chroma_mc8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
@ -34,14 +52,90 @@ void ff_put_vc1_chroma_mc4_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
void ff_avg_vc1_chroma_mc4_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
int h, int x, int y);
int ff_vc1_unescape_buffer_helper_neon(const uint8_t *src, int size, uint8_t *dst);
/**
 * Copy src to dst while removing VC-1 escape bytes.
 *
 * The escape pattern detected by the AV_RL32 test below is the byte
 * sequence 00 00 03 xx with xx <= 3 (little-endian load, high byte's two
 * low bits masked off); when found, the two zero bytes are copied and
 * the 0x03 byte is dropped.
 *
 * The bulk of the scanning is done by ff_vc1_unescape_buffer_helper_neon
 * once dst is 8-byte aligned; the helper appears to copy bytes and return
 * the number of input bytes it left unconsumed (skip = size - return) —
 * NOTE(review): confirm this contract against the asm implementation.
 *
 * @param src  input bitstream (little-endian, unaligned loads assumed)
 * @param size number of input bytes
 * @param dst  output buffer
 * @return     number of bytes written to dst
 */
static int vc1_unescape_buffer_neon(const uint8_t *src, int size, uint8_t *dst)
{
/* Dealing with starting and stopping, and removing escape bytes, are
* comparatively less time-sensitive, so are more clearly expressed using
* a C wrapper around the assembly inner loop. Note that we assume a
* little-endian machine that supports unaligned loads. */
int dsize = 0;
while (size >= 4)
{
int found = 0;
/* copy byte-by-byte until dst is 8-byte aligned for the helper */
while (!found && (((uintptr_t) dst) & 7) && size >= 4)
{
found = (AV_RL32(src) &~ 0x03000000) == 0x00030000;
if (!found)
{
*dst++ = *src++;
--size;
++dsize;
}
}
if (!found)
{
int skip = size - ff_vc1_unescape_buffer_helper_neon(src, size, dst);
dst += skip;
src += skip;
size -= skip;
dsize += skip;
/* the helper stops near a potential escape; finish the check here */
while (!found && size >= 4)
{
found = (AV_RL32(src) &~ 0x03000000) == 0x00030000;
if (!found)
{
*dst++ = *src++;
--size;
++dsize;
}
}
}
if (found)
{
/* keep the two zero bytes, drop the 0x03 escape byte */
*dst++ = *src++;
*dst++ = *src++;
++src;
size -= 3;
dsize += 2;
}
}
/* fewer than 4 bytes left: no escape sequence can fit, plain copy */
while (size > 0)
{
*dst++ = *src++;
--size;
++dsize;
}
return dsize;
}
/**
 * Install the AArch64 NEON implementations into the VC-1 DSP context.
 * Only runs the assignments when the CPU reports NEON support; otherwise
 * the generic C function pointers set up by the caller are left in place.
 *
 * @param dsp the VC1DSPContext to populate
 */
av_cold void ff_vc1dsp_init_aarch64(VC1DSPContext *dsp)
{
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags)) {
/* inverse transforms (full and DC-only variants) */
dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_neon;
dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_neon;
dsp->vc1_inv_trans_4x8 = ff_vc1_inv_trans_4x8_neon;
dsp->vc1_inv_trans_4x4 = ff_vc1_inv_trans_4x4_neon;
dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_neon;
dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_neon;
dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_neon;
dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_neon;
/* in-loop deblocking filters */
dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_neon;
dsp->vc1_h_loop_filter4 = ff_vc1_h_loop_filter4_neon;
dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_neon;
dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_neon;
dsp->vc1_v_loop_filter16 = ff_vc1_v_loop_filter16_neon;
dsp->vc1_h_loop_filter16 = ff_vc1_h_loop_filter16_neon;
/* chroma motion compensation (no-rounding variants) */
dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_neon;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_neon;
dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = ff_put_vc1_chroma_mc4_neon;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = ff_avg_vc1_chroma_mc4_neon;
/* bitstream unescaping (C wrapper around the NEON inner loop) */
dsp->vc1_unescape_buffer = vc1_unescape_buffer_neon;
}
}

Просмотреть файл

@ -19,10 +19,11 @@
#include "libavutil/aarch64/asm.S"
function ff_prefetch_aarch64, export=1
1:
subs w2, w2, #2
prfm pldl1strm, [x0]
prfm pldl1strm, [x0, x1]
add x0, x0, x1, lsl #1
b.gt X(ff_prefetch_aarch64)
b.gt 1b
ret
endfunc

Просмотреть файл

@ -1040,7 +1040,7 @@ function \txfm\()16_1d_4x16_pass1_neon
.irp i, 16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31
store \i, x0, #16
.endr
br x14
ret x14
1:
// Special case: For the last input column (x1 == 12),
// which would be stored as the last row in the temp buffer,
@ -1068,7 +1068,7 @@ function \txfm\()16_1d_4x16_pass1_neon
mov v29.16b, v17.16b
mov v30.16b, v18.16b
mov v31.16b, v19.16b
br x14
ret x14
endfunc
// Read a vertical 4x16 slice out of a 16x16 matrix, do a transform on it,
@ -1098,7 +1098,7 @@ function \txfm\()16_1d_4x16_pass2_neon
load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
br x14
ret x14
endfunc
.endm
@ -1208,7 +1208,7 @@ function vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
.endif
br x15
ret x15
endfunc
function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_10_neon, export=1
@ -1264,7 +1264,7 @@ function idct16_1d_4x16_pass1_quarter_neon
st1 {v23.4s}, [x0], #16
st1 {v27.4s}, [x0], #16
st1 {v31.4s}, [x0], #16
br x14
ret x14
endfunc
function idct16_1d_4x16_pass2_quarter_neon
@ -1286,7 +1286,7 @@ function idct16_1d_4x16_pass2_quarter_neon
load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
br x14
ret x14
endfunc
function idct16_1d_4x16_pass1_half_neon
@ -1313,7 +1313,7 @@ function idct16_1d_4x16_pass1_half_neon
.irp i, 16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31
store \i, x0, #16
.endr
br x14
ret x14
1:
// Special case: For the second input column (r1 == 4),
// which would be stored as the second row in the temp buffer,
@ -1341,7 +1341,7 @@ function idct16_1d_4x16_pass1_half_neon
mov v21.16b, v17.16b
mov v22.16b, v18.16b
mov v23.16b, v19.16b
br x14
ret x14
endfunc
function idct16_1d_4x16_pass2_half_neon
@ -1364,7 +1364,7 @@ function idct16_1d_4x16_pass2_half_neon
load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
br x14
ret x14
endfunc
.macro idct16_partial size
@ -1390,7 +1390,7 @@ function idct16x16_\size\()_add_16_neon
add sp, sp, #1024
ldp d8, d9, [sp], 0x10
br x15
ret x15
endfunc
.endm
@ -1729,7 +1729,7 @@ function idct32_1d_4x32_pass1\suffix\()_neon
store_rev v29.4s, v25.4s, v21.4s, v17.4s, v29.16b, v25.16b
store_rev v28.4s, v24.4s, v20.4s, v16.4s, v28.16b, v24.16b
.purgem store_rev
br x14
ret x14
endfunc
// This is mostly the same as 4x32_pass1, but without the transpose,
@ -1849,7 +1849,7 @@ function idct32_1d_4x32_pass2\suffix\()_neon
load_acc_store v24.4s, v25.4s, v26.4s, v27.4s, 1
load_acc_store v28.4s, v29.4s, v30.4s, v31.4s, 1
.purgem load_acc_store
br x14
ret x14
endfunc
.endm
@ -1943,7 +1943,7 @@ function vp9_idct_idct_32x32_add_16_neon
ldp d10, d11, [sp], 0x10
ldp d8, d9, [sp], 0x10
br x15
ret x15
endfunc
function ff_vp9_idct_idct_32x32_add_10_neon, export=1
@ -2009,7 +2009,7 @@ function idct32x32_\size\()_add_16_neon
ldp d10, d11, [sp], 0x10
ldp d8, d9, [sp], 0x10
br x15
ret x15
endfunc
.endm

Просмотреть файл

@ -787,7 +787,7 @@ function \txfm\()16_1d_8x16_pass1_neon
.irp i, 16, 24, 17, 25, 18, 26, 19, 27, 20, 28, 21, 29, 22, 30, 23, 31
store \i, x0, #16
.endr
br x14
ret x14
1:
// Special case: For the last input column (x1 == 8),
// which would be stored as the last row in the temp buffer,
@ -806,7 +806,7 @@ function \txfm\()16_1d_8x16_pass1_neon
mov v29.16b, v21.16b
mov v30.16b, v22.16b
mov v31.16b, v23.16b
br x14
ret x14
endfunc
// Read a vertical 8x16 slice out of a 16x16 matrix, do a transform on it,
@ -834,7 +834,7 @@ function \txfm\()16_1d_8x16_pass2_neon
load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
br x14
ret x14
endfunc
.endm
@ -925,7 +925,7 @@ function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_neon, export=1
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
.endif
br x15
ret x15
endfunc
.endm
@ -960,7 +960,7 @@ function idct16_1d_8x16_pass1_quarter_neon
.irp i, 24, 25, 26, 27
store \i, x0, x9
.endr
br x14
ret x14
endfunc
function idct16_1d_8x16_pass2_quarter_neon
@ -978,7 +978,7 @@ function idct16_1d_8x16_pass2_quarter_neon
load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
br x14
ret x14
endfunc
function idct16_1d_8x16_pass1_half_neon
@ -1003,7 +1003,7 @@ function idct16_1d_8x16_pass1_half_neon
.irp i, 24, 25, 26, 27, 28, 29, 30, 31
store \i, x0, x9
.endr
br x14
ret x14
endfunc
function idct16_1d_8x16_pass2_half_neon
@ -1021,7 +1021,7 @@ function idct16_1d_8x16_pass2_half_neon
load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
br x14
ret x14
endfunc
.macro idct16_partial size
@ -1038,7 +1038,7 @@ function idct16x16_\size\()_add_neon
.endr
add sp, sp, #512
br x15
ret x15
endfunc
.endm
@ -1349,7 +1349,7 @@ function idct32_1d_8x32_pass1\suffix\()_neon
store_rev v25.8h, v17.8h
store_rev v24.8h, v16.8h
.purgem store_rev
br x14
ret x14
endfunc
// This is mostly the same as 8x32_pass1, but without the transpose,
@ -1466,7 +1466,7 @@ function idct32_1d_8x32_pass2\suffix\()_neon
load_acc_store v24.8h, v25.8h, v26.8h, v27.8h, 1
load_acc_store v28.8h, v29.8h, v30.8h, v31.8h, 1
.purgem load_acc_store
br x14
ret x14
endfunc
.endm
@ -1547,7 +1547,7 @@ function ff_vp9_idct_idct_32x32_add_neon, export=1
ldp d8, d9, [sp], 0x10
ldp d10, d11, [sp], 0x10
br x15
ret x15
endfunc
.macro idct32_partial size
@ -1572,7 +1572,7 @@ function idct32x32_\size\()_add_neon
ldp d8, d9, [sp], 0x10
ldp d10, d11, [sp], 0x10
br x15
ret x15
endfunc
.endm

Просмотреть файл

@ -22,18 +22,6 @@
#include "neon.S"
.macro transpose_4x8H r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().8h, \r0\().8h, \r1\().8h
trn2 \t5\().8h, \r0\().8h, \r1\().8h
trn1 \t6\().8h, \r2\().8h, \r3\().8h
trn2 \t7\().8h, \r2\().8h, \r3\().8h
trn1 \r0\().4s, \t4\().4s, \t6\().4s
trn2 \r2\().4s, \t4\().4s, \t6\().4s
trn1 \r1\().4s, \t5\().4s, \t7\().4s
trn2 \r3\().4s, \t5\().4s, \t7\().4s
.endm
// The input to and output from this macro is in the registers v16-v31,
// and v0-v7 are used as scratch registers.
// p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
@ -69,7 +57,7 @@
mov x12, v4.d[1]
adds x11, x11, x12
b.ne 1f
br x10
ret x10
1:
.if \wd >= 8
@ -205,7 +193,7 @@
b.eq 6f
.else
b.ne 1f
br x13
ret x13
1:
.endif
@ -264,7 +252,7 @@
b.ne 1f
// If no pixels needed flat8in nor flat8out, jump to a
// writeout of the inner 4 pixels
br x14
ret x14
1:
mov x11, v7.d[0]
@ -272,7 +260,7 @@
adds x11, x11, x12
b.ne 1f
// If no pixels need flat8out, jump to a writeout of the inner 6 pixels
br x15
ret x15
1:
// flat8out
@ -446,7 +434,7 @@ function ff_\func\()_\bpp\()_neon, export=1
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x16
ret x16
.else
b \func\()_16_neon
.endif
@ -486,7 +474,7 @@ function ff_\func\()_\suffix\()_\bpp\()_neon, export=1
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
.endif
br x16
ret x16
endfunc
.endm
@ -520,7 +508,7 @@ function ff_vp9_loop_filter_\dir\()_\wd1\()\wd2\()_16_\bpp\()_neon, export=1
lsl w3, w14, #\bpp - 8
lsl w4, w15, #\bpp - 8
bl vp9_loop_filter_\dir\()_\wd2\()_8_16_neon
br x16
ret x16
endfunc
.endm
@ -553,7 +541,7 @@ function vp9_loop_filter_v_4_8_16_neon
st1 {v25.8h}, [x0], x1
sub x0, x0, x1, lsl #1
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_v_4_8
@ -601,7 +589,7 @@ function vp9_loop_filter_h_4_8_16_neon
sub x0, x0, x1, lsl #3
add x0, x0, #4
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_h_4_8
@ -632,7 +620,7 @@ function vp9_loop_filter_v_8_8_16_neon
sub x0, x0, x1, lsl #1
sub x0, x0, x1
br x10
ret x10
6:
sub x9, x0, x1, lsl #1
st1 {v22.8h}, [x9], x1
@ -640,7 +628,7 @@ function vp9_loop_filter_v_8_8_16_neon
st1 {v23.8h}, [x9], x1
st1 {v25.8h}, [x0], x1
sub x0, x0, x1, lsl #1
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_v_8_8
@ -683,7 +671,7 @@ function vp9_loop_filter_h_8_8_16_neon
sub x0, x0, x1, lsl #3
add x0, x0, #8
br x10
ret x10
6:
// If we didn't need to do the flat8in part, we use the same writeback
// as in loop_filter_h_4_8.
@ -700,7 +688,7 @@ function vp9_loop_filter_h_8_8_16_neon
st1 {v25.d}[1], [x0], x1
sub x0, x0, x1, lsl #3
add x0, x0, #4
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_h_8_8
@ -755,7 +743,7 @@ function vp9_loop_filter_v_16_8_16_neon
sub x0, x0, x1, lsl #3
add x0, x0, x1
br x10
ret x10
8:
add x9, x9, x1, lsl #2
// If we didn't do the flat8out part, the output is left in the
@ -768,7 +756,7 @@ function vp9_loop_filter_v_16_8_16_neon
st1 {v26.8h}, [x0], x1
sub x0, x0, x1, lsl #1
sub x0, x0, x1
br x10
ret x10
7:
sub x9, x0, x1, lsl #1
st1 {v22.8h}, [x9], x1
@ -776,7 +764,7 @@ function vp9_loop_filter_v_16_8_16_neon
st1 {v23.8h}, [x9], x1
st1 {v25.8h}, [x0], x1
sub x0, x0, x1, lsl #1
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_v_16_8, push=1
@ -833,7 +821,7 @@ function vp9_loop_filter_h_16_8_16_neon
st1 {v31.8h}, [x0], x1
sub x0, x0, x1, lsl #3
br x10
ret x10
8:
// The same writeback as in loop_filter_h_8_8
sub x9, x0, #8
@ -850,7 +838,7 @@ function vp9_loop_filter_h_16_8_16_neon
st1 {v27.8h}, [x0], x1
sub x0, x0, x1, lsl #3
add x0, x0, #8
br x10
ret x10
7:
// The same writeback as in loop_filter_h_4_8
sub x9, x0, #4
@ -866,7 +854,7 @@ function vp9_loop_filter_h_16_8_16_neon
st1 {v25.d}[1], [x0], x1
sub x0, x0, x1, lsl #3
add x0, x0, #4
br x10
ret x10
endfunc
bpp_frontends vp9_loop_filter_h_16_8, push=1

Просмотреть файл

@ -399,7 +399,7 @@
.endif
// If no pixels needed flat8in nor flat8out, jump to a
// writeout of the inner 4 pixels
br x14
ret x14
1:
mov x5, v7.d[0]
@ -411,7 +411,7 @@
cbnz x5, 1f
.endif
// If no pixels need flat8out, jump to a writeout of the inner 6 pixels
br x15
ret x15
1:
// flat8out
@ -532,32 +532,32 @@ function vp9_loop_filter_4
loop_filter 4, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
ret
9:
br x10
ret x10
endfunc
function vp9_loop_filter_4_16b_mix_44
loop_filter 4, .16b, 44, v16, v17, v18, v19, v28, v29, v30, v31
ret
9:
br x10
ret x10
endfunc
function vp9_loop_filter_8
loop_filter 8, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
ret
6:
br x13
ret x13
9:
br x10
ret x10
endfunc
function vp9_loop_filter_8_16b_mix
loop_filter 8, .16b, 88, v16, v17, v18, v19, v28, v29, v30, v31
ret
6:
br x13
ret x13
9:
br x10
ret x10
endfunc
function vp9_loop_filter_16
@ -568,7 +568,7 @@ function vp9_loop_filter_16
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
endfunc
function vp9_loop_filter_16_16b
@ -579,7 +579,7 @@ function vp9_loop_filter_16_16b
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
endfunc
.macro loop_filter_4
@ -648,7 +648,7 @@ function ff_vp9_loop_filter_v_4_8_neon, export=1
st1 {v23.8b}, [x9], x1
st1 {v25.8b}, [x0], x1
br x10
ret x10
endfunc
function ff_vp9_loop_filter_v_44_16_neon, export=1
@ -672,7 +672,7 @@ function ff_vp9_loop_filter_v_44_16_neon, export=1
st1 {v23.16b}, [x9], x1
st1 {v25.16b}, [x0], x1
br x10
ret x10
endfunc
function ff_vp9_loop_filter_h_4_8_neon, export=1
@ -714,7 +714,7 @@ function ff_vp9_loop_filter_h_4_8_neon, export=1
st1 {v25.s}[0], [x9], x1
st1 {v25.s}[1], [x0], x1
br x10
ret x10
endfunc
function ff_vp9_loop_filter_h_44_16_neon, export=1
@ -766,7 +766,7 @@ function ff_vp9_loop_filter_h_44_16_neon, export=1
st1 {v25.s}[1], [x9], x1
st1 {v25.s}[3], [x0], x1
br x10
ret x10
endfunc
function ff_vp9_loop_filter_v_8_8_neon, export=1
@ -793,14 +793,14 @@ function ff_vp9_loop_filter_v_8_8_neon, export=1
st1 {v23.8b}, [x9], x1
st1 {v26.8b}, [x0], x1
br x10
ret x10
6:
sub x9, x0, x1, lsl #1
st1 {v22.8b}, [x9], x1
st1 {v24.8b}, [x0], x1
st1 {v23.8b}, [x9], x1
st1 {v25.8b}, [x0], x1
br x10
ret x10
endfunc
.macro mix_v_16 mix
@ -828,14 +828,14 @@ function ff_vp9_loop_filter_v_\mix\()_16_neon, export=1
st1 {v23.16b}, [x9], x1
st1 {v26.16b}, [x0], x1
br x10
ret x10
6:
sub x9, x0, x1, lsl #1
st1 {v22.16b}, [x9], x1
st1 {v24.16b}, [x0], x1
st1 {v23.16b}, [x9], x1
st1 {v25.16b}, [x0], x1
br x10
ret x10
endfunc
.endm
@ -876,7 +876,7 @@ function ff_vp9_loop_filter_h_8_8_neon, export=1
st1 {v23.8b}, [x9], x1
st1 {v27.8b}, [x0], x1
br x10
ret x10
6:
// If we didn't need to do the flat8in part, we use the same writeback
// as in loop_filter_h_4_8.
@ -891,7 +891,7 @@ function ff_vp9_loop_filter_h_8_8_neon, export=1
st1 {v24.s}[1], [x0], x1
st1 {v25.s}[0], [x9], x1
st1 {v25.s}[1], [x0], x1
br x10
ret x10
endfunc
.macro mix_h_16 mix
@ -942,7 +942,7 @@ function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
st1 {v27.8b}, [x9], x1
st1 {v27.d}[1], [x0], x1
br x10
ret x10
6:
add x9, x9, #2
add x0, x0, #2
@ -963,7 +963,7 @@ function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
st1 {v24.s}[3], [x0], x1
st1 {v25.s}[1], [x9], x1
st1 {v25.s}[3], [x0], x1
br x10
ret x10
endfunc
.endm
@ -1022,7 +1022,7 @@ function ff_vp9_loop_filter_v_16_8_neon, export=1
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
8:
add x9, x9, x1, lsl #2
// If we didn't do the flat8out part, the output is left in the
@ -1091,7 +1091,7 @@ function ff_vp9_loop_filter_v_16_16_neon, export=1
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
8:
add x9, x9, x1, lsl #2
st1 {v21.16b}, [x9], x1
@ -1168,7 +1168,7 @@ function ff_vp9_loop_filter_h_16_8_neon, export=1
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
8:
// The same writeback as in loop_filter_h_8_8
sub x9, x0, #4
@ -1287,7 +1287,7 @@ function ff_vp9_loop_filter_h_16_16_neon, export=1
ldp d10, d11, [sp], 0x10
ldp d12, d13, [sp], 0x10
ldp d14, d15, [sp], 0x10
br x10
ret x10
8:
sub x9, x0, #4
add x0, x9, x1, lsl #3

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -18,8 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavcodec/flacdsp.h"
#include "config.h"
#include "config_components.h"
void ff_flac_lpc_16_arm(int32_t *samples, const int coeffs[32], int order,
int qlevel, int len);

Просмотреть файл

@ -73,7 +73,7 @@ int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
int ret, cc_count;
if (size < 3)
return AVERROR(EINVAL);
return AVERROR_INVALIDDATA;
ret = init_get_bits8(&gb, data, size);
if (ret < 0)
@ -95,12 +95,12 @@ int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
/* 3 bytes per CC plus one byte marker_bits at the end */
if (cc_count * 3 >= (get_bits_left(&gb) >> 3))
return AVERROR(EINVAL);
return AVERROR_INVALIDDATA;
new_size = (old_size + cc_count * 3);
if (new_size > INT_MAX)
return AVERROR(EINVAL);
return AVERROR_INVALIDDATA;
/* Allow merging of the cc data from two fields. */
ret = av_buffer_realloc(pbuf, new_size);

Просмотреть файл

@ -114,6 +114,13 @@ enum {
AV1_WARP_MODEL_TRANSLATION = 1,
AV1_WARP_MODEL_ROTZOOM = 2,
AV1_WARP_MODEL_AFFINE = 3,
AV1_WARP_PARAM_REDUCE_BITS = 6,
AV1_DIV_LUT_BITS = 8,
AV1_DIV_LUT_PREC_BITS = 14,
AV1_DIV_LUT_NUM = 257,
AV1_MAX_LOOP_FILTER = 63,
};

Просмотреть файл

@ -21,10 +21,14 @@
#ifndef AVCODEC_AV1_PARSE_H
#define AVCODEC_AV1_PARSE_H
#include <limits.h>
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/intmath.h"
#include "libavutil/macros.h"
#include "av1.h"
#include "avcodec.h"
#include "get_bits.h"
// OBU header fields + max leb128 length

Просмотреть файл

@ -20,10 +20,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "av1_parse.h"
#include "libavutil/avassert.h"
#include "cbs.h"
#include "cbs_av1.h"
#include "internal.h"
#include "parser.h"
typedef struct AV1ParseContext {
@ -56,9 +55,9 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
{
AV1ParseContext *s = ctx->priv_data;
CodedBitstreamFragment *td = &s->temporal_unit;
CodedBitstreamAV1Context *av1 = s->cbc->priv_data;
AV1RawSequenceHeader *seq;
AV1RawColorConfig *color;
const CodedBitstreamAV1Context *av1 = s->cbc->priv_data;
const AV1RawSequenceHeader *seq;
const AV1RawColorConfig *color;
int ret;
*out_data = data;
@ -96,9 +95,9 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
color = &seq->color_config;
for (int i = 0; i < td->nb_units; i++) {
CodedBitstreamUnit *unit = &td->units[i];
AV1RawOBU *obu = unit->content;
AV1RawFrameHeader *frame;
const CodedBitstreamUnit *unit = &td->units[i];
const AV1RawOBU *obu = unit->content;
const AV1RawFrameHeader *frame;
if (unit->type == AV1_OBU_FRAME)
frame = &obu->obu.frame.header;
@ -205,33 +204,10 @@ static void av1_parser_close(AVCodecParserContext *ctx)
ff_cbs_close(&s->cbc);
}
static int av1_parser_split(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
AV1OBU obu;
const uint8_t *ptr = buf, *end = buf + buf_size;
while (ptr < end) {
int len = ff_av1_extract_obu(&obu, ptr, buf_size, avctx);
if (len < 0)
break;
if (obu.type == AV1_OBU_FRAME_HEADER ||
obu.type == AV1_OBU_FRAME) {
return ptr - buf;
}
ptr += len;
buf_size -= len;
}
return 0;
}
AVCodecParser ff_av1_parser = {
const AVCodecParser ff_av1_parser = {
.codec_ids = { AV_CODEC_ID_AV1 },
.priv_data_size = sizeof(AV1ParseContext),
.parser_init = av1_parser_init,
.parser_close = av1_parser_close,
.parser_parse = av1_parser_parse,
.split = av1_parser_split,
};

Просмотреть файл

@ -18,15 +18,47 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config_components.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "av1dec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
#include "thread.h"
/**< same with Div_Lut defined in spec 7.11.3.7 */
static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
8240, 8224, 8208, 8192
};
static uint32_t inverse_recenter(int r, uint32_t v)
{
@ -97,6 +129,70 @@ static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
-mx, mx + 1, r) << prec_diff) + round;
}
static uint64_t round_two(uint64_t x, uint16_t n)
{
if (n == 0)
return x;
return ((x + ((uint64_t)1 << (n - 1))) >> n);
}
static int64_t round_two_signed(int64_t x, uint16_t n)
{
return ((x<0) ? -((int64_t)round_two(-x, n)) : (int64_t)round_two(x, n));
}
/**
* Resolve divisor process.
* see spec 7.11.3.7
*/
static int16_t resolve_divisor(uint32_t d, uint16_t *shift)
{
int32_t e, f;
*shift = av_log2(d);
e = d - (1 << (*shift));
if (*shift > AV1_DIV_LUT_BITS)
f = round_two(e, *shift - AV1_DIV_LUT_BITS);
else
f = e << (AV1_DIV_LUT_BITS - (*shift));
*shift += AV1_DIV_LUT_PREC_BITS;
return div_lut[f];
}
/**
* check if global motion params is valid.
* see spec 7.11.3.6
*/
static uint8_t get_shear_params_valid(AV1DecContext *s, int idx)
{
int16_t alpha, beta, gamma, delta, divf, divs;
int64_t v, w;
int32_t *param = &s->cur_frame.gm_params[idx][0];
if (param[2] < 0)
return 0;
alpha = av_clip_int16(param[2] - (1 << AV1_WARPEDMODEL_PREC_BITS));
beta = av_clip_int16(param[3]);
divf = resolve_divisor(abs(param[2]), &divs);
v = (int64_t)param[4] * (1 << AV1_WARPEDMODEL_PREC_BITS);
w = (int64_t)param[3] * param[4];
gamma = av_clip_int16((int)round_two_signed((v * divf), divs));
delta = av_clip_int16(param[5] - (int)round_two_signed((w * divf), divs) - (1 << AV1_WARPEDMODEL_PREC_BITS));
alpha = round_two_signed(alpha, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
beta = round_two_signed(beta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
gamma = round_two_signed(gamma, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
delta = round_two_signed(delta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
if ((4 * abs(alpha) + 7 * abs(beta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS) ||
(4 * abs(gamma) + 4 * abs(delta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS))
return 0;
return 1;
}
/**
* update gm type/params, since cbs already implemented part of this funcation,
* so we don't need to full implement spec.
@ -144,6 +240,9 @@ static void global_motion_params(AV1DecContext *s)
read_global_param(s, type, ref, 0);
read_global_param(s, type, ref, 1);
}
if (type <= AV1_WARP_MODEL_AFFINE) {
s->cur_frame.gm_invalid[ref] = !get_shear_params_valid(s, ref);
}
}
}
@ -404,9 +503,8 @@ static int get_pixel_format(AVCodecContext *avctx)
if (pix_fmt == AV_PIX_FMT_NONE)
return -1;
s->pix_fmt = pix_fmt;
switch (s->pix_fmt) {
switch (pix_fmt) {
case AV_PIX_FMT_YUV420P:
#if CONFIG_AV1_DXVA2_HWACCEL
*fmtp++ = AV_PIX_FMT_DXVA2_VLD;
@ -449,7 +547,7 @@ static int get_pixel_format(AVCodecContext *avctx)
break;
}
*fmtp++ = s->pix_fmt;
*fmtp++ = pix_fmt;
*fmtp = AV_PIX_FMT_NONE;
ret = ff_thread_get_format(avctx, pix_fmts);
@ -467,6 +565,7 @@ static int get_pixel_format(AVCodecContext *avctx)
return AVERROR(ENOSYS);
}
s->pix_fmt = pix_fmt;
avctx->pix_fmt = ret;
return 0;
@ -474,7 +573,7 @@ static int get_pixel_format(AVCodecContext *avctx)
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
ff_thread_release_buffer(avctx, &f->tf);
ff_thread_release_buffer(avctx, f->f);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
av_buffer_unref(&f->header_ref);
@ -490,16 +589,19 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
{
int ret;
ret = ff_thread_ref_frame(&dst->tf, &src->tf);
ret = av_buffer_replace(&dst->header_ref, src->header_ref);
if (ret < 0)
return ret;
dst->header_ref = av_buffer_ref(src->header_ref);
if (!dst->header_ref)
goto fail;
dst->raw_frame_header = src->raw_frame_header;
if (!src->f->buf[0])
return 0;
ret = av_frame_ref(dst->f, src->f);
if (ret < 0)
goto fail;
if (src->hwaccel_picture_private) {
dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
if (!dst->hwaccel_priv_buf)
@ -509,6 +611,9 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
dst->spatial_id = src->spatial_id;
dst->temporal_id = src->temporal_id;
memcpy(dst->gm_invalid,
src->gm_invalid,
AV1_NUM_REF_FRAMES * sizeof(uint8_t));
memcpy(dst->gm_type,
src->gm_type,
AV1_NUM_REF_FRAMES * sizeof(uint8_t));
@ -536,10 +641,10 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
av1_frame_unref(avctx, &s->ref[i]);
av_frame_free(&s->ref[i].tf.f);
av_frame_free(&s->ref[i].f);
}
av1_frame_unref(avctx, &s->cur_frame);
av_frame_free(&s->cur_frame.tf.f);
av_frame_free(&s->cur_frame.f);
av_buffer_unref(&s->seq_ref);
av_buffer_unref(&s->header_ref);
@ -575,6 +680,11 @@ static int set_context_with_sequence(AVCodecContext *avctx,
break;
}
if (seq->film_grain_params_present)
avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
else
avctx->properties &= ~FF_CODEC_PROPERTY_FILM_GRAIN;
if (avctx->width != width || avctx->height != height) {
int ret = ff_set_dimensions(avctx, width, height);
if (ret < 0)
@ -635,16 +745,16 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
s->pix_fmt = AV_PIX_FMT_NONE;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
s->ref[i].tf.f = av_frame_alloc();
if (!s->ref[i].tf.f) {
s->ref[i].f = av_frame_alloc();
if (!s->ref[i].f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate reference frame buffer %d.\n", i);
return AVERROR(ENOMEM);
}
}
s->cur_frame.tf.f = av_frame_alloc();
if (!s->cur_frame.tf.f) {
s->cur_frame.f = av_frame_alloc();
if (!s->cur_frame.f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate current frame buffer.\n");
return AVERROR(ENOMEM);
@ -697,16 +807,10 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
return ret;
}
f->header_ref = av_buffer_ref(s->header_ref);
if (!f->header_ref)
return AVERROR(ENOMEM);
f->raw_frame_header = s->raw_frame_header;
if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
if ((ret = ff_thread_get_buffer(avctx, f->f, AV_GET_BUFFER_FLAG_REF)) < 0)
goto fail;
frame = f->tf.f;
frame = f->f;
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
switch (header->frame_type) {
@ -805,7 +909,7 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
const AVPacket *pkt, int *got_frame)
{
AV1DecContext *s = avctx->priv_data;
const AVFrame *srcframe = s->cur_frame.tf.f;
const AVFrame *srcframe = s->cur_frame.f;
int ret;
// TODO: all layers
@ -842,8 +946,7 @@ static int update_reference_list(AVCodecContext *avctx)
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
if (header->refresh_frame_flags & (1 << i)) {
if (s->ref[i].tf.f->buf[0])
av1_frame_unref(avctx, &s->ref[i]);
av1_frame_unref(avctx, &s->ref[i]);
if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
av_log(avctx, AV_LOG_ERROR,
"Failed to update frame %d in reference list\n", i);
@ -859,8 +962,27 @@ static int get_current_frame(AVCodecContext *avctx)
AV1DecContext *s = avctx->priv_data;
int ret;
if (s->cur_frame.tf.f->buf[0])
av1_frame_unref(avctx, &s->cur_frame);
av1_frame_unref(avctx, &s->cur_frame);
s->cur_frame.header_ref = av_buffer_ref(s->header_ref);
if (!s->cur_frame.header_ref)
return AVERROR(ENOMEM);
s->cur_frame.raw_frame_header = s->raw_frame_header;
ret = init_tile_data(s);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
return ret;
}
if ((avctx->skip_frame >= AVDISCARD_NONINTRA &&
(s->raw_frame_header->frame_type != AV1_FRAME_KEY &&
s->raw_frame_header->frame_type != AV1_FRAME_INTRA_ONLY)) ||
(avctx->skip_frame >= AVDISCARD_NONKEY &&
s->raw_frame_header->frame_type != AV1_FRAME_KEY) ||
avctx->skip_frame >= AVDISCARD_ALL)
return 0;
ret = av1_frame_alloc(avctx, &s->cur_frame);
if (ret < 0) {
@ -869,12 +991,6 @@ static int get_current_frame(AVCodecContext *avctx)
return ret;
}
ret = init_tile_data(s);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
return ret;
}
global_motion_params(s);
skip_mode_params(s);
coded_lossless_param(s);
@ -883,7 +999,7 @@ static int get_current_frame(AVCodecContext *avctx)
return ret;
}
static int av1_decode_frame(AVCodecContext *avctx, void *frame,
static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, AVPacket *pkt)
{
AV1DecContext *s = avctx->priv_data;
@ -974,8 +1090,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
s->raw_frame_header = &obu->obu.frame_header;
if (s->raw_frame_header->show_existing_frame) {
if (s->cur_frame.tf.f->buf[0])
av1_frame_unref(avctx, &s->cur_frame);
av1_frame_unref(avctx, &s->cur_frame);
ret = av1_frame_ref(avctx, &s->cur_frame,
&s->ref[s->raw_frame_header->frame_to_show_map_idx]);
@ -990,9 +1105,11 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
if (s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
}
s->raw_frame_header = NULL;
@ -1008,7 +1125,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
s->cur_frame.spatial_id = header->spatial_id;
s->cur_frame.temporal_id = header->temporal_id;
if (avctx->hwaccel) {
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->start_frame(avctx, unit->data,
unit->data_size);
if (ret < 0) {
@ -1035,7 +1152,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
if (ret < 0)
goto end;
if (avctx->hwaccel) {
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->decode_slice(avctx,
raw_tile_group->tile_data.data,
raw_tile_group->tile_data.data_size);
@ -1058,7 +1175,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
}
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
if (avctx->hwaccel) {
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->end_frame(avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
@ -1072,7 +1189,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
if (s->raw_frame_header->show_frame) {
if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
@ -1121,22 +1238,23 @@ static const AVClass av1_class = {
.version = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_av1_decoder = {
.name = "av1",
.long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AV1,
const FFCodec ff_av1_decoder = {
.p.name = "av1",
.p.long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AV1,
.priv_data_size = sizeof(AV1DecContext),
.init = av1_decode_init,
.close = av1_decode_free,
.decode = av1_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
FF_CODEC_DECODE_CB(av1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_SETS_PKT_DTS,
.flush = av1_decode_flush,
.profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
.priv_class = &av1_class,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
.p.priv_class = &av1_class,
.bsfs = "av1_frame_split",
.hw_configs = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_AV1_DXVA2_HWACCEL
HWACCEL_DXVA2(av1),

Просмотреть файл

@ -24,14 +24,14 @@
#include <stdint.h>
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_av1.h"
#include "thread.h"
typedef struct AV1Frame {
ThreadFrame tf;
AVFrame *f;
AVBufferRef *hwaccel_priv_buf;
void *hwaccel_picture_private;
@ -42,6 +42,7 @@ typedef struct AV1Frame {
int temporal_id;
int spatial_id;
uint8_t gm_invalid[AV1_NUM_REF_FRAMES];
uint8_t gm_type[AV1_NUM_REF_FRAMES];
int32_t gm_params[AV1_NUM_REF_FRAMES][6];

Просмотреть файл

@ -26,43 +26,21 @@
#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"
#include "avcodec.h"
#include "bsf.h"
#include "codec_internal.h"
#include "decode.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "internal.h"
#include "thread.h"
#if CONFIG_ICONV
# include <iconv.h>
#endif
#include "libavutil/ffversion.h"
const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
unsigned avcodec_version(void)
{
av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563);
av_assert0(AV_CODEC_ID_ADPCM_G722==69660);
av_assert0(AV_CODEC_ID_SRT==94216);
av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
return LIBAVCODEC_VERSION_INT;
}
const char *avcodec_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
const char *avcodec_license(void)
{
#define LICENSE_PREFIX "libavcodec license: "
return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
}
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
@ -92,25 +70,18 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2,
static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;
static void lock_avcodec(const AVCodec *codec)
static void lock_avcodec(const FFCodec *codec)
{
if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init)
ff_mutex_lock(&codec_mutex);
}
static void unlock_avcodec(const AVCodec *codec)
static void unlock_avcodec(const FFCodec *codec)
{
if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init)
ff_mutex_unlock(&codec_mutex);
}
#if FF_API_LOCKMGR
int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
{
return 0;
}
#endif
static int64_t get_bit_rate(AVCodecContext *ctx)
{
int64_t bit_rate;
@ -126,7 +97,7 @@ static int64_t get_bit_rate(AVCodecContext *ctx)
case AVMEDIA_TYPE_AUDIO:
bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
if (bits_per_sample) {
bit_rate = ctx->sample_rate * (int64_t)ctx->channels;
bit_rate = ctx->sample_rate * (int64_t)ctx->ch_layout.nb_channels;
if (bit_rate > INT64_MAX / bits_per_sample) {
bit_rate = 0;
} else
@ -144,9 +115,8 @@ static int64_t get_bit_rate(AVCodecContext *ctx)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
int ret = 0;
int codec_init_ok = 0;
AVDictionary *tmp = NULL;
AVCodecInternal *avci;
const FFCodec *codec2;
if (avcodec_is_open(avctx))
return 0;
@ -162,15 +132,21 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
}
if (!codec)
codec = avctx->codec;
codec2 = ffcodec(codec);
if ((avctx->codec_type != AVMEDIA_TYPE_UNKNOWN && avctx->codec_type != codec->type) ||
(avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec->id)) {
av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
return AVERROR(EINVAL);
}
avctx->codec_type = codec->type;
avctx->codec_id = codec->id;
avctx->codec = codec;
if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
return AVERROR(EINVAL);
if (options)
av_dict_copy(&tmp, *options, 0);
lock_avcodec(codec);
avci = av_mallocz(sizeof(*avci));
if (!avci) {
ret = AVERROR(ENOMEM);
@ -178,33 +154,18 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
}
avctx->internal = avci;
#if FF_API_OLD_ENCDEC
avci->to_free = av_frame_alloc();
avci->compat_decode_frame = av_frame_alloc();
avci->compat_encode_packet = av_packet_alloc();
if (!avci->to_free || !avci->compat_decode_frame || !avci->compat_encode_packet) {
ret = AVERROR(ENOMEM);
goto free_and_end;
}
#endif
avci->buffer_frame = av_frame_alloc();
avci->buffer_pkt = av_packet_alloc();
avci->es.in_frame = av_frame_alloc();
avci->ds.in_pkt = av_packet_alloc();
avci->last_pkt_props = av_packet_alloc();
avci->pkt_props = av_fifo_alloc(sizeof(*avci->last_pkt_props));
if (!avci->buffer_frame || !avci->buffer_pkt ||
!avci->es.in_frame || !avci->ds.in_pkt ||
!avci->last_pkt_props || !avci->pkt_props) {
if (!avci->buffer_frame || !avci->buffer_pkt) {
ret = AVERROR(ENOMEM);
goto free_and_end;
}
avci->skip_samples_multiplier = 1;
if (codec->priv_data_size > 0) {
if (codec2->priv_data_size > 0) {
if (!avctx->priv_data) {
avctx->priv_data = av_mallocz(codec->priv_data_size);
avctx->priv_data = av_mallocz(codec2->priv_data_size);
if (!avctx->priv_data) {
ret = AVERROR(ENOMEM);
goto free_and_end;
@ -214,12 +175,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
av_opt_set_defaults(avctx->priv_data);
}
}
if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, options)) < 0)
goto free_and_end;
} else {
avctx->priv_data = NULL;
}
if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
if ((ret = av_opt_set_dict(avctx, options)) < 0)
goto free_and_end;
if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
@ -256,20 +217,6 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
}
}
if (avctx->channels > FF_SANE_NB_CHANNELS || avctx->channels < 0) {
av_log(avctx, AV_LOG_ERROR, "Too many or invalid channels: %d\n", avctx->channels);
ret = AVERROR(EINVAL);
goto free_and_end;
}
if (av_codec_is_decoder(codec) &&
codec->type == AVMEDIA_TYPE_AUDIO &&
!(codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF) &&
avctx->channels == 0) {
av_log(avctx, AV_LOG_ERROR, "Decoder requires channel count but channels not set\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
if (avctx->sample_rate < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample rate: %d\n", avctx->sample_rate);
ret = AVERROR(EINVAL);
@ -281,18 +228,31 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
goto free_and_end;
}
avctx->codec = codec;
if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
avctx->codec_id == AV_CODEC_ID_NONE) {
avctx->codec_type = codec->type;
avctx->codec_id = codec->id;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
/* compat wrapper for old-style callers */
if (avctx->channel_layout && !avctx->channels)
avctx->channels = av_popcount64(avctx->channel_layout);
if ((avctx->channels > 0 && avctx->ch_layout.nb_channels != avctx->channels) ||
(avctx->channel_layout && (avctx->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
avctx->ch_layout.u.mask != avctx->channel_layout))) {
if (avctx->channel_layout) {
av_channel_layout_from_mask(&avctx->ch_layout, avctx->channel_layout);
} else {
avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
avctx->ch_layout.nb_channels = avctx->channels;
}
}
if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
&& avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->ch_layout.nb_channels);
ret = AVERROR(EINVAL);
goto free_and_end;
}
avctx->frame_number = 0;
avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
@ -325,35 +285,37 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
if (ret < 0)
goto free_and_end;
if (!HAVE_THREADS)
av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");
if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) {
unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem
ret = ff_frame_thread_encoder_init(avctx, options ? *options : NULL);
lock_avcodec(codec);
ret = ff_frame_thread_encoder_init(avctx);
if (ret < 0)
goto free_and_end;
}
if (HAVE_THREADS
&& !(avci->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) {
/* Frame-threaded decoders call FFCodec.init for their child contexts. */
lock_avcodec(codec2);
ret = ff_thread_init(avctx);
unlock_avcodec(codec2);
if (ret < 0) {
goto free_and_end;
}
}
if (!HAVE_THREADS && !(codec->caps_internal & FF_CODEC_CAP_AUTO_THREADS))
if (!HAVE_THREADS && !(codec2->caps_internal & FF_CODEC_CAP_AUTO_THREADS))
avctx->thread_count = 1;
if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME)
|| avci->frame_thread_encoder)) {
ret = avctx->codec->init(avctx);
if (ret < 0) {
codec_init_ok = -1;
goto free_and_end;
if (!(avctx->active_thread_type & FF_THREAD_FRAME) ||
avci->frame_thread_encoder) {
if (codec2->init) {
lock_avcodec(codec2);
ret = codec2->init(avctx);
unlock_avcodec(codec2);
if (ret < 0) {
avci->needs_close = codec2->caps_internal & FF_CODEC_CAP_INIT_CLEANUP;
goto free_and_end;
}
}
codec_init_ok = 1;
avci->needs_close = 1;
}
ret=0;
@ -361,6 +323,14 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
if (av_codec_is_decoder(avctx->codec)) {
if (!avctx->bit_rate)
avctx->bit_rate = get_bit_rate(avctx);
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
/* update the deprecated fields for old-style callers */
avctx->channels = avctx->ch_layout.nb_channels;
avctx->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
avctx->ch_layout.u.mask : 0;
/* validate channel layout from the decoder */
if (avctx->channel_layout) {
int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
@ -385,106 +355,22 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
ret = AVERROR(EINVAL);
goto free_and_end;
}
if (avctx->sub_charenc) {
if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
"supported with subtitles codecs\n");
ret = AVERROR(EINVAL);
goto free_and_end;
} else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
"subtitles character encoding will be ignored\n",
avctx->codec_descriptor->name);
avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
} else {
/* input character encoding is set for a text based subtitle
* codec at this point */
if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;
if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
#if CONFIG_ICONV
iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
if (cd == (iconv_t)-1) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
"with input character encoding \"%s\"\n", avctx->sub_charenc);
goto free_and_end;
}
iconv_close(cd);
#else
av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
"conversion needs a libavcodec built with iconv support "
"for this codec\n");
ret = AVERROR(ENOSYS);
goto free_and_end;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
}
}
#if FF_API_AVCTX_TIMEBASE
if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
#endif
}
if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) {
if (codec->priv_class)
av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
}
end:
unlock_avcodec(codec);
if (options) {
av_dict_free(options);
*options = tmp;
}
return ret;
free_and_end:
if (avctx->codec && avctx->codec->close &&
(codec_init_ok > 0 || (codec_init_ok < 0 &&
avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)))
avctx->codec->close(avctx);
if (HAVE_THREADS && avci->thread_ctx)
ff_thread_free(avctx);
if (codec->priv_class && avctx->priv_data)
av_opt_free(avctx->priv_data);
av_opt_free(avctx);
if (av_codec_is_encoder(avctx->codec)) {
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
av_frame_free(&avctx->coded_frame);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
av_freep(&avctx->extradata);
avctx->extradata_size = 0;
}
av_dict_free(&tmp);
av_freep(&avctx->priv_data);
av_freep(&avctx->subtitle_header);
#if FF_API_OLD_ENCDEC
av_frame_free(&avci->to_free);
av_frame_free(&avci->compat_decode_frame);
av_packet_free(&avci->compat_encode_packet);
#endif
av_frame_free(&avci->buffer_frame);
av_packet_free(&avci->buffer_pkt);
av_packet_free(&avci->last_pkt_props);
av_fifo_freep(&avci->pkt_props);
av_packet_free(&avci->ds.in_pkt);
av_frame_free(&avci->es.in_frame);
av_bsf_free(&avci->bsf);
av_buffer_unref(&avci->pool);
av_freep(&avci);
avctx->internal = NULL;
avctx->codec = NULL;
avcodec_close(avctx);
goto end;
}
@ -502,50 +388,31 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
"that doesn't support it\n");
return;
}
if (avci->in_frame)
av_frame_unref(avci->in_frame);
} else {
av_packet_unref(avci->last_pkt_props);
while (av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1) >= 0)
av_packet_unref(avci->last_pkt_props);
// We haven't implemented flushing for frame-threaded encoders.
av_assert0(!(caps & AV_CODEC_CAP_FRAME_THREADS));
av_packet_unref(avci->in_pkt);
avctx->pts_correction_last_pts =
avctx->pts_correction_last_dts = INT64_MIN;
av_bsf_flush(avci->bsf);
}
avci->draining = 0;
avci->draining_done = 0;
avci->nb_draining_errors = 0;
av_frame_unref(avci->buffer_frame);
#if FF_API_OLD_ENCDEC
av_frame_unref(avci->compat_decode_frame);
av_packet_unref(avci->compat_encode_packet);
#endif
av_packet_unref(avci->buffer_pkt);
av_packet_unref(avci->last_pkt_props);
while (av_fifo_size(avci->pkt_props) >= sizeof(*avci->last_pkt_props)) {
av_fifo_generic_read(avci->pkt_props,
avci->last_pkt_props, sizeof(*avci->last_pkt_props),
NULL);
av_packet_unref(avci->last_pkt_props);
}
av_fifo_reset(avci->pkt_props);
av_frame_unref(avci->es.in_frame);
av_packet_unref(avci->ds.in_pkt);
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ff_thread_flush(avctx);
else if (avctx->codec->flush)
avctx->codec->flush(avctx);
avctx->pts_correction_last_pts =
avctx->pts_correction_last_dts = INT64_MIN;
if (av_codec_is_decoder(avctx->codec))
av_bsf_flush(avci->bsf);
#if FF_API_OLD_ENCDEC
FF_DISABLE_DEPRECATION_WARNINGS
if (!avctx->refcounted_frames)
av_frame_unref(avci->to_free);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
else if (ffcodec(avctx->codec)->flush)
ffcodec(avctx->codec)->flush(avctx);
}
void avsubtitle_free(AVSubtitle *sub)
@ -553,12 +420,15 @@ void avsubtitle_free(AVSubtitle *sub)
int i;
for (i = 0; i < sub->num_rects; i++) {
av_freep(&sub->rects[i]->data[0]);
av_freep(&sub->rects[i]->data[1]);
av_freep(&sub->rects[i]->data[2]);
av_freep(&sub->rects[i]->data[3]);
av_freep(&sub->rects[i]->text);
av_freep(&sub->rects[i]->ass);
AVSubtitleRect *const rect = sub->rects[i];
av_freep(&rect->data[0]);
av_freep(&rect->data[1]);
av_freep(&rect->data[2]);
av_freep(&rect->data[3]);
av_freep(&rect->text);
av_freep(&rect->ass);
av_freep(&sub->rects[i]);
}
@ -575,45 +445,41 @@ av_cold int avcodec_close(AVCodecContext *avctx)
return 0;
if (avcodec_is_open(avctx)) {
AVCodecInternal *avci = avctx->internal;
if (CONFIG_FRAME_THREAD_ENCODER &&
avctx->internal->frame_thread_encoder && avctx->thread_count > 1) {
avci->frame_thread_encoder && avctx->thread_count > 1) {
ff_frame_thread_encoder_free(avctx);
}
if (HAVE_THREADS && avctx->internal->thread_ctx)
if (HAVE_THREADS && avci->thread_ctx)
ff_thread_free(avctx);
if (avctx->codec && avctx->codec->close)
avctx->codec->close(avctx);
avctx->internal->byte_buffer_size = 0;
av_freep(&avctx->internal->byte_buffer);
#if FF_API_OLD_ENCDEC
av_frame_free(&avctx->internal->to_free);
av_frame_free(&avctx->internal->compat_decode_frame);
av_packet_free(&avctx->internal->compat_encode_packet);
#endif
av_frame_free(&avctx->internal->buffer_frame);
av_packet_free(&avctx->internal->buffer_pkt);
av_packet_unref(avctx->internal->last_pkt_props);
while (av_fifo_size(avctx->internal->pkt_props) >=
sizeof(*avctx->internal->last_pkt_props)) {
av_fifo_generic_read(avctx->internal->pkt_props,
avctx->internal->last_pkt_props,
sizeof(*avctx->internal->last_pkt_props),
NULL);
av_packet_unref(avctx->internal->last_pkt_props);
if (avci->needs_close && ffcodec(avctx->codec)->close)
ffcodec(avctx->codec)->close(avctx);
avci->byte_buffer_size = 0;
av_freep(&avci->byte_buffer);
av_frame_free(&avci->buffer_frame);
av_packet_free(&avci->buffer_pkt);
if (avci->pkt_props) {
while (av_fifo_can_read(avci->pkt_props)) {
av_packet_unref(avci->last_pkt_props);
av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1);
}
av_fifo_freep2(&avci->pkt_props);
}
av_packet_free(&avctx->internal->last_pkt_props);
av_fifo_freep(&avctx->internal->pkt_props);
av_packet_free(&avci->last_pkt_props);
av_packet_free(&avctx->internal->ds.in_pkt);
av_frame_free(&avctx->internal->es.in_frame);
av_packet_free(&avci->in_pkt);
av_frame_free(&avci->in_frame);
av_buffer_unref(&avctx->internal->pool);
av_buffer_unref(&avci->pool);
if (avctx->hwaccel && avctx->hwaccel->uninit)
avctx->hwaccel->uninit(avctx);
av_freep(&avctx->internal->hwaccel_priv_data);
av_freep(&avci->hwaccel_priv_data);
av_bsf_free(&avctx->internal->bsf);
av_bsf_free(&avci->bsf);
av_channel_layout_uninit(&avci->initial_ch_layout);
av_freep(&avctx->internal);
}
@ -632,12 +498,10 @@ av_cold int avcodec_close(AVCodecContext *avctx)
av_freep(&avctx->priv_data);
if (av_codec_is_encoder(avctx->codec)) {
av_freep(&avctx->extradata);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
av_frame_free(&avctx->coded_frame);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
avctx->extradata_size = 0;
} else if (av_codec_is_decoder(avctx->codec))
av_freep(&avctx->subtitle_header);
avctx->codec = NULL;
avctx->active_thread_type = 0;
@ -654,6 +518,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
const char *codec_type;
const char *codec_name;
const char *profile = NULL;
AVBPrint bprint;
int64_t bitrate;
int new_line = 0;
AVRational display_aspect_ratio;
@ -662,46 +527,54 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
if (!buf || buf_size <= 0)
return;
av_bprint_init_for_buffer(&bprint, buf, buf_size);
codec_type = av_get_media_type_string(enc->codec_type);
codec_name = avcodec_get_name(enc->codec_id);
profile = avcodec_profile_name(enc->codec_id, enc->profile);
snprintf(buf, buf_size, "%s: %s", codec_type ? codec_type : "unknown",
codec_name);
av_bprintf(&bprint, "%s: %s", codec_type ? codec_type : "unknown",
codec_name);
buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */
if (enc->codec && strcmp(enc->codec->name, codec_name))
snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name);
av_bprintf(&bprint, " (%s)", enc->codec->name);
if (profile)
snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile);
av_bprintf(&bprint, " (%s)", profile);
if ( enc->codec_type == AVMEDIA_TYPE_VIDEO
&& av_log_get_level() >= AV_LOG_VERBOSE
&& enc->refs)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %d reference frame%s",
enc->refs, enc->refs > 1 ? "s" : "");
av_bprintf(&bprint, ", %d reference frame%s",
enc->refs, enc->refs > 1 ? "s" : "");
if (enc->codec_tag)
snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s / 0x%04X)",
av_fourcc2str(enc->codec_tag), enc->codec_tag);
av_bprintf(&bprint, " (%s / 0x%04X)",
av_fourcc2str(enc->codec_tag), enc->codec_tag);
switch (enc->codec_type) {
case AVMEDIA_TYPE_VIDEO:
{
char detail[256] = "(";
unsigned len;
av_strlcat(buf, separator, buf_size);
av_bprintf(&bprint, "%s%s", separator,
enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
unknown_if_null(av_get_pix_fmt_name(enc->pix_fmt)));
av_bprint_chars(&bprint, '(', 1);
len = bprint.len;
/* The following check ensures that '(' has been written
* and therefore allows us to erase it if it turns out
* to be unnecessary. */
if (!av_bprint_is_complete(&bprint))
return;
snprintf(buf + strlen(buf), buf_size - strlen(buf),
"%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
unknown_if_null(av_get_pix_fmt_name(enc->pix_fmt)));
if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample);
av_bprintf(&bprint, "%d bpc, ", enc->bits_per_raw_sample);
if (enc->color_range != AVCOL_RANGE_UNSPECIFIED &&
(str = av_color_range_name(enc->color_range)))
av_strlcatf(detail, sizeof(detail), "%s, ", str);
av_bprintf(&bprint, "%s, ", str);
if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
@ -711,10 +584,9 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
const char *trc = unknown_if_null(av_color_transfer_name(enc->color_trc));
if (strcmp(col, pri) || strcmp(col, trc)) {
new_line = 1;
av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ",
col, pri, trc);
av_bprintf(&bprint, "%s/%s/%s, ", col, pri, trc);
} else
av_strlcatf(detail, sizeof(detail), "%s, ", col);
av_bprintf(&bprint, "%s, ", col);
}
if (enc->field_order != AV_FIELD_UNKNOWN) {
@ -728,120 +600,115 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
else if (enc->field_order == AV_FIELD_BT)
field_order = "bottom coded first (swapped)";
av_strlcatf(detail, sizeof(detail), "%s, ", field_order);
av_bprintf(&bprint, "%s, ", field_order);
}
if (av_log_get_level() >= AV_LOG_VERBOSE &&
enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED &&
(str = av_chroma_location_name(enc->chroma_sample_location)))
av_strlcatf(detail, sizeof(detail), "%s, ", str);
av_bprintf(&bprint, "%s, ", str);
if (strlen(detail) > 1) {
detail[strlen(detail) - 2] = 0;
av_strlcatf(buf, buf_size, "%s)", detail);
if (len == bprint.len) {
bprint.str[len - 1] = '\0';
bprint.len--;
} else {
if (bprint.len - 2 < bprint.size) {
/* Erase the last ", " */
bprint.len -= 2;
bprint.str[bprint.len] = '\0';
}
av_bprint_chars(&bprint, ')', 1);
}
}
if (enc->width) {
av_strlcat(buf, new_line ? separator : ", ", buf_size);
snprintf(buf + strlen(buf), buf_size - strlen(buf),
"%dx%d",
enc->width, enc->height);
av_bprintf(&bprint, "%s%dx%d", new_line ? separator : ", ",
enc->width, enc->height);
if (av_log_get_level() >= AV_LOG_VERBOSE &&
(enc->width != enc->coded_width ||
enc->height != enc->coded_height))
snprintf(buf + strlen(buf), buf_size - strlen(buf),
" (%dx%d)", enc->coded_width, enc->coded_height);
av_bprintf(&bprint, " (%dx%d)",
enc->coded_width, enc->coded_height);
if (enc->sample_aspect_ratio.num) {
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
enc->width * (int64_t)enc->sample_aspect_ratio.num,
enc->height * (int64_t)enc->sample_aspect_ratio.den,
1024 * 1024);
snprintf(buf + strlen(buf), buf_size - strlen(buf),
" [SAR %d:%d DAR %d:%d]",
av_bprintf(&bprint, " [SAR %d:%d DAR %d:%d]",
enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
display_aspect_ratio.num, display_aspect_ratio.den);
}
if (av_log_get_level() >= AV_LOG_DEBUG) {
int g = av_gcd(enc->time_base.num, enc->time_base.den);
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %d/%d",
enc->time_base.num / g, enc->time_base.den / g);
av_bprintf(&bprint, ", %d/%d",
enc->time_base.num / g, enc->time_base.den / g);
}
}
if (encode) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", q=%d-%d", enc->qmin, enc->qmax);
av_bprintf(&bprint, ", q=%d-%d", enc->qmin, enc->qmax);
} else {
if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", Closed Captions");
av_bprintf(&bprint, ", Closed Captions");
if (enc->properties & FF_CODEC_PROPERTY_FILM_GRAIN)
av_bprintf(&bprint, ", Film Grain");
if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", lossless");
av_bprintf(&bprint, ", lossless");
}
break;
case AVMEDIA_TYPE_AUDIO:
av_strlcat(buf, separator, buf_size);
av_bprintf(&bprint, "%s", separator);
if (enc->sample_rate) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
"%d Hz, ", enc->sample_rate);
av_bprintf(&bprint, "%d Hz, ", enc->sample_rate);
}
{
char buf[512];
int ret = av_channel_layout_describe(&enc->ch_layout, buf, sizeof(buf));
if (ret >= 0)
av_bprintf(&bprint, "%s", buf);
}
av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout);
if (enc->sample_fmt != AV_SAMPLE_FMT_NONE &&
(str = av_get_sample_fmt_name(enc->sample_fmt))) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %s", str);
av_bprintf(&bprint, ", %s", str);
}
if ( enc->bits_per_raw_sample > 0
&& enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
" (%d bit)", enc->bits_per_raw_sample);
av_bprintf(&bprint, " (%d bit)", enc->bits_per_raw_sample);
if (av_log_get_level() >= AV_LOG_VERBOSE) {
if (enc->initial_padding)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", delay %d", enc->initial_padding);
av_bprintf(&bprint, ", delay %d", enc->initial_padding);
if (enc->trailing_padding)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", padding %d", enc->trailing_padding);
av_bprintf(&bprint, ", padding %d", enc->trailing_padding);
}
break;
case AVMEDIA_TYPE_DATA:
if (av_log_get_level() >= AV_LOG_DEBUG) {
int g = av_gcd(enc->time_base.num, enc->time_base.den);
if (g)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %d/%d",
enc->time_base.num / g, enc->time_base.den / g);
av_bprintf(&bprint, ", %d/%d",
enc->time_base.num / g, enc->time_base.den / g);
}
break;
case AVMEDIA_TYPE_SUBTITLE:
if (enc->width)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %dx%d", enc->width, enc->height);
av_bprintf(&bprint, ", %dx%d", enc->width, enc->height);
break;
default:
return;
}
if (encode) {
if (enc->flags & AV_CODEC_FLAG_PASS1)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", pass 1");
av_bprintf(&bprint, ", pass 1");
if (enc->flags & AV_CODEC_FLAG_PASS2)
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", pass 2");
av_bprintf(&bprint, ", pass 2");
}
bitrate = get_bit_rate(enc);
if (bitrate != 0) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %"PRId64" kb/s", bitrate / 1000);
av_bprintf(&bprint, ", %"PRId64" kb/s", bitrate / 1000);
} else if (enc->rc_max_rate > 0) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
av_bprintf(&bprint, ", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
}
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,47 +1,26 @@
av_codec_ffversion
av_codec_get_chroma_intra_matrix
av_codec_get_codec_descriptor
av_codec_get_codec_properties
av_codec_get_lowres
av_codec_get_max_lowres
av_codec_get_pkt_timebase
av_codec_get_seek_preroll
av_codec_is_decoder
av_codec_is_encoder
av_codec_iterate
av_codec_next
av_codec_set_chroma_intra_matrix
av_codec_set_codec_descriptor
av_codec_set_lowres
av_codec_set_pkt_timebase
av_codec_set_seek_preroll
av_copy_packet
av_copy_packet_side_data
av_dup_packet
av_fast_padded_malloc
av_fast_padded_mallocz
av_free_packet
av_get_audio_frame_duration
av_get_bits_per_sample
av_get_codec_tag_string
av_get_exact_bits_per_sample
av_get_pcm_codec
av_get_profile_name
av_grow_packet
av_hwaccel_next
av_hwdevice_ctx_init
av_hwdevice_ctx_alloc
av_hwdevice_ctx_create_derived
av_hwframe_transfer_get_formats
av_hwframe_ctx_alloc
av_init_packet
av_lockmgr_register
av_new_packet
av_packet_copy_props
av_packet_free_side_data
av_packet_from_data
av_packet_get_side_data
av_packet_merge_side_data
av_packet_move_ref
av_packet_new_side_data
av_packet_pack_dictionary
@ -49,21 +28,16 @@ av_packet_ref
av_packet_rescale_ts
av_packet_shrink_side_data
av_packet_side_data_name
av_packet_split_side_data
av_packet_unpack_dictionary
av_packet_unref
av_parser_change
av_parser_close
av_parser_init
av_parser_next
av_parser_parse2
#ifdef MOZ_LIBAV_FFT
av_rdft_calc
av_rdft_end
av_rdft_init
#endif
av_register_codec_parser
av_register_hwaccel
av_shrink_packet
av_vorbis_parse_frame
av_vorbis_parse_frame_flags
@ -77,10 +51,7 @@ avcodec_alloc_context3
avcodec_chroma_pos_to_enum
avcodec_close
avcodec_configuration
avcodec_copy_context
avcodec_decode_audio4
avcodec_decode_subtitle2
avcodec_decode_video2
avcodec_default_execute
avcodec_default_execute2
avcodec_default_get_buffer2
@ -97,7 +68,6 @@ avcodec_find_encoder_by_name
avcodec_flush_buffers
avcodec_free_context
avcodec_get_class
avcodec_get_context_defaults3
avcodec_get_frame_class
avcodec_get_hw_config
avcodec_get_name
@ -106,10 +76,12 @@ avcodec_get_type
avcodec_is_open
avcodec_license
avcodec_open2
avcodec_register
avcodec_register_all
avcodec_string
avcodec_version
avsubtitle_free
avcodec_send_packet
avcodec_receive_frame
ff_init_vlc_from_lengths
ff_init_vlc_sparse
ff_mpa_freq_tab
ff_mpa_bitrate_tab

Просмотреть файл

@ -22,13 +22,12 @@
#include <string.h>
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "libavutil/rational.h"
#include "bytestream.h"
#include "internal.h"
#include "defs.h"
#include "packet.h"
#include "packet_internal.h"
@ -39,16 +38,14 @@ void av_init_packet(AVPacket *pkt)
pkt->dts = AV_NOPTS_VALUE;
pkt->pos = -1;
pkt->duration = 0;
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
pkt->convergence_duration = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->flags = 0;
pkt->stream_index = 0;
pkt->buf = NULL;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
pkt->opaque = NULL;
pkt->opaque_ref = NULL;
pkt->time_base = av_make_q(0, 1);
}
#endif
@ -59,11 +56,12 @@ static void get_packet_defaults(AVPacket *pkt)
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
pkt->pos = -1;
pkt->time_base = av_make_q(0, 1);
}
AVPacket *av_packet_alloc(void)
{
AVPacket *pkt = av_mallocz(sizeof(AVPacket));
AVPacket *pkt = av_malloc(sizeof(AVPacket));
if (!pkt)
return pkt;
@ -142,7 +140,14 @@ int av_grow_packet(AVPacket *pkt, int grow_by)
if (new_size + data_offset > pkt->buf->size ||
!av_buffer_is_writable(pkt->buf)) {
int ret = av_buffer_realloc(&pkt->buf, new_size + data_offset);
int ret;
// allocate slightly more than requested to avoid excessive
// reallocations
if (new_size + data_offset < INT_MAX - new_size/16)
new_size += new_size/16;
ret = av_buffer_realloc(&pkt->buf, new_size + data_offset);
if (ret < 0) {
pkt->data = old_data;
return ret;
@ -179,108 +184,6 @@ int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size)
return 0;
}
#if FF_API_AVPACKET_OLD_API
FF_DISABLE_DEPRECATION_WARNINGS
#define ALLOC_MALLOC(data, size) data = av_malloc(size)
#define ALLOC_BUF(data, size) \
do { \
av_buffer_realloc(&pkt->buf, size); \
data = pkt->buf ? pkt->buf->data : NULL; \
} while (0)
#define DUP_DATA(dst, src, size, padding, ALLOC) \
do { \
void *data; \
if (padding) { \
if ((unsigned)(size) > \
(unsigned)(size) + AV_INPUT_BUFFER_PADDING_SIZE) \
goto failed_alloc; \
ALLOC(data, size + AV_INPUT_BUFFER_PADDING_SIZE); \
} else { \
ALLOC(data, size); \
} \
if (!data) \
goto failed_alloc; \
memcpy(data, src, size); \
if (padding) \
memset((uint8_t *)data + size, 0, \
AV_INPUT_BUFFER_PADDING_SIZE); \
dst = data; \
} while (0)
/* Makes duplicates of data, side_data, but does not copy any other fields */
static int copy_packet_data(AVPacket *pkt, const AVPacket *src, int dup)
{
pkt->data = NULL;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
if (pkt->buf) {
AVBufferRef *ref = av_buffer_ref(src->buf);
if (!ref)
return AVERROR(ENOMEM);
pkt->buf = ref;
pkt->data = ref->data;
} else {
DUP_DATA(pkt->data, src->data, pkt->size, 1, ALLOC_BUF);
}
if (src->side_data_elems && dup) {
pkt->side_data = src->side_data;
pkt->side_data_elems = src->side_data_elems;
}
if (src->side_data_elems && !dup) {
return av_copy_packet_side_data(pkt, src);
}
return 0;
failed_alloc:
av_packet_unref(pkt);
return AVERROR(ENOMEM);
}
int av_copy_packet_side_data(AVPacket *pkt, const AVPacket *src)
{
if (src->side_data_elems) {
int i;
DUP_DATA(pkt->side_data, src->side_data,
src->side_data_elems * sizeof(*src->side_data), 0, ALLOC_MALLOC);
if (src != pkt) {
memset(pkt->side_data, 0,
src->side_data_elems * sizeof(*src->side_data));
}
for (i = 0; i < src->side_data_elems; i++) {
DUP_DATA(pkt->side_data[i].data, src->side_data[i].data,
src->side_data[i].size, 1, ALLOC_MALLOC);
pkt->side_data[i].size = src->side_data[i].size;
pkt->side_data[i].type = src->side_data[i].type;
}
}
pkt->side_data_elems = src->side_data_elems;
return 0;
failed_alloc:
av_packet_unref(pkt);
return AVERROR(ENOMEM);
}
int av_dup_packet(AVPacket *pkt)
{
AVPacket tmp_pkt;
if (!pkt->buf && pkt->data) {
tmp_pkt = *pkt;
return copy_packet_data(pkt, &tmp_pkt, 1);
}
return 0;
}
int av_copy_packet(AVPacket *dst, const AVPacket *src)
{
*dst = *src;
return copy_packet_data(dst, src, 0);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
void av_packet_free_side_data(AVPacket *pkt)
{
int i;
@ -290,22 +193,6 @@ void av_packet_free_side_data(AVPacket *pkt)
pkt->side_data_elems = 0;
}
#if FF_API_AVPACKET_OLD_API
FF_DISABLE_DEPRECATION_WARNINGS
void av_free_packet(AVPacket *pkt)
{
if (pkt) {
if (pkt->buf)
av_buffer_unref(&pkt->buf);
pkt->data = NULL;
pkt->size = 0;
av_packet_free_side_data(pkt);
}
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
uint8_t *data, size_t size)
{
@ -341,16 +228,12 @@ int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
buffer_size_t size)
size_t size)
{
int ret;
uint8_t *data;
#if FF_API_BUFFER_SIZE_T
if ((unsigned)size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
#else
if (size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
#endif
return NULL;
data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!data)
@ -366,7 +249,7 @@ uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
}
uint8_t *av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
buffer_size_t *size)
size_t *size)
{
int i;
@ -416,102 +299,12 @@ const char *av_packet_side_data_name(enum AVPacketSideDataType type)
case AV_PKT_DATA_ICC_PROFILE: return "ICC Profile";
case AV_PKT_DATA_DOVI_CONF: return "DOVI configuration record";
case AV_PKT_DATA_S12M_TIMECODE: return "SMPTE ST 12-1:2014 timecode";
case AV_PKT_DATA_DYNAMIC_HDR10_PLUS: return "HDR10+ Dynamic Metadata (SMPTE 2094-40)";
}
return NULL;
}
#if FF_API_MERGE_SD_API
#define FF_MERGE_MARKER 0x8c4d9d108e25e9feULL
int av_packet_merge_side_data(AVPacket *pkt){
if(pkt->side_data_elems){
AVBufferRef *buf;
int i;
uint8_t *p;
uint64_t size= pkt->size + 8LL + AV_INPUT_BUFFER_PADDING_SIZE;
AVPacket old= *pkt;
for (i=0; i<old.side_data_elems; i++) {
size += old.side_data[i].size + 5LL;
}
if (size > INT_MAX)
return AVERROR(EINVAL);
buf = av_buffer_alloc(size);
if (!buf)
return AVERROR(ENOMEM);
pkt->buf = buf;
pkt->data = p = buf->data;
pkt->size = size - AV_INPUT_BUFFER_PADDING_SIZE;
bytestream_put_buffer(&p, old.data, old.size);
for (i=old.side_data_elems-1; i>=0; i--) {
bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
bytestream_put_be32(&p, old.side_data[i].size);
*p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
}
bytestream_put_be64(&p, FF_MERGE_MARKER);
av_assert0(p-pkt->data == pkt->size);
memset(p, 0, AV_INPUT_BUFFER_PADDING_SIZE);
av_packet_unref(&old);
pkt->side_data_elems = 0;
pkt->side_data = NULL;
return 1;
}
return 0;
}
int av_packet_split_side_data(AVPacket *pkt){
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
int i;
unsigned int size;
uint8_t *p;
p = pkt->data + pkt->size - 8 - 5;
for (i=1; ; i++){
size = AV_RB32(p);
if (size>INT_MAX - 5 || p - pkt->data < size)
return 0;
if (p[4]&128)
break;
if (p - pkt->data < size + 5)
return 0;
p-= size+5;
}
if (i > AV_PKT_DATA_NB)
return AVERROR(ERANGE);
pkt->side_data = av_malloc_array(i, sizeof(*pkt->side_data));
if (!pkt->side_data)
return AVERROR(ENOMEM);
p= pkt->data + pkt->size - 8 - 5;
for (i=0; ; i++){
size= AV_RB32(p);
av_assert0(size<=INT_MAX - 5 && p - pkt->data >= size);
pkt->side_data[i].data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
pkt->side_data[i].size = size;
pkt->side_data[i].type = p[4]&127;
if (!pkt->side_data[i].data)
return AVERROR(ENOMEM);
memcpy(pkt->side_data[i].data, p-size, size);
pkt->size -= size + 5;
if(p[4]&128)
break;
p-= size+5;
}
pkt->size -= 8;
pkt->side_data_elems = i+1;
return 1;
}
return 0;
}
#endif
#if FF_API_BUFFER_SIZE_T
uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size)
#else
uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
#endif
{
uint8_t *data = NULL;
*size = 0;
@ -530,11 +323,7 @@ uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
if (pass)
memcpy(data + total_length, str, len);
#if FF_API_BUFFER_SIZE_T
else if (len > INT_MAX - total_length)
#else
else if (len > SIZE_MAX - total_length)
#endif
return NULL;
total_length += len;
}
@ -550,12 +339,8 @@ uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
return data;
}
#if FF_API_BUFFER_SIZE_T
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
#else
int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
AVDictionary **dict)
#endif
{
const uint8_t *end;
int ret;
@ -582,7 +367,7 @@ int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
}
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
buffer_size_t size)
size_t size)
{
int i;
@ -599,29 +384,32 @@ int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
{
int i;
int i, ret;
dst->pts = src->pts;
dst->dts = src->dts;
dst->pos = src->pos;
dst->duration = src->duration;
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
dst->convergence_duration = src->convergence_duration;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dst->flags = src->flags;
dst->stream_index = src->stream_index;
dst->opaque = src->opaque;
dst->time_base = src->time_base;
dst->opaque_ref = NULL;
dst->side_data = NULL;
dst->side_data_elems = 0;
ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
if (ret < 0)
return ret;
for (i = 0; i < src->side_data_elems; i++) {
enum AVPacketSideDataType type = src->side_data[i].type;
buffer_size_t size = src->side_data[i].size;
size_t size = src->side_data[i].size;
uint8_t *src_data = src->side_data[i].data;
uint8_t *dst_data = av_packet_new_side_data(dst, type, size);
if (!dst_data) {
av_buffer_unref(&dst->opaque_ref);
av_packet_free_side_data(dst);
return AVERROR(ENOMEM);
}
@ -634,6 +422,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
void av_packet_unref(AVPacket *pkt)
{
av_packet_free_side_data(pkt);
av_buffer_unref(&pkt->opaque_ref);
av_buffer_unref(&pkt->buf);
get_packet_defaults(pkt);
}
@ -742,27 +531,21 @@ void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
if (pkt->duration > 0)
pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
if (pkt->convergence_duration > 0)
pkt->convergence_duration = av_rescale_q(pkt->convergence_duration, src_tb, dst_tb);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
int avpriv_packet_list_put(PacketList **packet_buffer,
PacketList **plast_pktl,
int avpriv_packet_list_put(PacketList *packet_buffer,
AVPacket *pkt,
int (*copy)(AVPacket *dst, const AVPacket *src),
int flags)
{
PacketList *pktl = av_mallocz(sizeof(PacketList));
PacketListEntry *pktl = av_malloc(sizeof(*pktl));
int ret;
if (!pktl)
return AVERROR(ENOMEM);
if (copy) {
get_packet_defaults(&pktl->pkt);
ret = copy(&pktl->pkt, pkt);
if (ret < 0) {
av_free(pktl);
@ -777,50 +560,49 @@ int avpriv_packet_list_put(PacketList **packet_buffer,
av_packet_move_ref(&pktl->pkt, pkt);
}
if (*packet_buffer)
(*plast_pktl)->next = pktl;
pktl->next = NULL;
if (packet_buffer->head)
packet_buffer->tail->next = pktl;
else
*packet_buffer = pktl;
packet_buffer->head = pktl;
/* Add the packet in the buffered packet list. */
*plast_pktl = pktl;
packet_buffer->tail = pktl;
return 0;
}
int avpriv_packet_list_get(PacketList **pkt_buffer,
PacketList **pkt_buffer_end,
int avpriv_packet_list_get(PacketList *pkt_buffer,
AVPacket *pkt)
{
PacketList *pktl;
if (!*pkt_buffer)
PacketListEntry *pktl = pkt_buffer->head;
if (!pktl)
return AVERROR(EAGAIN);
pktl = *pkt_buffer;
*pkt = pktl->pkt;
*pkt_buffer = pktl->next;
if (!pktl->next)
*pkt_buffer_end = NULL;
pkt_buffer->head = pktl->next;
if (!pkt_buffer->head)
pkt_buffer->tail = NULL;
av_freep(&pktl);
return 0;
}
void avpriv_packet_list_free(PacketList **pkt_buf, PacketList **pkt_buf_end)
void avpriv_packet_list_free(PacketList *pkt_buf)
{
PacketList *tmp = *pkt_buf;
PacketListEntry *tmp = pkt_buf->head;
while (tmp) {
PacketList *pktl = tmp;
PacketListEntry *pktl = tmp;
tmp = pktl->next;
av_packet_unref(&pktl->pkt);
av_freep(&pktl);
}
*pkt_buf = NULL;
*pkt_buf_end = NULL;
pkt_buf->head = pkt_buf->tail = NULL;
}
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
{
uint8_t *side_data;
buffer_size_t side_data_size;
size_t side_data_size;
int i;
side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &side_data_size);
@ -846,7 +628,7 @@ int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp)
{
AVProducerReferenceTime *prft;
uint8_t *side_data;
buffer_size_t side_data_size;
size_t side_data_size;
side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
if (!side_data) {

Просмотреть файл

@ -28,33 +28,13 @@
* bitstream api.
*/
#include <stdint.h>
#include <string.h>
#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/qsort.h"
#include "avcodec.h"
#include "internal.h"
#include "mathops.h"
#include "libavutil/intreadwrite.h"
#include "put_bits.h"
#include "vlc.h"
const uint8_t ff_log2_run[41]={
0, 0, 0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 3, 3, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7,
8, 9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,
24,
};
#if FF_API_AVPRIV_PUT_BITS
void avpriv_align_put_bits(PutBitContext *s)
{
align_put_bits(s);
}
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
{
ff_copy_bits(pb, src, length);
}
#endif
void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
{
@ -90,345 +70,3 @@ void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
put_bits(pb, bits, AV_RB16(src + 2 * words) >> (16 - bits));
}
/* VLC decoding */

/*
 * Read one element from a caller-supplied table with arbitrary layout:
 * `table` is treated as bytes, `wrap` is the stride in bytes between
 * entries, `size` is the width of each entry (1, 2 or 4 bytes), and the
 * value at index `i` is stored into `v`.  This lets init_vlc accept
 * bits/codes/symbols tables of any element type.
 */
#define GET_DATA(v, table, i, wrap, size)                   \
{                                                           \
    const uint8_t *ptr = (const uint8_t *)table + i * wrap; \
    switch(size) {                                          \
    case 1:                                                 \
        v = *(const uint8_t *)ptr;                          \
        break;                                              \
    case 2:                                                 \
        v = *(const uint16_t *)ptr;                         \
        break;                                              \
    case 4:                                                 \
    default:                                                \
        av_assert1(size == 4);                              \
        v = *(const uint32_t *)ptr;                         \
        break;                                              \
    }                                                       \
}
/*
 * Reserve `size` entries at the end of vlc->table and return the index of
 * the first reserved entry, growing the table if needed.
 * Returns AVERROR(ENOMEM) on allocation failure; aborts if a statically
 * sized table turns out to be too small (nothing can be done at runtime).
 */
static int alloc_table(VLC *vlc, int size, int use_static)
{
    const int start = vlc->table_size;

    vlc->table_size += size;
    if (vlc->table_size <= vlc->table_allocated)
        return start;

    /* Need to grow: extend by one full top-level table's worth of entries. */
    if (use_static)
        abort(); // cannot do anything, init_vlc() is used with too little memory
    vlc->table_allocated += (1 << vlc->bits);
    vlc->table = av_realloc_f(vlc->table, vlc->table_allocated, sizeof(VLC_TYPE) * 2);
    if (!vlc->table) {
        vlc->table_allocated = 0;
        vlc->table_size      = 0;
        return AVERROR(ENOMEM);
    }
    /* Zero the freshly appended region. */
    memset(vlc->table + vlc->table_allocated - (1 << vlc->bits), 0, sizeof(VLC_TYPE) * 2 << vlc->bits);
    return start;
}
/* Size of the on-stack scratch array of VLCcode entries; codebooks with
 * more codes than this fall back to a heap allocation. */
#define LOCALBUF_ELEMS 1500 // the maximum currently needed is 1296 by rv34

/* Temporary per-codeword description used while building the tables. */
typedef struct VLCcode {
    uint8_t bits;     // codeword length in bits
    VLC_TYPE symbol;  // value handed back to the reader when this code matches
    /** codeword, with the first bit-to-be-read in the msb
     * (even if intended for a little-endian bitstream reader) */
    uint32_t code;
} VLCcode;
/*
 * Shared setup for the init_vlc entry points: select the VLC struct to build
 * into (a stack copy for static tables, the caller's struct otherwise) and
 * provide a scratch buffer for nb_codes VLCcode entries.
 * Returns 0 on success, AVERROR(ENOMEM) if the heap scratch buffer is needed
 * but cannot be allocated.
 */
static int vlc_common_init(VLC *vlc_arg, int nb_bits, int nb_codes,
                           VLC **vlc, VLC *localvlc, VLCcode **buf,
                           int flags)
{
    VLC *v = vlc_arg;

    v->bits = nb_bits;
    if (flags & INIT_VLC_USE_NEW_STATIC) {
        /* Static mode: work on a stack copy so the caller's struct is only
         * committed once the table has been fully built. */
        av_assert0(nb_codes <= LOCALBUF_ELEMS);
        *localvlc = *vlc_arg;
        v = localvlc;
        v->table_size = 0;
    } else {
        v->table           = NULL;
        v->table_allocated = 0;
        v->table_size      = 0;
    }
    *vlc = v;

    /* The caller's on-stack buffer covers up to LOCALBUF_ELEMS codes;
     * larger codebooks need a heap allocation. */
    if (nb_codes > LOCALBUF_ELEMS) {
        *buf = av_malloc_array(nb_codes, sizeof(VLCcode));
        if (!*buf)
            return AVERROR(ENOMEM);
    }
    return 0;
}
/* qsort comparator ordering VLCcode entries by codeword value, so that
 * codes sharing a prefix end up contiguous (as build_table() requires). */
static int compare_vlcspec(const void *a, const void *b)
{
    const VLCcode *ca = a;
    const VLCcode *cb = b;

    return (ca->code >> 1) - (cb->code >> 1);
}
/**
 * Build VLC decoding tables suitable for use with get_vlc().
 *
 * @param vlc the context to be initialized
 *
 * @param table_nb_bits max length of vlc codes to store directly in this table
 * (Longer codes are delegated to subtables.)
 *
 * @param nb_codes number of elements in codes[]
 *
 * @param codes descriptions of the vlc codes
 * These must be ordered such that codes going into the same subtable are contiguous.
 * Sorting by VLCcode.code is sufficient, though not necessary.
 *
 * @return the index of the newly built (sub)table inside vlc->table,
 *         or a negative AVERROR code on failure.
 *
 * NOTE(review): codes[] is modified in place while recursing (bits/code are
 * rebased for subtables), so callers must treat it as scratch.
 */
static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
                       VLCcode *codes, int flags)
{
    int table_size, table_index, index, code_prefix, symbol, subtable_bits;
    int i, j, k, n, nb, inc;
    uint32_t code;
    volatile VLC_TYPE (* volatile table)[2]; // the double volatile is needed to prevent an internal compiler error in gcc 4.2

    if (table_nb_bits > 30)
        return AVERROR(EINVAL);
    table_size = 1 << table_nb_bits;
    table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
    ff_dlog(NULL, "new table index=%d size=%d\n", table_index, table_size);
    if (table_index < 0)
        return table_index;
    table = (volatile VLC_TYPE (*)[2])&vlc->table[table_index];

    /* first pass: map codes and compute auxiliary table sizes */
    for (i = 0; i < nb_codes; i++) {
        n = codes[i].bits;
        code = codes[i].code;
        symbol = codes[i].symbol;
        ff_dlog(NULL, "i=%d n=%d code=0x%"PRIx32"\n", i, n, code);
        if (n <= table_nb_bits) {
            /* no need to add another table */
            /* A code shorter than the table covers 2^(table_nb_bits-n)
             * consecutive entries; fill all of them. */
            j = code >> (32 - table_nb_bits);
            nb = 1 << (table_nb_bits - n);
            inc = 1;
            if (flags & INIT_VLC_OUTPUT_LE) {
                j = bitswap_32(code);
                inc = 1 << n;
            }
            for (k = 0; k < nb; k++) {
                int bits = table[j][1];
                int oldsym = table[j][0];
                ff_dlog(NULL, "%4x: code=%d n=%d\n", j, i, n);
                /* A slot may only be written twice with identical contents;
                 * anything else means the code set is inconsistent. */
                if ((bits || oldsym) && (bits != n || oldsym != symbol)) {
                    av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
                    return AVERROR_INVALIDDATA;
                }
                table[j][1] = n; //bits
                table[j][0] = symbol;
                j += inc;
            }
        } else {
            /* fill auxiliary table recursively */
            /* Strip the table_nb_bits prefix, then gather the contiguous run
             * of codes sharing that prefix into one subtable. */
            n -= table_nb_bits;
            code_prefix = code >> (32 - table_nb_bits);
            subtable_bits = n;
            codes[i].bits = n;
            codes[i].code = code << table_nb_bits;
            for (k = i+1; k < nb_codes; k++) {
                n = codes[k].bits - table_nb_bits;
                if (n <= 0)
                    break;
                code = codes[k].code;
                if (code >> (32 - table_nb_bits) != code_prefix)
                    break;
                codes[k].bits = n;
                codes[k].code = code << table_nb_bits;
                subtable_bits = FFMAX(subtable_bits, n);
            }
            subtable_bits = FFMIN(subtable_bits, table_nb_bits);
            j = (flags & INIT_VLC_OUTPUT_LE) ? bitswap_32(code_prefix) >> (32 - table_nb_bits) : code_prefix;
            /* Negative "bits" in a slot marks it as a pointer to a subtable. */
            table[j][1] = -subtable_bits;
            ff_dlog(NULL, "%4x: n=%d (subtable)\n",
                    j, codes[i].bits + table_nb_bits);
            index = build_table(vlc, subtable_bits, k-i, codes+i, flags);
            if (index < 0)
                return index;
            /* note: realloc has been done, so reload tables */
            table = (volatile VLC_TYPE (*)[2])&vlc->table[table_index];
            table[j][0] = index; //code
            /* Detect truncation when storing the index into VLC_TYPE. */
            if (table[j][0] != index) {
                avpriv_request_sample(NULL, "strange codes");
                return AVERROR_PATCHWELCOME;
            }
            i = k-1;
        }
    }

    /* Mark unused slots so the reader can detect invalid input. */
    for (i = 0; i < table_size; i++) {
        if (table[i][1] == 0) //bits
            table[i][0] = -1; //codes
    }

    return table_index;
}
/*
 * Shared teardown for the init_vlc entry points: run build_table() and then
 * either commit the stack copy back to the caller (static mode) or release
 * scratch/partial allocations (dynamic mode).
 * Returns 0 on success or the negative error from build_table().
 */
static int vlc_common_end(VLC *vlc, int nb_bits, int nb_codes, VLCcode *codes,
                          int flags, VLC *vlc_arg, VLCcode localbuf[LOCALBUF_ELEMS])
{
    int ret = build_table(vlc, nb_bits, nb_codes, codes, flags);

    if (flags & INIT_VLC_USE_NEW_STATIC) {
        /* Warn when the static allocation does not match exactly, unless the
         * caller explicitly allowed an over-long table via the extra bit in
         * INIT_VLC_STATIC_OVERLONG (the USE_NEW_STATIC bit is masked out). */
        if (vlc->table_size != vlc->table_allocated &&
            !(flags & (INIT_VLC_STATIC_OVERLONG & ~INIT_VLC_USE_NEW_STATIC)))
            av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
        av_assert0(ret >= 0);
        /* Commit the fully built stack copy to the caller's struct. */
        *vlc_arg = *vlc;
    } else {
        if (codes != localbuf)
            av_free(codes);
        if (ret < 0) {
            av_freep(&vlc->table);
            return ret;
        }
    }
    return 0;
}
/* Build VLC decoding tables suitable for use with get_vlc().
   'nb_bits' sets the decoding table size (2^nb_bits) entries. The
   bigger it is, the faster is the decoding. But it should not be too
   big to save memory and L1 cache. '9' is a good compromise.
   'nb_codes' : number of vlcs codes
   'bits' : table which gives the size (in bits) of each vlc code.
   'codes' : table which gives the bit pattern of each vlc code.
   'symbols' : table which gives the values to be returned from get_vlc().
   'xxx_wrap' : give the number of bytes between each entry of the
   'bits' or 'codes' tables.
   'xxx_size' : gives the number of bytes of each entry of the 'bits'
   or 'codes' tables. Currently 1,2 and 4 are supported.
   'wrap' and 'size' make it possible to use any memory configuration and types
   (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
*/
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
                       const void *bits, int bits_wrap, int bits_size,
                       const void *codes, int codes_wrap, int codes_size,
                       const void *symbols, int symbols_wrap, int symbols_size,
                       int flags)
{
    VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
    int i, j, ret;
    VLC localvlc, *vlc;

    ret = vlc_common_init(vlc_arg, nb_bits, nb_codes, &vlc, &localvlc,
                          &buf, flags);
    if (ret < 0)
        return ret;

    av_assert0(symbols_size <= 2 || !symbols);
    j = 0;
/* Validate and normalize one input code into buf[j] whenever `condition`
 * holds: check length, check the code fits in its bit count, left-align
 * (or bit-reverse for little-endian input) the codeword, fetch the symbol. */
#define COPY(condition)\
    for (i = 0; i < nb_codes; i++) { \
        unsigned len; \
        GET_DATA(len, bits, i, bits_wrap, bits_size); \
        if (!(condition)) \
            continue; \
        if (len > 3*nb_bits || len > 32) { \
            av_log(NULL, AV_LOG_ERROR, "Too long VLC (%u) in init_vlc\n", len);\
            if (buf != localbuf) \
                av_free(buf); \
            return AVERROR(EINVAL); \
        } \
        buf[j].bits = len; \
        GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size); \
        if (buf[j].code >= (1LL<<buf[j].bits)) { \
            av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \
                   "init_vlc\n", buf[j].code, i); \
            if (buf != localbuf) \
                av_free(buf); \
            return AVERROR(EINVAL); \
        } \
        if (flags & INIT_VLC_INPUT_LE) \
            buf[j].code = bitswap_32(buf[j].code); \
        else \
            buf[j].code <<= 32 - buf[j].bits; \
        if (symbols) \
            GET_DATA(buf[j].symbol, symbols, i, symbols_wrap, symbols_size) \
        else \
            buf[j].symbol = i; \
        j++; \
    }
    /* Long codes first: only they need sorting (so codes sharing a prefix
     * land in the same subtable); short codes are appended unsorted after. */
    COPY(len > nb_bits);
    // qsort is the slowest part of init_vlc, and could probably be improved or avoided
    AV_QSORT(buf, j, struct VLCcode, compare_vlcspec);
    COPY(len && len <= nb_bits);
    nb_codes = j;

    return vlc_common_end(vlc, nb_bits, nb_codes, buf,
                          flags, vlc_arg, localbuf);
}
/*
 * Build a VLC from a list of code lengths alone: codewords are assigned
 * implicitly in canonical order (each code is derived from the running
 * `code` accumulator).  A negative length skips an entry while still
 * consuming codespace; a zero length skips it entirely.  `offset` is added
 * to every symbol value.  Returns 0 on success, a negative AVERROR on
 * invalid lengths or an overdetermined tree.
 */
int ff_init_vlc_from_lengths(VLC *vlc_arg, int nb_bits, int nb_codes,
                             const int8_t *lens, int lens_wrap,
                             const void *symbols, int symbols_wrap, int symbols_size,
                             int offset, int flags, void *logctx)
{
    VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
    VLC localvlc, *vlc;
    uint64_t code;
    int ret, j, len_max = FFMIN(32, 3 * nb_bits);

    ret = vlc_common_init(vlc_arg, nb_bits, nb_codes, &vlc, &localvlc,
                          &buf, flags);
    if (ret < 0)
        return ret;

    j = code = 0;
    for (int i = 0; i < nb_codes; i++, lens += lens_wrap) {
        int len = *lens;
        if (len > 0) {
            unsigned sym;

            buf[j].bits = len;
            if (symbols)
                GET_DATA(sym, symbols, i, symbols_wrap, symbols_size)
            else
                sym = i;
            buf[j].symbol = sym + offset;
            buf[j++].code = code;
        } else if (len < 0) {
            /* Negative length: reserve the codespace but emit no symbol. */
            len = -len;
        } else
            continue;
        /* The canonical code must have all bits below its length clear. */
        if (len > len_max || code & ((1U << (32 - len)) - 1)) {
            av_log(logctx, AV_LOG_ERROR, "Invalid VLC (length %u)\n", len);
            goto fail;
        }
        code += 1U << (32 - len);
        /* Accumulated in 64 bits so exceeding the 32-bit codespace is
         * detectable rather than silently wrapping. */
        if (code > UINT32_MAX + 1ULL) {
            av_log(logctx, AV_LOG_ERROR, "Overdetermined VLC tree\n");
            goto fail;
        }
    }
    return vlc_common_end(vlc, nb_bits, j, buf,
                          flags, vlc_arg, localbuf);
fail:
    if (buf != localbuf)
        av_free(buf);
    return AVERROR_INVALIDDATA;
}
/* Free the table of a dynamically initialized VLC and reset the pointer;
 * safe to call on a zeroed or already-freed VLC (av_freep handles NULL). */
void ff_free_vlc(VLC *vlc)
{
    av_freep(&vlc->table);
}

Просмотреть файл

@ -1,185 +0,0 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include "avcodec.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#if FF_API_OLD_BSF
FF_DISABLE_DEPRECATION_WARNINGS
/*
 * Deprecated compat shim: return the registered bitstream filter following
 * f, or the first one when f is NULL, by walking the av_bsf_iterate() list.
 * NOTE(review): if f is not in the list the walk never terminates — same as
 * the historical behavior; callers must pass a value previously returned.
 */
const AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f)
{
    void *iter = NULL;

    if (f) {
        const AVBitStreamFilter *cur;
        /* Advance the iterator until it has just produced f. */
        do
            cur = av_bsf_iterate(&iter);
        while (cur != f);
    }

    return av_bsf_iterate(&iter);
}
/* Deprecated no-op kept for ABI compatibility: explicit registration is not
 * needed with the av_bsf_* API, where all filters are built-in. */
void av_register_bitstream_filter(AVBitStreamFilter *bsf)
{
}
/* Private state bridging the legacy AVBitStreamFilterContext API onto the
 * modern AVBSFContext one. */
typedef struct BSFCompatContext {
    AVBSFContext *ctx;     // created lazily on the first av_bitstream_filter_filter() call
    int extradata_updated; // set once output extradata has been copied back to the AVCodecContext
} BSFCompatContext;
/*
 * Deprecated compat shim: allocate a legacy filter context wrapping the
 * modern BSF registered under `name`.
 * Returns NULL if no such filter exists or on allocation failure.
 * The result must be released with av_bitstream_filter_close().
 */
AVBitStreamFilterContext *av_bitstream_filter_init(const char *name)
{
    const AVBitStreamFilter *bsf = av_bsf_get_by_name(name);
    AVBitStreamFilterContext *ctx;
    BSFCompatContext *priv;

    if (!bsf)
        return NULL;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return NULL;

    priv = av_mallocz(sizeof(*priv));
    if (!priv) {
        av_freep(&ctx);
        return NULL;
    }

    ctx->filter    = bsf;
    ctx->priv_data = priv;

    return ctx;
}
/*
 * Deprecated compat shim: free a context from av_bitstream_filter_init(),
 * including the wrapped AVBSFContext.  NULL is accepted and ignored.
 */
void av_bitstream_filter_close(AVBitStreamFilterContext *bsfc)
{
    if (bsfc) {
        BSFCompatContext *compat = bsfc->priv_data;

        av_bsf_free(&compat->ctx);
        av_freep(&bsfc->priv_data);
        av_free(bsfc);
    }
}
/*
 * Deprecated compat shim: run one buffer through the wrapped modern BSF.
 * Lazily creates and initializes the AVBSFContext on the first call, feeds
 * `buf` as a packet, and returns the first filtered packet in a freshly
 * allocated *poutbuf (caller frees).  Any further packets the filter
 * produces for this input are drained and discarded, since the legacy API
 * can only return one output per call.
 * Returns 1 with output, 0 when the filter produced nothing (EAGAIN/EOF),
 * or a negative AVERROR code.
 */
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
                               AVCodecContext *avctx, const char *args,
                               uint8_t **poutbuf, int *poutbuf_size,
                               const uint8_t *buf, int buf_size, int keyframe)
{
    BSFCompatContext *priv = bsfc->priv_data;
    AVPacket pkt = { 0 };
    int ret;

    if (!priv->ctx) {
        /* First call: create and configure the wrapped AVBSFContext. */
        ret = av_bsf_alloc(bsfc->filter, &priv->ctx);
        if (ret < 0)
            return ret;

        ret = avcodec_parameters_from_context(priv->ctx->par_in, avctx);
        if (ret < 0)
            return ret;

        priv->ctx->time_base_in = avctx->time_base;

        if (bsfc->args && bsfc->filter->priv_class) {
            /* The legacy API passed a single unnamed option string; map it
             * onto the filter's first private option as a shorthand. */
            const AVOption *opt = av_opt_next(priv->ctx->priv_data, NULL);
            const char * shorthand[2] = {NULL};

            if (opt)
                shorthand[0] = opt->name;

            ret = av_opt_set_from_string(priv->ctx->priv_data, bsfc->args, shorthand, "=", ":");
            if (ret < 0)
                return ret;
        }

        ret = av_bsf_init(priv->ctx);
        if (ret < 0)
            return ret;
    }

    /* Wrap the caller's buffer without copying; send_packet will ref it. */
    pkt.data = (uint8_t *)buf;
    pkt.size = buf_size;

    ret = av_bsf_send_packet(priv->ctx, &pkt);
    if (ret < 0)
        return ret;

    *poutbuf = NULL;
    *poutbuf_size = 0;

    ret = av_bsf_receive_packet(priv->ctx, &pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    else if (ret < 0)
        return ret;

    *poutbuf = av_malloc(pkt.size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!*poutbuf) {
        av_packet_unref(&pkt);
        return AVERROR(ENOMEM);
    }

    *poutbuf_size = pkt.size;
    memcpy(*poutbuf, pkt.data, pkt.size);

    av_packet_unref(&pkt);

    /* drain all the remaining packets we cannot return */
    while (ret >= 0) {
        ret = av_bsf_receive_packet(priv->ctx, &pkt);
        av_packet_unref(&pkt);
    }

    if (!priv->extradata_updated) {
        /* update extradata in avctx from the output codec parameters */
        if (priv->ctx->par_out->extradata_size && (!args || !strstr(args, "private_spspps_buf"))) {
            av_freep(&avctx->extradata);
            avctx->extradata_size = 0;
            avctx->extradata = av_mallocz(priv->ctx->par_out->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
            memcpy(avctx->extradata, priv->ctx->par_out->extradata, priv->ctx->par_out->extradata_size);
            avctx->extradata_size = priv->ctx->par_out->extradata_size;
        }
        priv->extradata_updated = 1;
    }

    return 1;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif

Просмотреть файл

@ -16,71 +16,68 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdint.h>
#include <string.h>
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "avcodec.h"
#include "bsf.h"
#include "bsf_internal.h"
extern const AVBitStreamFilter ff_aac_adtstoasc_bsf;
extern const AVBitStreamFilter ff_av1_frame_merge_bsf;
extern const AVBitStreamFilter ff_av1_frame_split_bsf;
extern const AVBitStreamFilter ff_av1_metadata_bsf;
extern const AVBitStreamFilter ff_chomp_bsf;
extern const AVBitStreamFilter ff_dump_extradata_bsf;
extern const AVBitStreamFilter ff_dca_core_bsf;
extern const AVBitStreamFilter ff_eac3_core_bsf;
extern const AVBitStreamFilter ff_extract_extradata_bsf;
extern const AVBitStreamFilter ff_filter_units_bsf;
extern const AVBitStreamFilter ff_h264_metadata_bsf;
extern const AVBitStreamFilter ff_h264_mp4toannexb_bsf;
extern const AVBitStreamFilter ff_h264_redundant_pps_bsf;
extern const AVBitStreamFilter ff_hapqa_extract_bsf;
extern const AVBitStreamFilter ff_hevc_metadata_bsf;
extern const AVBitStreamFilter ff_hevc_mp4toannexb_bsf;
extern const AVBitStreamFilter ff_imx_dump_header_bsf;
extern const AVBitStreamFilter ff_mjpeg2jpeg_bsf;
extern const AVBitStreamFilter ff_mjpega_dump_header_bsf;
extern const AVBitStreamFilter ff_mp3_header_decompress_bsf;
extern const AVBitStreamFilter ff_mpeg2_metadata_bsf;
extern const AVBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
extern const AVBitStreamFilter ff_mov2textsub_bsf;
extern const AVBitStreamFilter ff_noise_bsf;
extern const AVBitStreamFilter ff_null_bsf;
extern const AVBitStreamFilter ff_opus_metadata_bsf;
extern const AVBitStreamFilter ff_pcm_rechunk_bsf;
extern const AVBitStreamFilter ff_prores_metadata_bsf;
extern const AVBitStreamFilter ff_remove_extradata_bsf;
extern const AVBitStreamFilter ff_setts_bsf;
extern const AVBitStreamFilter ff_text2movsub_bsf;
extern const AVBitStreamFilter ff_trace_headers_bsf;
extern const AVBitStreamFilter ff_truehd_core_bsf;
extern const AVBitStreamFilter ff_vp9_metadata_bsf;
extern const AVBitStreamFilter ff_vp9_raw_reorder_bsf;
extern const AVBitStreamFilter ff_vp9_superframe_bsf;
extern const AVBitStreamFilter ff_vp9_superframe_split_bsf;
extern const FFBitStreamFilter ff_aac_adtstoasc_bsf;
extern const FFBitStreamFilter ff_av1_frame_merge_bsf;
extern const FFBitStreamFilter ff_av1_frame_split_bsf;
extern const FFBitStreamFilter ff_av1_metadata_bsf;
extern const FFBitStreamFilter ff_chomp_bsf;
extern const FFBitStreamFilter ff_dump_extradata_bsf;
extern const FFBitStreamFilter ff_dca_core_bsf;
extern const FFBitStreamFilter ff_dv_error_marker_bsf;
extern const FFBitStreamFilter ff_eac3_core_bsf;
extern const FFBitStreamFilter ff_extract_extradata_bsf;
extern const FFBitStreamFilter ff_filter_units_bsf;
extern const FFBitStreamFilter ff_h264_metadata_bsf;
extern const FFBitStreamFilter ff_h264_mp4toannexb_bsf;
extern const FFBitStreamFilter ff_h264_redundant_pps_bsf;
extern const FFBitStreamFilter ff_hapqa_extract_bsf;
extern const FFBitStreamFilter ff_hevc_metadata_bsf;
extern const FFBitStreamFilter ff_hevc_mp4toannexb_bsf;
extern const FFBitStreamFilter ff_imx_dump_header_bsf;
extern const FFBitStreamFilter ff_mjpeg2jpeg_bsf;
extern const FFBitStreamFilter ff_mjpega_dump_header_bsf;
extern const FFBitStreamFilter ff_mp3_header_decompress_bsf;
extern const FFBitStreamFilter ff_mpeg2_metadata_bsf;
extern const FFBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
extern const FFBitStreamFilter ff_mov2textsub_bsf;
extern const FFBitStreamFilter ff_noise_bsf;
extern const FFBitStreamFilter ff_null_bsf;
extern const FFBitStreamFilter ff_opus_metadata_bsf;
extern const FFBitStreamFilter ff_pcm_rechunk_bsf;
extern const FFBitStreamFilter ff_pgs_frame_merge_bsf;
extern const FFBitStreamFilter ff_prores_metadata_bsf;
extern const FFBitStreamFilter ff_remove_extradata_bsf;
extern const FFBitStreamFilter ff_setts_bsf;
extern const FFBitStreamFilter ff_text2movsub_bsf;
extern const FFBitStreamFilter ff_trace_headers_bsf;
extern const FFBitStreamFilter ff_truehd_core_bsf;
extern const FFBitStreamFilter ff_vp9_metadata_bsf;
extern const FFBitStreamFilter ff_vp9_raw_reorder_bsf;
extern const FFBitStreamFilter ff_vp9_superframe_bsf;
extern const FFBitStreamFilter ff_vp9_superframe_split_bsf;
#include "libavcodec/bsf_list.c"
const AVBitStreamFilter *av_bsf_iterate(void **opaque)
{
uintptr_t i = (uintptr_t)*opaque;
const AVBitStreamFilter *f = bitstream_filters[i];
const FFBitStreamFilter *f = bitstream_filters[i];
if (f)
if (f) {
*opaque = (void*)(i + 1);
return f;
return &f->p;
}
return NULL;
}
#if FF_API_NEXT
const AVBitStreamFilter *av_bsf_next(void **opaque) {
return av_bsf_iterate(opaque);
}
#endif
const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
{
const AVBitStreamFilter *f = NULL;
@ -97,28 +94,6 @@ const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
return NULL;
}
#if FF_API_CHILD_CLASS_NEXT
/*
 * Deprecated child-class iteration: given the previously returned private
 * class (or NULL to start), return the private class of the next bitstream
 * filter that has one, or NULL when exhausted.
 */
const AVClass *ff_bsf_child_class_next(const AVClass *prev)
{
    const AVBitStreamFilter *f;
    void *iter = NULL;

    /* Position the iterator just past the filter owning prev, if given. */
    if (prev) {
        while ((f = av_bsf_iterate(&iter)))
            if (f->priv_class == prev)
                break;
    }

    /* Return the next filter that exposes private options. */
    while ((f = av_bsf_iterate(&iter)))
        if (f->priv_class)
            return f->priv_class;

    return NULL;
}
#endif
const AVClass *ff_bsf_child_class_iterate(void **opaque)
{
const AVBitStreamFilter *f;

Просмотреть файл

@ -23,7 +23,6 @@
#include <stdint.h>
#include "avcodec.h"
#include "version.h"
/* add and put pixel (decoding)
* Block sizes for op_pixels_func are 8x4,8x8 16x8 16x16.

Просмотреть файл

@ -18,6 +18,8 @@
#include <string.h>
#include "config_components.h"
#include "libavutil/avassert.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
@ -32,29 +34,40 @@
#define IS_EMPTY(pkt) (!(pkt)->data && !(pkt)->side_data_elems)
struct AVBSFInternal {
static av_always_inline const FFBitStreamFilter *ff_bsf(const AVBitStreamFilter *bsf)
{
return (const FFBitStreamFilter*)bsf;
}
typedef struct FFBSFContext {
AVBSFContext pub;
AVPacket *buffer_pkt;
int eof;
};
} FFBSFContext;
static av_always_inline FFBSFContext *ffbsfcontext(AVBSFContext *ctx)
{
return (FFBSFContext *)ctx;
}
void av_bsf_free(AVBSFContext **pctx)
{
AVBSFContext *ctx;
FFBSFContext *bsfi;
if (!pctx || !*pctx)
return;
ctx = *pctx;
ctx = *pctx;
bsfi = ffbsfcontext(ctx);
if (ctx->internal) {
if (ctx->filter->close)
ctx->filter->close(ctx);
av_packet_free(&ctx->internal->buffer_pkt);
av_freep(&ctx->internal);
if (ctx->priv_data) {
if (ff_bsf(ctx->filter)->close)
ff_bsf(ctx->filter)->close(ctx);
if (ctx->filter->priv_class)
av_opt_free(ctx->priv_data);
av_freep(&ctx->priv_data);
}
if (ctx->filter->priv_class && ctx->priv_data)
av_opt_free(ctx->priv_data);
av_freep(&ctx->priv_data);
av_packet_free(&bsfi->buffer_pkt);
avcodec_parameters_free(&ctx->par_in);
avcodec_parameters_free(&ctx->par_out);
@ -80,9 +93,6 @@ static const AVClass bsf_class = {
.item_name = bsf_to_name,
.version = LIBAVUTIL_VERSION_INT,
.child_next = bsf_child_next,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = ff_bsf_child_class_next,
#endif
.child_class_iterate = ff_bsf_child_class_iterate,
.category = AV_CLASS_CATEGORY_BITSTREAM_FILTER,
};
@ -95,12 +105,13 @@ const AVClass *av_bsf_get_class(void)
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
{
AVBSFContext *ctx;
AVBSFInternal *bsfi;
FFBSFContext *bsfi;
int ret;
ctx = av_mallocz(sizeof(*ctx));
if (!ctx)
bsfi = av_mallocz(sizeof(*bsfi));
if (!bsfi)
return AVERROR(ENOMEM);
ctx = &bsfi->pub;
ctx->av_class = &bsf_class;
ctx->filter = filter;
@ -112,8 +123,8 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
goto fail;
}
/* allocate priv data and init private options */
if (filter->priv_data_size) {
ctx->priv_data = av_mallocz(filter->priv_data_size);
if (ff_bsf(filter)->priv_data_size) {
ctx->priv_data = av_mallocz(ff_bsf(filter)->priv_data_size);
if (!ctx->priv_data) {
ret = AVERROR(ENOMEM);
goto fail;
@ -123,15 +134,6 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
av_opt_set_defaults(ctx->priv_data);
}
}
/* Allocate AVBSFInternal; must happen after priv_data has been allocated
* so that a filter->close needing priv_data is never called without. */
bsfi = av_mallocz(sizeof(*bsfi));
if (!bsfi) {
ret = AVERROR(ENOMEM);
goto fail;
}
ctx->internal = bsfi;
bsfi->buffer_pkt = av_packet_alloc();
if (!bsfi->buffer_pkt) {
ret = AVERROR(ENOMEM);
@ -160,9 +162,9 @@ int av_bsf_init(AVBSFContext *ctx)
"bitstream filter '%s'. Supported codecs are: ",
desc ? desc->name : "unknown", ctx->par_in->codec_id, ctx->filter->name);
for (i = 0; ctx->filter->codec_ids[i] != AV_CODEC_ID_NONE; i++) {
desc = avcodec_descriptor_get(ctx->filter->codec_ids[i]);
enum AVCodecID codec_id = ctx->filter->codec_ids[i];
av_log(ctx, AV_LOG_ERROR, "%s (%d) ",
desc ? desc->name : "unknown", ctx->filter->codec_ids[i]);
avcodec_get_name(codec_id), codec_id);
}
av_log(ctx, AV_LOG_ERROR, "\n");
return AVERROR(EINVAL);
@ -177,8 +179,8 @@ int av_bsf_init(AVBSFContext *ctx)
ctx->time_base_out = ctx->time_base_in;
if (ctx->filter->init) {
ret = ctx->filter->init(ctx);
if (ff_bsf(ctx->filter)->init) {
ret = ff_bsf(ctx->filter)->init(ctx);
if (ret < 0)
return ret;
}
@ -188,22 +190,24 @@ int av_bsf_init(AVBSFContext *ctx)
void av_bsf_flush(AVBSFContext *ctx)
{
AVBSFInternal *bsfi = ctx->internal;
FFBSFContext *const bsfi = ffbsfcontext(ctx);
bsfi->eof = 0;
av_packet_unref(bsfi->buffer_pkt);
if (ctx->filter->flush)
ctx->filter->flush(ctx);
if (ff_bsf(ctx->filter)->flush)
ff_bsf(ctx->filter)->flush(ctx);
}
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
AVBSFInternal *bsfi = ctx->internal;
FFBSFContext *const bsfi = ffbsfcontext(ctx);
int ret;
if (!pkt || IS_EMPTY(pkt)) {
if (pkt)
av_packet_unref(pkt);
bsfi->eof = 1;
return 0;
}
@ -226,12 +230,12 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
{
return ctx->filter->filter(ctx, pkt);
return ff_bsf(ctx->filter)->filter(ctx, pkt);
}
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
{
AVBSFInternal *bsfi = ctx->internal;
FFBSFContext *const bsfi = ffbsfcontext(ctx);
AVPacket *tmp_pkt;
if (bsfi->eof)
@ -252,7 +256,7 @@ int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
{
AVBSFInternal *bsfi = ctx->internal;
FFBSFContext *const bsfi = ffbsfcontext(ctx);
if (bsfi->eof)
return AVERROR_EOF;
@ -399,10 +403,10 @@ static const AVClass bsf_list_class = {
.version = LIBAVUTIL_VERSION_INT,
};
const AVBitStreamFilter ff_list_bsf = {
.name = "bsf_list",
static const FFBitStreamFilter list_bsf = {
.p.name = "bsf_list",
.p.priv_class = &bsf_list_class,
.priv_data_size = sizeof(BSFListContext),
.priv_class = &bsf_list_class,
.init = bsf_list_init,
.filter = bsf_list_filter,
.flush = bsf_list_flush,
@ -495,7 +499,7 @@ int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf)
goto end;
}
ret = av_bsf_alloc(&ff_list_bsf, bsf);
ret = av_bsf_alloc(&list_bsf.p, bsf);
if (ret < 0)
return ret;
@ -523,7 +527,6 @@ static int bsf_parse_single(char *str, AVBSFList *bsf_lst)
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
{
AVBSFList *lst;
char *bsf_str, *buf, *dup, *saveptr;
int ret;
if (!str)
@ -533,28 +536,27 @@ int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
if (!lst)
return AVERROR(ENOMEM);
if (!(dup = buf = av_strdup(str))) {
ret = AVERROR(ENOMEM);
goto end;
}
while (bsf_str = av_strtok(buf, ",", &saveptr)) {
do {
char *bsf_str = av_get_token(&str, ",");
ret = bsf_parse_single(bsf_str, lst);
av_free(bsf_str);
if (ret < 0)
goto end;
buf = NULL;
}
} while (*str && *++str);
ret = av_bsf_list_finalize(&lst, bsf_lst);
end:
if (ret < 0)
av_bsf_list_free(&lst);
av_free(dup);
return ret;
}
int av_bsf_get_null_filter(AVBSFContext **bsf)
{
return av_bsf_alloc(&ff_list_bsf, bsf);
#if CONFIG_NULL_BSF
extern const FFBitStreamFilter ff_null_bsf;
return av_bsf_alloc(&ff_null_bsf.p, bsf);
#else
return av_bsf_alloc(&list_bsf.p, bsf);
#endif
}

Просмотреть файл

@ -30,12 +30,31 @@
#include "packet.h"
/**
* @addtogroup lavc_core
* @defgroup lavc_bsf Bitstream filters
* @ingroup libavc
*
* Bitstream filters transform encoded media data without decoding it. This
* allows e.g. manipulating various header values. Bitstream filters operate on
* @ref AVPacket "AVPackets".
*
* The bitstream filtering API is centered around two structures:
* AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
* in abstract, the latter a specific filtering process. Obtain an
* AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
* it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
* AVBSFContext fields, as described in its documentation, then call
* av_bsf_init() to prepare the filter context for use.
*
* Submit packets for filtering using av_bsf_send_packet(), obtain filtered
* results with av_bsf_receive_packet(). When no more input packets will be
* sent, submit a NULL AVPacket to signal the end of the stream to the filter.
* av_bsf_receive_packet() will then return trailing packets, if any are
* produced by the filter.
*
* Finally, free the filter context with av_bsf_free().
* @{
*/
typedef struct AVBSFInternal AVBSFInternal;
/**
* The bitstream filter state.
*
@ -57,12 +76,6 @@ typedef struct AVBSFContext {
*/
const struct AVBitStreamFilter *filter;
/**
* Opaque libavcodec internal data. Must not be touched by the caller in any
* way.
*/
AVBSFInternal *internal;
/**
* Opaque filter-specific private data. If filter->priv_class is non-NULL,
* this is an AVOptions-enabled struct.
@ -115,20 +128,6 @@ typedef struct AVBitStreamFilter {
* code to this class.
*/
const AVClass *priv_class;
/*****************************************************************
* No fields below this line are part of the public API. They
* may not be used outside of libavcodec and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
int priv_data_size;
int (*init)(AVBSFContext *ctx);
int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
void (*close)(AVBSFContext *ctx);
void (*flush)(AVBSFContext *ctx);
} AVBitStreamFilter;
/**
@ -154,9 +153,9 @@ const AVBitStreamFilter *av_bsf_iterate(void **opaque);
* av_bsf_init() before sending any data to the filter.
*
* @param filter the filter for which to allocate an instance.
* @param ctx a pointer into which the pointer to the newly-allocated context
* will be written. It must be freed with av_bsf_free() after the
* filtering is done.
* @param[out] ctx a pointer into which the pointer to the newly-allocated context
* will be written. It must be freed with av_bsf_free() after the
* filtering is done.
*
* @return 0 on success, a negative AVERROR code on failure
*/
@ -182,9 +181,11 @@ int av_bsf_init(AVBSFContext *ctx);
* sending more empty packets does nothing) and will cause the filter to output
* any packets it may have buffered internally.
*
* @return 0 on success. AVERROR(EAGAIN) if packets need to be retrieved from the
* filter (using av_bsf_receive_packet()) before new input can be consumed. Another
* negative AVERROR value if an error occurs.
* @return
* - 0 on success.
* - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using
* av_bsf_receive_packet()) before new input can be consumed.
* - Another negative AVERROR value if an error occurs.
*/
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
@ -201,10 +202,12 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
* overwritten by the returned data. On failure, pkt is not
* touched.
*
* @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the
* filter (using av_bsf_send_packet()) to get more output. AVERROR_EOF if there
* will be no further output from the filter. Another negative AVERROR value if
* an error occurs.
* @return
* - 0 on success.
* - AVERROR(EAGAIN) if more packets need to be sent to the filter (using
* av_bsf_send_packet()) to get more output.
* - AVERROR_EOF if there will be no further output from the filter.
* - Another negative AVERROR value if an error occurs.
*
* @note one input packet may result in several output packets, so after sending
* a packet with av_bsf_send_packet(), this function needs to be called

Просмотреть файл

@ -24,6 +24,19 @@
#include "bsf.h"
#include "packet.h"
/* Internal bitstream-filter descriptor: wraps the public AVBitStreamFilter
 * and adds the implementation callbacks that are no longer part of the
 * public ABI. */
typedef struct FFBitStreamFilter {
    /**
     * The public AVBitStreamFilter. See bsf.h for it.
     */
    AVBitStreamFilter p;

    int priv_data_size;                            // bytes allocated for AVBSFContext.priv_data
    int (*init)(AVBSFContext *ctx);                // optional; called from av_bsf_init()
    int (*filter)(AVBSFContext *ctx, AVPacket *pkt); // required; produces one output packet
    void (*close)(AVBSFContext *ctx);              // optional teardown
    void (*flush)(AVBSFContext *ctx);              // optional; called from av_bsf_flush()
} FFBitStreamFilter;
/**
* Called by the bitstream filters to get the next packet for filtering.
* The filter is responsible for either freeing the packet or passing it to the
@ -42,10 +55,6 @@ int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
*/
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
#if FF_API_CHILD_CLASS_NEXT
const AVClass *ff_bsf_child_class_next(const AVClass *prev);
#endif
const AVClass *ff_bsf_child_class_iterate(void **opaque);
#endif /* AVCODEC_BSF_INTERNAL_H */

Просмотреть файл

@ -1,6 +1,3 @@
static const AVBitStreamFilter * const bitstream_filters[] = {
&ff_null_bsf,
#if CONFIG_VP9_SUPERFRAME_SPLIT_BSF
static const FFBitStreamFilter * const bitstream_filters[] = {
&ff_vp9_superframe_split_bsf,
#endif
NULL };

Просмотреть файл

@ -25,6 +25,7 @@
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_internal.h"
@ -293,6 +294,19 @@ int ff_cbs_read_packet(CodedBitstreamContext *ctx,
pkt->data, pkt->size, 0);
}
int ff_cbs_read_packet_side_data(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVPacket *pkt)
{
size_t side_data_size;
const uint8_t *side_data =
av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
&side_data_size);
return cbs_read_data(ctx, frag, NULL,
side_data, side_data_size, 1);
}
int ff_cbs_read(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const uint8_t *data, size_t size)
@ -301,6 +315,28 @@ int ff_cbs_read(CodedBitstreamContext *ctx,
data, size, 0);
}
/**
* Allocate a new internal data buffer of the given size in the unit.
*
* The data buffer will have input padding.
*/
static int cbs_alloc_unit_data(CodedBitstreamUnit *unit,
size_t size)
{
av_assert0(!unit->data && !unit->data_ref);
unit->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!unit->data_ref)
return AVERROR(ENOMEM);
unit->data = unit->data_ref->data;
unit->data_size = size;
memset(unit->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
return 0;
}
static int cbs_write_unit_data(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
@ -346,7 +382,7 @@ static int cbs_write_unit_data(CodedBitstreamContext *ctx,
flush_put_bits(&pbc);
ret = ff_cbs_alloc_unit_data(unit, put_bits_count(&pbc) / 8);
ret = cbs_alloc_unit_data(unit, put_bytes_output(&pbc));
if (ret < 0)
return ret;
@ -679,23 +715,6 @@ int ff_cbs_alloc_unit_content(CodedBitstreamUnit *unit,
return 0;
}
int ff_cbs_alloc_unit_data(CodedBitstreamUnit *unit,
size_t size)
{
av_assert0(!unit->data && !unit->data_ref);
unit->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!unit->data_ref)
return AVERROR(ENOMEM);
unit->data = unit->data_ref->data;
unit->data_size = size;
memset(unit->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
return 0;
}
static int cbs_insert_unit(CodedBitstreamFragment *frag,
int position)
{
@ -770,18 +789,16 @@ int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
return 0;
}
int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
int position,
CodedBitstreamUnitType type,
uint8_t *data, size_t data_size,
AVBufferRef *data_buf)
static int cbs_insert_unit_data(CodedBitstreamFragment *frag,
CodedBitstreamUnitType type,
uint8_t *data, size_t data_size,
AVBufferRef *data_buf,
int position)
{
CodedBitstreamUnit *unit;
AVBufferRef *data_ref;
int err;
if (position == -1)
position = frag->nb_units;
av_assert0(position >= 0 && position <= frag->nb_units);
if (data_buf)
@ -809,6 +826,16 @@ int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
return 0;
}
int ff_cbs_append_unit_data(CodedBitstreamFragment *frag,
CodedBitstreamUnitType type,
uint8_t *data, size_t data_size,
AVBufferRef *data_buf)
{
return cbs_insert_unit_data(frag, type,
data, data_size, data_buf,
frag->nb_units);
}
void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
int position)
{

Просмотреть файл

@ -24,7 +24,9 @@
#include "libavutil/buffer.h"
#include "avcodec.h"
#include "codec_id.h"
#include "codec_par.h"
#include "packet.h"
/*
@ -40,6 +42,7 @@
* bitstream.
*/
struct AVCodecContext;
struct CodedBitstreamType;
/**
@ -271,7 +274,11 @@ int ff_cbs_read_extradata(CodedBitstreamContext *ctx,
*/
int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVCodecContext *avctx);
const struct AVCodecContext *avctx);
int ff_cbs_read_packet_side_data(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVPacket *pkt);
/**
* Read the data bitstream from a packet into a fragment, then
@ -373,15 +380,6 @@ int ff_cbs_alloc_unit_content(CodedBitstreamUnit *unit,
int ff_cbs_alloc_unit_content2(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
/**
* Allocate a new internal data buffer of the given size in the unit.
*
* The data buffer will have input padding.
*/
int ff_cbs_alloc_unit_data(CodedBitstreamUnit *unit,
size_t size);
/**
* Insert a new unit into a fragment with the given content.
*
@ -395,14 +393,13 @@ int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
AVBufferRef *content_buf);
/**
* Insert a new unit into a fragment with the given data bitstream.
* Add a new unit to a fragment with the given data bitstream.
*
* If data_buf is not supplied then data must have been allocated with
* av_malloc() and will on success become owned by the unit after this
* call or freed on error.
*/
int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
int position,
int ff_cbs_append_unit_data(CodedBitstreamFragment *frag,
CodedBitstreamUnitType type,
uint8_t *data, size_t data_size,
AVBufferRef *data_buf);

Просмотреть файл

@ -20,10 +20,10 @@
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_internal.h"
#include "cbs_av1.h"
#include "internal.h"
static int cbs_av1_read_uvlc(CodedBitstreamContext *ctx, GetBitContext *gbc,
@ -828,7 +828,7 @@ static int cbs_av1_split_fragment(CodedBitstreamContext *ctx,
goto fail;
}
err = ff_cbs_insert_unit_data(frag, -1, header.obu_type,
err = ff_cbs_append_unit_data(frag, header.obu_type,
data, obu_length, frag->data_ref);
if (err < 0)
goto fail;

Просмотреть файл

@ -355,7 +355,7 @@ static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
};
int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
int8_t shifted_order_hints[AV1_NUM_REF_FRAMES];
int16_t shifted_order_hints[AV1_NUM_REF_FRAMES];
int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
int i, j;

Просмотреть файл

@ -19,8 +19,13 @@
#ifndef AVCODEC_CBS_INTERNAL_H
#define AVCODEC_CBS_INTERNAL_H
#include "avcodec.h"
#include <stdint.h>
#include "libavutil/buffer.h"
#include "libavutil/log.h"
#include "cbs.h"
#include "codec_id.h"
#include "get_bits.h"
#include "put_bits.h"

Просмотреть файл

@ -31,7 +31,7 @@
#include "libavutil/samplefmt.h"
#include "libavcodec/codec_id.h"
#include "libavcodec/version.h"
#include "libavcodec/version_major.h"
/**
* @addtogroup lavc_core
@ -50,7 +50,12 @@
* avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
*/
#define AV_CODEC_CAP_DR1 (1 << 1)
#if FF_API_FLAG_TRUNCATED
/**
* @deprecated Use parsers to always send proper frames.
*/
#define AV_CODEC_CAP_TRUNCATED (1 << 3)
#endif
/**
* Encoder or decoder requires flushing with NULL input at the end in order to
* give the complete and correct output.
@ -185,12 +190,6 @@ typedef struct AVProfile {
const char *name; ///< short name for the profile
} AVProfile;
typedef struct AVCodecDefault AVCodecDefault;
struct AVCodecContext;
struct AVSubtitle;
struct AVPacket;
/**
* AVCodec.
*/
@ -214,12 +213,18 @@ typedef struct AVCodec {
* see AV_CODEC_CAP_*
*/
int capabilities;
uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
#if FF_API_OLD_CHANNEL_LAYOUT
/**
* @deprecated use ch_layouts instead
*/
attribute_deprecated
const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
#endif
const AVClass *priv_class; ///< AVClass for the private context
const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
@ -235,117 +240,10 @@ typedef struct AVCodec {
*/
const char *wrapper_name;
/*****************************************************************
* No fields below this line are part of the public API. They
* may not be used outside of libavcodec and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
int priv_data_size;
#if FF_API_NEXT
struct AVCodec *next;
#endif
/**
* @name Frame-level threading support functions
* @{
* Array of supported channel layouts, terminated with a zeroed layout.
*/
/**
* Copy necessary context variables from a previous thread context to the current one.
* If not defined, the next thread will start automatically; otherwise, the codec
* must call ff_thread_finish_setup().
*
* dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
*/
int (*update_thread_context)(struct AVCodecContext *dst, const struct AVCodecContext *src);
/** @} */
/**
* Private codec-specific defaults.
*/
const AVCodecDefault *defaults;
/**
* Initialize codec static data, called from av_codec_iterate().
*
* This is not intended for time consuming operations as it is
* run for every codec regardless of that codec being used.
*/
void (*init_static_data)(struct AVCodec *codec);
int (*init)(struct AVCodecContext *);
int (*encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size,
const struct AVSubtitle *sub);
/**
* Encode data to an AVPacket.
*
* @param avctx codec context
* @param avpkt output AVPacket
* @param[in] frame AVFrame containing the raw data to be encoded
* @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
* non-empty packet was returned in avpkt.
* @return 0 on success, negative error code on failure
*/
int (*encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
const struct AVFrame *frame, int *got_packet_ptr);
/**
* Decode picture or subtitle data.
*
* @param avctx codec context
* @param outdata codec type dependent output struct
* @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that a
* non-empty frame or subtitle was returned in
* outdata.
* @param[in] avpkt AVPacket containing the data to be decoded
* @return amount of bytes read from the packet on success, negative error
* code on failure
*/
int (*decode)(struct AVCodecContext *avctx, void *outdata,
int *got_frame_ptr, struct AVPacket *avpkt);
int (*close)(struct AVCodecContext *);
/**
* Encode API with decoupled frame/packet dataflow. This function is called
* to get one output packet. It should call ff_encode_get_frame() to obtain
* input data.
*/
int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
/**
* Decode API with decoupled packet/frame dataflow. This function is called
* to get one output frame. It should call ff_decode_get_packet() to obtain
* input data.
*/
int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
* Flush buffers.
* Will be called when seeking
*/
void (*flush)(struct AVCodecContext *);
/**
* Internal codec capabilities.
* See FF_CODEC_CAP_* in internal.h
*/
int caps_internal;
/**
* Decoding only, a comma-separated list of bitstream filters to apply to
* packets before decoding.
*/
const char *bsfs;
/**
* Array of pointers to hardware configurations supported by the codec,
* or NULL if no hardware supported. The array is terminated by a NULL
* pointer.
*
* The user can only access this field via avcodec_get_hw_config().
*/
const struct AVCodecHWConfigInternal *const *hw_configs;
/**
* List of supported codec_tags, terminated by FF_CODEC_TAGS_END.
*/
const uint32_t *codec_tags;
const AVChannelLayout *ch_layouts;
} AVCodec;
/**
@ -365,7 +263,7 @@ const AVCodec *av_codec_iterate(void **opaque);
* @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_decoder(enum AVCodecID id);
const AVCodec *avcodec_find_decoder(enum AVCodecID id);
/**
* Find a registered decoder with the specified name.
@ -373,7 +271,7 @@ AVCodec *avcodec_find_decoder(enum AVCodecID id);
* @param name name of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_decoder_by_name(const char *name);
const AVCodec *avcodec_find_decoder_by_name(const char *name);
/**
* Find a registered encoder with a matching codec ID.
@ -381,7 +279,7 @@ AVCodec *avcodec_find_decoder_by_name(const char *name);
* @param id AVCodecID of the requested encoder
* @return An encoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_encoder(enum AVCodecID id);
const AVCodec *avcodec_find_encoder(enum AVCodecID id);
/**
* Find a registered encoder with the specified name.
@ -389,7 +287,7 @@ AVCodec *avcodec_find_encoder(enum AVCodecID id);
* @param name name of the requested encoder
* @return An encoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_encoder_by_name(const char *name);
const AVCodec *avcodec_find_encoder_by_name(const char *name);
/**
* @return a non-zero number if codec is an encoder, zero otherwise
*/
@ -400,6 +298,15 @@ int av_codec_is_encoder(const AVCodec *codec);
*/
int av_codec_is_decoder(const AVCodec *codec);
/**
* Return a name for the specified profile, if available.
*
* @param codec the codec that is searched for the given profile
* @param profile the profile value for which a name is requested
* @return A name for the profile if found, NULL otherwise.
*/
const char *av_get_profile_name(const AVCodec *codec, int profile);
enum {
/**
* The codec supports this format via the hw_device_ctx interface.

Просмотреть файл

@ -19,15 +19,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <string.h>
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/macros.h"
#include "codec_id.h"
#include "codec_desc.h"
#include "profiles.h"
#include "version.h"
#define MT(...) (const char *const[]){ __VA_ARGS__, NULL }
@ -1856,6 +1856,36 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Digital Pictures SGA Video"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_GEM,
.type = AVMEDIA_TYPE_VIDEO,
.name = "gem",
.long_name = NULL_IF_CONFIG_SMALL("GEM Raster image"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_VBN,
.type = AVMEDIA_TYPE_VIDEO,
.name = "vbn",
.long_name = NULL_IF_CONFIG_SMALL("Vizrt Binary Image"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_JPEGXL,
.type = AVMEDIA_TYPE_VIDEO,
.name = "jpegxl",
.long_name = NULL_IF_CONFIG_SMALL("JPEG XL"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
AV_CODEC_PROP_LOSSLESS,
.mime_types= MT("image/jxl"),
},
{
.id = AV_CODEC_ID_QOI,
.type = AVMEDIA_TYPE_VIDEO,
.name = "qoi",
.long_name = NULL_IF_CONFIG_SMALL("QOI (Quite OK Image)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
/* various PCM "codecs" */
{
@ -2462,6 +2492,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA MobiClip MOFLEX"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_ACORN,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_acorn",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Acorn Replay"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* AMR */
{
@ -2758,7 +2795,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "mlp",
.long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
.props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_GSM_MS,
@ -3216,6 +3253,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("MobiClip FastAudio"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MSNSIREN,
.type = AVMEDIA_TYPE_AUDIO,
.name = "msnsiren",
.long_name = NULL_IF_CONFIG_SMALL("MSN Siren"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DFPWM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dfpwm",
.long_name = NULL_IF_CONFIG_SMALL("DFPWM (Dynamic Filter Pulse Width Modulation)"),
.props = AV_CODEC_PROP_LOSSY,
},
/* subtitle codecs */
{

Просмотреть файл

@ -308,6 +308,9 @@ enum AVCodecID {
AV_CODEC_ID_SIMBIOSIS_IMX,
AV_CODEC_ID_SGA_VIDEO,
AV_CODEC_ID_GEM,
AV_CODEC_ID_VBN,
AV_CODEC_ID_JPEGXL,
AV_CODEC_ID_QOI,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -516,6 +519,7 @@ enum AVCodecID {
AV_CODEC_ID_HCA,
AV_CODEC_ID_FASTAUDIO,
AV_CODEC_ID_MSNSIREN,
AV_CODEC_ID_DFPWM,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.

Просмотреть файл

@ -0,0 +1,278 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CODEC_INTERNAL_H
#define AVCODEC_CODEC_INTERNAL_H
#include <stdint.h>
#include "libavutil/attributes.h"
#include "codec.h"
/**
* The codec does not modify any global variables in the init function,
* allowing to call the init function without locking any global mutexes.
*/
#define FF_CODEC_CAP_INIT_THREADSAFE (1 << 0)
/**
* The codec allows calling the close function for deallocation even if
* the init function returned a failure. Without this capability flag, a
* codec does such cleanup internally when returning failures from the
* init function and does not expect the close function to be called at
* all.
*/
#define FF_CODEC_CAP_INIT_CLEANUP (1 << 1)
/**
* Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set
* AVFrame.pkt_dts manually. If the flag is set, decode.c won't overwrite
* this field. If it's unset, decode.c tries to guess the pkt_dts field
* from the input AVPacket.
*/
#define FF_CODEC_CAP_SETS_PKT_DTS (1 << 2)
/**
* The decoder extracts and fills its parameters even if the frame is
* skipped due to the skip_frame setting.
*/
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM (1 << 3)
/**
* The decoder sets the cropping fields in the output frames manually.
* If this cap is set, the generic code will initialize output frame
* dimensions to coded rather than display values.
*/
#define FF_CODEC_CAP_EXPORTS_CROPPING (1 << 4)
/**
* Codec initializes slice-based threading with a main function
*/
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF (1 << 5)
/*
* The codec supports frame threading and has inter-frame dependencies, so it
* uses ff_thread_report/await_progress().
*/
#define FF_CODEC_CAP_ALLOCATE_PROGRESS (1 << 6)
/**
* Codec handles avctx->thread_count == 0 (auto) internally.
*/
#define FF_CODEC_CAP_AUTO_THREADS (1 << 7)
/**
* Codec handles output frame properties internally instead of letting the
* internal logic derive them from AVCodecInternal.last_pkt_props.
*/
#define FF_CODEC_CAP_SETS_FRAME_PROPS (1 << 8)
/**
* FFCodec.codec_tags termination value
*/
#define FF_CODEC_TAGS_END -1
typedef struct FFCodecDefault {
const char *key;
const char *value;
} FFCodecDefault;
struct AVCodecContext;
struct AVSubtitle;
struct AVPacket;
enum FFCodecType {
/* The codec is a decoder using the decode callback;
* audio and video codecs only. */
FF_CODEC_CB_TYPE_DECODE,
/* The codec is a decoder using the decode_sub callback;
* subtitle codecs only. */
FF_CODEC_CB_TYPE_DECODE_SUB,
/* The codec is a decoder using the receive_frame callback;
* audio and video codecs only. */
FF_CODEC_CB_TYPE_RECEIVE_FRAME,
/* The codec is an encoder using the encode callback;
* audio and video codecs only. */
FF_CODEC_CB_TYPE_ENCODE,
/* The codec is an encoder using the encode_sub callback;
* subtitle codecs only. */
FF_CODEC_CB_TYPE_ENCODE_SUB,
/* The codec is an encoder using the receive_packet callback;
* audio and video codecs only. */
FF_CODEC_CB_TYPE_RECEIVE_PACKET,
};
typedef struct FFCodec {
/**
* The public AVCodec. See codec.h for it.
*/
AVCodec p;
/**
* Internal codec capabilities FF_CODEC_CAP_*.
*/
unsigned caps_internal:29;
/**
* This field determines the type of the codec (decoder/encoder)
* and also the exact callback cb implemented by the codec.
* cb_type uses enum FFCodecType values.
*/
unsigned cb_type:3;
int priv_data_size;
/**
* @name Frame-level threading support functions
* @{
*/
/**
* Copy necessary context variables from a previous thread context to the current one.
* If not defined, the next thread will start automatically; otherwise, the codec
* must call ff_thread_finish_setup().
*
* dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
*/
int (*update_thread_context)(struct AVCodecContext *dst, const struct AVCodecContext *src);
/**
* Copy variables back to the user-facing context
*/
int (*update_thread_context_for_user)(struct AVCodecContext *dst, const struct AVCodecContext *src);
/** @} */
/**
* Private codec-specific defaults.
*/
const FFCodecDefault *defaults;
/**
* Initialize codec static data, called from av_codec_iterate().
*
* This is not intended for time consuming operations as it is
* run for every codec regardless of that codec being used.
*/
void (*init_static_data)(struct FFCodec *codec);
int (*init)(struct AVCodecContext *);
union {
/**
* Decode to an AVFrame.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE.
*
* @param avctx codec context
* @param[out] frame AVFrame for output
* @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that
* a non-empty frame was returned in frame.
* @param[in] avpkt AVPacket containing the data to be decoded
* @return amount of bytes read from the packet on success,
* negative error code on failure
*/
int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
int *got_frame_ptr, struct AVPacket *avpkt);
/**
* Decode subtitle data to an AVSubtitle.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE_SUB.
*
* Apart from that this is like the decode callback.
*/
int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
int *got_frame_ptr, const struct AVPacket *avpkt);
/**
* Decode API with decoupled packet/frame dataflow.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_FRAME.
*
* This function is called to get one output frame. It should call
* ff_decode_get_packet() to obtain input data.
*/
int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
* Encode data to an AVPacket.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE
*
* @param avctx codec context
* @param[out] avpkt output AVPacket
* @param[in] frame AVFrame containing the input to be encoded
* @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
* non-empty packet was returned in avpkt.
* @return 0 on success, negative error code on failure
*/
int (*encode)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
const struct AVFrame *frame, int *got_packet_ptr);
/**
* Encode subtitles to a raw buffer.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE_SUB.
*/
int (*encode_sub)(struct AVCodecContext *avctx, uint8_t *buf,
int buf_size, const struct AVSubtitle *sub);
/**
* Encode API with decoupled frame/packet dataflow.
* cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_PACKET.
*
* This function is called to get one output packet.
* It should call ff_encode_get_frame() to obtain input data.
*/
int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
} cb;
int (*close)(struct AVCodecContext *);
/**
* Flush buffers.
* Will be called when seeking
*/
void (*flush)(struct AVCodecContext *);
/**
* Decoding only, a comma-separated list of bitstream filters to apply to
* packets before decoding.
*/
const char *bsfs;
/**
* Array of pointers to hardware configurations supported by the codec,
* or NULL if no hardware supported. The array is terminated by a NULL
* pointer.
*
* The user can only access this field via avcodec_get_hw_config().
*/
const struct AVCodecHWConfigInternal *const *hw_configs;
/**
* List of supported codec_tags, terminated by FF_CODEC_TAGS_END.
*/
const uint32_t *codec_tags;
} FFCodec;
#define FF_CODEC_DECODE_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_DECODE, \
.cb.decode = (func)
#define FF_CODEC_DECODE_SUB_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_DECODE_SUB, \
.cb.decode_sub = (func)
#define FF_CODEC_RECEIVE_FRAME_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_RECEIVE_FRAME, \
.cb.receive_frame = (func)
#define FF_CODEC_ENCODE_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_ENCODE, \
.cb.encode = (func)
#define FF_CODEC_ENCODE_SUB_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_ENCODE_SUB, \
.cb.encode_sub = (func)
#define FF_CODEC_RECEIVE_PACKET_CB(func) \
.cb_type = FF_CODEC_CB_TYPE_RECEIVE_PACKET, \
.cb.receive_packet = (func)
static av_always_inline const FFCodec *ffcodec(const AVCodec *codec)
{
return (const FFCodec*)codec;
}
#endif /* AVCODEC_CODEC_INTERNAL_H */

Просмотреть файл

@ -1,20 +1,6 @@
static const AVCodec * const codec_list[] = {
#if CONFIG_VP8_DECODER
static const FFCodec * const codec_list[] = {
&ff_vp8_decoder,
#endif
#if CONFIG_VP9_DECODER
&ff_vp9_decoder,
#endif
#if CONFIG_FLAC_DECODER
&ff_flac_decoder,
#endif
#if CONFIG_MP3_DECODER
&ff_mp3_decoder,
#endif
#if CONFIG_LIBDAV1D
&ff_libdav1d_decoder,
#endif
#if CONFIG_AV1_DECODER
&ff_av1_decoder,
#endif
NULL };

Просмотреть файл

@ -31,12 +31,14 @@
static void codec_parameters_reset(AVCodecParameters *par)
{
av_freep(&par->extradata);
av_channel_layout_uninit(&par->ch_layout);
memset(par, 0, sizeof(*par));
par->codec_type = AVMEDIA_TYPE_UNKNOWN;
par->codec_id = AV_CODEC_ID_NONE;
par->format = -1;
par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
par->field_order = AV_FIELD_UNKNOWN;
par->color_range = AVCOL_RANGE_UNSPECIFIED;
par->color_primaries = AVCOL_PRI_UNSPECIFIED;
@ -71,9 +73,12 @@ void avcodec_parameters_free(AVCodecParameters **ppar)
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
{
int ret;
codec_parameters_reset(dst);
memcpy(dst, src, sizeof(*dst));
dst->ch_layout = (AVChannelLayout){0};
dst->extradata = NULL;
dst->extradata_size = 0;
if (src->extradata) {
@ -84,12 +89,18 @@ int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src
dst->extradata_size = src->extradata_size;
}
ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
if (ret < 0)
return ret;
return 0;
}
int avcodec_parameters_from_context(AVCodecParameters *par,
const AVCodecContext *codec)
{
int ret;
codec_parameters_reset(par);
par->codec_type = codec->codec_type;
@ -118,8 +129,32 @@ int avcodec_parameters_from_context(AVCodecParameters *par,
break;
case AVMEDIA_TYPE_AUDIO:
par->format = codec->sample_fmt;
par->channel_layout = codec->channel_layout;
par->channels = codec->channels;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
// if the old/new fields are set inconsistently, prefer the old ones
if ((codec->channels && codec->channels != codec->ch_layout.nb_channels) ||
(codec->channel_layout && (codec->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
codec->ch_layout.u.mask != codec->channel_layout))) {
if (codec->channel_layout)
av_channel_layout_from_mask(&par->ch_layout, codec->channel_layout);
else {
par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
par->ch_layout.nb_channels = codec->channels;
}
FF_ENABLE_DEPRECATION_WARNINGS
} else {
#endif
ret = av_channel_layout_copy(&par->ch_layout, &codec->ch_layout);
if (ret < 0)
return ret;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
}
par->channel_layout = par->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
par->ch_layout.u.mask : 0;
par->channels = par->ch_layout.nb_channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
par->sample_rate = codec->sample_rate;
par->block_align = codec->block_align;
par->frame_size = codec->frame_size;
@ -147,6 +182,8 @@ int avcodec_parameters_from_context(AVCodecParameters *par,
int avcodec_parameters_to_context(AVCodecContext *codec,
const AVCodecParameters *par)
{
int ret;
codec->codec_type = par->codec_type;
codec->codec_id = par->codec_id;
codec->codec_tag = par->codec_tag;
@ -173,8 +210,32 @@ int avcodec_parameters_to_context(AVCodecContext *codec,
break;
case AVMEDIA_TYPE_AUDIO:
codec->sample_fmt = par->format;
codec->channel_layout = par->channel_layout;
codec->channels = par->channels;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
// if the old/new fields are set inconsistently, prefer the old ones
if ((par->channels && par->channels != par->ch_layout.nb_channels) ||
(par->channel_layout && (par->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
par->ch_layout.u.mask != par->channel_layout))) {
if (par->channel_layout)
av_channel_layout_from_mask(&codec->ch_layout, par->channel_layout);
else {
codec->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
codec->ch_layout.nb_channels = par->channels;
}
FF_ENABLE_DEPRECATION_WARNINGS
} else {
#endif
ret = av_channel_layout_copy(&codec->ch_layout, &par->ch_layout);
if (ret < 0)
return ret;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
}
codec->channel_layout = codec->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
codec->ch_layout.u.mask : 0;
codec->channels = codec->ch_layout.nb_channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
codec->sample_rate = par->sample_rate;
codec->block_align = par->block_align;
codec->frame_size = par->frame_size;

Просмотреть файл

@ -24,6 +24,7 @@
#include <stdint.h>
#include "libavutil/avutil.h"
#include "libavutil/channel_layout.h"
#include "libavutil/rational.h"
#include "libavutil/pixfmt.h"
@ -154,16 +155,22 @@ typedef struct AVCodecParameters {
*/
int video_delay;
#if FF_API_OLD_CHANNEL_LAYOUT
/**
* Audio only. The channel layout bitmask. May be 0 if the channel layout is
* unknown or unspecified, otherwise the number of bits set must be equal to
* the channels field.
* @deprecated use ch_layout
*/
attribute_deprecated
uint64_t channel_layout;
/**
* Audio only. The number of audio channels.
* @deprecated use ch_layout.nb_channels
*/
attribute_deprecated
int channels;
#endif
/**
* Audio only. The number of audio samples per second.
*/
@ -198,6 +205,11 @@ typedef struct AVCodecParameters {
* Audio only. Number of samples to skip after a discontinuity.
*/
int seek_preroll;
/**
* Audio only. The channel layout and number of channels.
*/
AVChannelLayout ch_layout;
} AVCodecParameters;
/**

Просмотреть файл

@ -30,7 +30,9 @@
#include <math.h>
#include <string.h>
#include "libavutil/error.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "dct.h"
#include "dct32.h"
@ -212,8 +214,9 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
}
s->dct32 = ff_dct32_float;
if (ARCH_X86)
ff_dct_init_x86(s);
#if ARCH_X86
ff_dct_init_x86(s);
#endif
return 0;
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -79,9 +79,18 @@ int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
int ff_attach_decode_data(AVFrame *frame);
/**
* Check whether the side-data of src contains a palette of
* size AVPALETTE_SIZE; if so, copy it to dst and return 1;
* else return 0.
* Also emit an error message upon encountering a palette
* with invalid size.
*/
int ff_copy_palette(void *dst, const AVPacket *src, void *logctx);
/**
* Perform decoder initialization and validation.
* Called when opening the decoder, before the AVCodec.init() call.
* Called when opening the decoder, before the FFCodec.init() call.
*/
int ff_decode_preinit(AVCodecContext *avctx);

Просмотреть файл

@ -0,0 +1,170 @@
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DEFS_H
#define AVCODEC_DEFS_H
/**
* @file
* @ingroup libavc
* Misc types and constants that do not belong anywhere else.
*/
#include <stdint.h>
#include <stdlib.h>
/**
* @ingroup lavc_decoding
* Required number of additionally allocated bytes at the end of the input bitstream for decoding.
* This is mainly needed because some optimized bitstream readers read
* 32 or 64 bit at once and could read over the end.<br>
* Note: If the first 23 bits of the additional bytes are not 0, then damaged
* MPEG bitstreams could cause overread and segfault.
*/
#define AV_INPUT_BUFFER_PADDING_SIZE 64
/**
* @ingroup lavc_decoding
*/
enum AVDiscard{
/* We leave some space between them for extensions (drop some
* keyframes for intra-only or drop just some bidir frames). */
AVDISCARD_NONE =-16, ///< discard nothing
AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
AVDISCARD_NONREF = 8, ///< discard all non reference
AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
AVDISCARD_ALL = 48, ///< discard all
};
enum AVAudioServiceType {
AV_AUDIO_SERVICE_TYPE_MAIN = 0,
AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
};
/**
* Pan Scan area.
* This specifies the area which should be displayed.
* Note there may be multiple such areas for one frame.
*/
typedef struct AVPanScan {
/**
* id
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
int id;
/**
* width and height in 1/16 pel
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
int width;
int height;
/**
* position of the top left corner in 1/16 pel for up to 3 fields/frames
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
int16_t position[3][2];
} AVPanScan;
/**
* This structure describes the bitrate properties of an encoded bitstream. It
* roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD
* parameters for H.264/HEVC.
*/
typedef struct AVCPBProperties {
/**
* Maximum bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
int64_t max_bitrate;
/**
* Minimum bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
int64_t min_bitrate;
/**
* Average bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
int64_t avg_bitrate;
/**
* The size of the buffer to which the ratecontrol is applied, in bits.
* Zero if unknown or unspecified.
*/
int64_t buffer_size;
/**
* The delay between the time the packet this structure is associated with
* is received and the time when it should be decoded, in periods of a 27MHz
* clock.
*
* UINT64_MAX when unknown or unspecified.
*/
uint64_t vbv_delay;
} AVCPBProperties;
/**
* Allocate a CPB properties structure and initialize its fields to default
* values.
*
* @param size if non-NULL, the size of the allocated struct will be written
* here. This is useful for embedding it in side data.
*
* @return the newly allocated struct or NULL on failure
*/
AVCPBProperties *av_cpb_properties_alloc(size_t *size);
/**
* This structure supplies correlation between a packet timestamp and a wall clock
* production time. The definition follows the Producer Reference Time ('prft')
* as defined in ISO/IEC 14496-12
*/
typedef struct AVProducerReferenceTime {
/**
* A UTC timestamp, in microseconds, since Unix epoch (e.g, av_gettime()).
*/
int64_t wallclock;
int flags;
} AVProducerReferenceTime;
/**
* Encode extradata length to a buffer. Used by xiph codecs.
*
* @param s buffer to write to; must be at least (v/255+1) bytes long
* @param v size of extradata in bytes
* @return number of bytes written to the buffer.
*/
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
#endif // AVCODEC_DEFS_H

Просмотреть файл

@ -20,17 +20,19 @@
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "internal.h"
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
{
if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
@ -40,18 +42,14 @@ int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64
av_assert0(!avpkt->data);
if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
avpkt->data = avctx->internal->byte_buffer;
avpkt->size = size;
}
av_fast_padded_malloc(&avctx->internal->byte_buffer,
&avctx->internal->byte_buffer_size, size);
avpkt->data = avctx->internal->byte_buffer;
if (!avpkt->data) {
int ret = av_new_packet(avpkt, size);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
return ret;
av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
return AVERROR(ENOMEM);
}
avpkt->size = size;
return 0;
}
@ -74,7 +72,6 @@ int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, in
return ret;
}
avpkt->data = avpkt->buf->data;
memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
return 0;
}
@ -98,6 +95,7 @@ int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, i
ret = AVERROR(EINVAL);
goto fail;
}
memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
ret = 0;
fail:
@ -117,9 +115,10 @@ static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
int ret;
frame->format = src->format;
frame->channel_layout = src->channel_layout;
frame->channels = src->channels;
frame->nb_samples = s->frame_size;
ret = av_channel_layout_copy(&frame->ch_layout, &s->ch_layout);
if (ret < 0)
goto fail;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0)
goto fail;
@ -129,11 +128,12 @@ static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
goto fail;
if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
src->nb_samples, s->channels, s->sample_fmt)) < 0)
src->nb_samples, s->ch_layout.nb_channels,
s->sample_fmt)) < 0)
goto fail;
if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
frame->nb_samples - src->nb_samples,
s->channels, s->sample_fmt)) < 0)
s->ch_layout.nb_channels, s->sample_fmt)) < 0)
goto fail;
return 0;
@ -152,7 +152,7 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
return -1;
}
ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
avctx->frame_number++;
return ret;
}
@ -175,8 +175,8 @@ int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
EncodeSimpleContext *es = &avci->es;
AVFrame *frame = es->in_frame;
AVFrame *frame = avci->in_frame;
const FFCodec *const codec = ffcodec(avctx->codec);
int got_packet;
int ret;
@ -201,7 +201,7 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
got_packet = 0;
av_assert0(avctx->codec->encode2);
av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
if (CONFIG_FRAME_THREAD_ENCODER &&
avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
@ -211,7 +211,7 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
* no sense to use the properties of the current frame anyway). */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
ret = codec->cb.encode(avctx, avpkt, frame, &got_packet);
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
@ -238,12 +238,9 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
}
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
/* NOTE: if we add any audio encoders which output non-keyframe packets,
* this needs to be moved to the encoders, but for now we can do it
* here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
avpkt->dts = avpkt->pts;
}
avpkt->flags |= avci->intra_only_flag;
}
if (avci->draining && !got_packet)
@ -253,11 +250,8 @@ end:
if (ret < 0 || !got_packet)
av_packet_unref(avpkt);
if (frame) {
if (!ret)
avctx->frame_number++;
if (frame)
av_frame_unref(frame);
}
if (got_packet)
// Encoders must always return ref-counted buffers.
@ -297,8 +291,8 @@ static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt
return AVERROR(EINVAL);
}
if (avctx->codec->receive_packet) {
ret = avctx->codec->receive_packet(avctx, avpkt);
if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET) {
ret = ffcodec(avctx->codec)->cb.receive_packet(avctx, avpkt);
if (ret < 0)
av_packet_unref(avpkt);
else
@ -372,7 +366,7 @@ int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame
if (avci->draining)
return AVERROR_EOF;
if (avci->buffer_frame->data[0])
if (avci->buffer_frame->buf[0])
return AVERROR(EAGAIN);
if (!frame) {
@ -389,6 +383,8 @@ int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame
return ret;
}
avctx->frame_number++;
return 0;
}
@ -413,160 +409,16 @@ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *
return 0;
}
#if FF_API_OLD_ENCDEC
static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
int *got_packet, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
AVPacket user_pkt;
int ret;
*got_packet = 0;
if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame->width == 0 || frame->height == 0)
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
}
if (avctx->codec->capabilities & AV_CODEC_CAP_DR1) {
av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* API does not support "
"AV_CODEC_CAP_DR1 encoders\n");
return AVERROR(ENOSYS);
}
ret = avcodec_send_frame(avctx, frame);
if (ret == AVERROR_EOF)
ret = 0;
else if (ret == AVERROR(EAGAIN)) {
/* we fully drain all the output in each encode call, so this should not
* ever happen */
return AVERROR_BUG;
} else if (ret < 0)
return ret;
av_packet_move_ref(&user_pkt, avpkt);
while (ret >= 0) {
ret = avcodec_receive_packet(avctx, avpkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
goto finish;
}
if (avpkt != avci->compat_encode_packet) {
if (avpkt->data && user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
av_buffer_unref(&avpkt->buf);
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
FF_DISABLE_DEPRECATION_WARNINGS
av_init_packet(&user_pkt);
FF_ENABLE_DEPRECATION_WARNINGS
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
av_packet_unref(avpkt);
ret = AVERROR(EINVAL);
goto finish;
}
}
*got_packet = 1;
avpkt = avci->compat_encode_packet;
} else {
if (!avci->compat_decode_warned) {
av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
"API cannot return all the packets for this encoder. "
"Some packets will be dropped. Update your code to the "
"new encoding API to fix this.\n");
avci->compat_decode_warned = 1;
av_packet_unref(avpkt);
}
}
if (avci->draining)
break;
}
finish:
if (ret < 0)
av_packet_unref(&user_pkt);
return ret;
}
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}
#endif
int ff_encode_preinit(AVCodecContext *avctx)
static int encode_preinit_video(AVCodecContext *avctx)
{
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
int i;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
return AVERROR(ENOMEM);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
return AVERROR(EINVAL);
}
if (avctx->codec->sample_fmts) {
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
break;
if (avctx->channels == 1 &&
av_get_planar_sample_fmt(avctx->sample_fmt) ==
av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
avctx->sample_fmt = avctx->codec->sample_fmts[i];
break;
}
}
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
return AVERROR(EINVAL);
}
}
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
&& !(avctx->codec_id == AV_CODEC_ID_MJPEG
&& avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
@ -580,74 +432,17 @@ FF_ENABLE_DEPRECATION_WARNINGS
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
avctx->color_range = AVCOL_RANGE_JPEG;
}
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
break;
if (avctx->codec->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
}
if (avctx->sample_rate < 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
if (avctx->codec->channel_layouts) {
if (!avctx->channel_layout) {
av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
} else {
for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
if (avctx->channel_layout == avctx->codec->channel_layouts[i])
break;
if (avctx->codec->channel_layouts[i] == 0) {
char buf[512];
av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
return AVERROR(EINVAL);
}
}
}
if (avctx->channel_layout && avctx->channels) {
int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
if (channels != avctx->channels) {
char buf[512];
av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
av_log(avctx, AV_LOG_ERROR,
"Channel layout '%s' with %d channels does not match number of specified channels %d\n",
buf, channels, avctx->channels);
return AVERROR(EINVAL);
}
} else if (avctx->channel_layout) {
avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
}
if (avctx->channels < 0) {
av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
avctx->channels);
return AVERROR(EINVAL);
}
if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
if ( avctx->bits_per_raw_sample < 0
|| (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
}
if (avctx->width <= 0 || avctx->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
return AVERROR(EINVAL);
}
}
if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
&& avctx->bit_rate>0 && avctx->bit_rate<1000) {
av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
}
if (!avctx->rc_initial_buffer_occupancy)
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
if ( avctx->bits_per_raw_sample < 0
|| (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
}
if (avctx->width <= 0 || avctx->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
return AVERROR(EINVAL);
}
if (avctx->ticks_per_frame && avctx->time_base.num &&
avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
@ -680,3 +475,159 @@ FF_ENABLE_DEPRECATION_WARNINGS
return 0;
}
static int encode_preinit_audio(AVCodecContext *avctx)
{
int i;
if (avctx->codec->sample_fmts) {
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
break;
if (avctx->ch_layout.nb_channels == 1 &&
av_get_planar_sample_fmt(avctx->sample_fmt) ==
av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
avctx->sample_fmt = avctx->codec->sample_fmts[i];
break;
}
}
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
return AVERROR(EINVAL);
}
}
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
break;
if (avctx->codec->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
}
if (avctx->sample_rate < 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
avctx->sample_rate);
return AVERROR(EINVAL);
}
if (avctx->codec->ch_layouts) {
if (!av_channel_layout_check(&avctx->ch_layout)) {
av_log(avctx, AV_LOG_WARNING, "Channel layout not specified correctly\n");
return AVERROR(EINVAL);
}
for (i = 0; avctx->codec->ch_layouts[i].nb_channels; i++) {
if (!av_channel_layout_compare(&avctx->ch_layout, &avctx->codec->ch_layouts[i]))
break;
}
if (!avctx->codec->ch_layouts[i].nb_channels) {
char buf[512];
int ret = av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf));
if (ret > 0)
av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
return AVERROR(EINVAL);
}
}
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->channel_layout && avctx->channels) {
int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
if (channels != avctx->channels) {
char buf[512];
av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
av_log(avctx, AV_LOG_ERROR,
"Channel layout '%s' with %d channels does not match number of specified channels %d\n",
buf, channels, avctx->channels);
return AVERROR(EINVAL);
}
} else if (avctx->channel_layout) {
avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
}
if (avctx->channels < 0) {
av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
avctx->channels);
return AVERROR(EINVAL);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (!avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = 8 * av_get_bytes_per_sample(avctx->sample_fmt);
return 0;
}
int ff_encode_preinit(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
int ret = 0;
if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
return AVERROR(EINVAL);
}
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO: ret = encode_preinit_video(avctx); break;
case AVMEDIA_TYPE_AUDIO: ret = encode_preinit_audio(avctx); break;
}
if (ret < 0)
return ret;
if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
&& avctx->bit_rate>0 && avctx->bit_rate<1000) {
av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
}
if (!avctx->rc_initial_buffer_occupancy)
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY)
avctx->internal->intra_only_flag = AV_PKT_FLAG_KEY;
if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_ENCODE) {
avci->in_frame = av_frame_alloc();
if (!avci->in_frame)
return AVERROR(ENOMEM);
}
return 0;
}
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
int ret;
switch (avctx->codec->type) {
case AVMEDIA_TYPE_VIDEO:
frame->format = avctx->pix_fmt;
if (frame->width <= 0 || frame->height <= 0) {
frame->width = FFMAX(avctx->width, avctx->coded_width);
frame->height = FFMAX(avctx->height, avctx->coded_height);
}
break;
case AVMEDIA_TYPE_AUDIO:
frame->sample_rate = avctx->sample_rate;
frame->format = avctx->sample_fmt;
if (!frame->ch_layout.nb_channels) {
ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
if (ret < 0)
return ret;
}
break;
}
ret = avcodec_default_get_buffer2(avctx, frame, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_frame_unref(frame);
return ret;
}
return 0;
}

Просмотреть файл

@ -44,9 +44,29 @@ int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame);
*/
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags);
/**
* Allocate buffers for a frame. Encoder equivalent to ff_get_buffer().
*/
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
/**
* Check AVPacket size and allocate data.
*
* Encoders supporting FFCodec.encode2() can use this as a convenience to
* obtain a big enough buffer for the encoded bitstream.
*
* @param avctx the AVCodecContext of the encoder
* @param avpkt The AVPacket: on success, avpkt->data will point to a buffer
* of size at least `size`; the packet will not be refcounted.
* This packet must be initially blank.
* @param size an upper bound of the size of the packet to encode
* @return non negative on success, negative error code on failure
*/
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
/*
* Perform encoder initialization and validation.
* Called when opening the encoder, before the AVCodec.init() call.
* Called when opening the encoder, before the FFCodec.init() call.
*/
int ff_encode_preinit(AVCodecContext *avctx);

Просмотреть файл

@ -24,7 +24,7 @@
#include "avcodec.h"
#include "me_cmp.h"
#include "thread.h"
#include "threadframe.h"
///< current MB is the first after a resync marker
#define VP_START 1
@ -52,7 +52,8 @@ typedef struct ERPicture {
typedef struct ERContext {
AVCodecContext *avctx;
MECmpContext mecc;
me_cmp_func sad;
int mecc_inited;
int *mb_index2xy;
@ -81,7 +82,6 @@ typedef struct ERContext {
uint16_t pb_time;
int quarter_sample;
int partitioned_frame;
int ref_count;
void (*decode_mb)(void *opaque, int ref, int mv_dir, int mv_type,
int (*mv)[2][4][2],

Просмотреть файл

@ -25,7 +25,7 @@
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
{
const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
av_unused const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
if (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9) {
c->fdct = ff_jpeg_fdct_islow_10;
@ -43,8 +43,9 @@ av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
c->fdct248 = ff_fdct248_islow_8;
}
if (ARCH_PPC)
ff_fdctdsp_init_ppc(c, avctx, high_bit_depth);
if (ARCH_X86)
ff_fdctdsp_init_x86(c, avctx, high_bit_depth);
#if ARCH_PPC
ff_fdctdsp_init_ppc(c, avctx, high_bit_depth);
#elif ARCH_X86
ff_fdctdsp_init_x86(c, avctx, high_bit_depth);
#endif
}

Просмотреть файл

@ -19,6 +19,9 @@
#ifndef AVCODEC_FFT_INTERNAL_H
#define AVCODEC_FFT_INTERNAL_H
#include "libavutil/mathematics.h"
#include "fft.h"
#if FFT_FLOAT
#define FIX15(v) (v)
@ -36,10 +39,6 @@
#else /* FFT_FLOAT */
#define SCALE_FLOAT(a, bits) lrint((a) * (double)(1 << (bits)))
#if FFT_FIXED_32
#define CMUL(dre, dim, are, aim, bre, bim) do { \
int64_t accu; \
(accu) = (int64_t)(bre) * (are); \
@ -50,10 +49,6 @@
(dim) = (int)(((accu) + 0x40000000) >> 31); \
} while (0)
#define FIX15(a) av_clip(SCALE_FLOAT(a, 31), -2147483647, 2147483647)
#endif /* FFT_FIXED_32 */
#endif /* FFT_FLOAT */
#define ff_imdct_calc_c FFT_NAME(ff_imdct_calc_c)

Просмотреть файл

@ -26,10 +26,6 @@
#define FFT_FLOAT 1
#endif
#ifndef FFT_FIXED_32
#define FFT_FIXED_32 0
#endif
#include <stdint.h>
#include "config.h"
@ -45,15 +41,11 @@ typedef float FFTDouble;
#else
#if FFT_FIXED_32
#define Q31(x) (int)((x)*2147483648.0 + 0.5)
#define FFT_NAME(x) x ## _fixed_32
typedef int32_t FFTSample;
#endif /* FFT_FIXED_32 */
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;

Просмотреть файл

@ -48,5 +48,4 @@
*/
#define FFT_FLOAT 0
#define FFT_FIXED_32 1
#include "fft_template.c"

Просмотреть файл

@ -17,5 +17,4 @@
*/
#define FFT_FLOAT 1
#define FFT_FIXED_32 0
#include "fft_template.c"

Просмотреть файл

@ -33,9 +33,9 @@
#include "fft.h"
#include "fft-internal.h"
#if FFT_FIXED_32
#if !FFT_FLOAT
#include "fft_table.h"
#else /* FFT_FIXED_32 */
#else /* !FFT_FLOAT */
/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
#if !CONFIG_HARDCODED_TABLES
@ -136,7 +136,7 @@ COSTABLE_CONST FFTSample * const FFT_NAME(ff_cos_tabs)[] = {
FFT_NAME(ff_cos_131072),
};
#endif /* FFT_FIXED_32 */
#endif /* FFT_FLOAT */
static void fft_permute_c(FFTContext *s, FFTComplex *z);
static void fft_calc_c(FFTContext *s, FFTComplex *z);
@ -226,20 +226,25 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
s->mdct_calc = ff_mdct_calc_c;
#endif
#if FFT_FIXED_32
ff_fft_lut_init();
#else /* FFT_FIXED_32 */
#if FFT_FLOAT
if (ARCH_AARCH64) ff_fft_init_aarch64(s);
if (ARCH_ARM) ff_fft_init_arm(s);
if (ARCH_PPC) ff_fft_init_ppc(s);
if (ARCH_X86) ff_fft_init_x86(s);
if (HAVE_MIPSFPU) ff_fft_init_mips(s);
#if ARCH_AARCH64
ff_fft_init_aarch64(s);
#elif ARCH_ARM
ff_fft_init_arm(s);
#elif ARCH_PPC
ff_fft_init_ppc(s);
#elif ARCH_X86
ff_fft_init_x86(s);
#endif
#if HAVE_MIPSFPU
ff_fft_init_mips(s);
#endif
for(j=4; j<=nbits; j++) {
ff_init_ff_cos_tabs(j);
}
#endif /* FFT_FIXED_32 */
#else /* FFT_FLOAT */
ff_fft_lut_init();
#endif
if (ARCH_X86 && FFT_FLOAT && s->fft_permutation == FF_FFT_PERM_AVX) {
@ -312,7 +317,7 @@ av_cold void ff_fft_end(FFTContext *s)
av_freep(&s->tmp_buf);
}
#if FFT_FIXED_32
#if !FFT_FLOAT
static void fft_calc_c(FFTContext *s, FFTComplex *z) {
@ -470,7 +475,7 @@ static void fft_calc_c(FFTContext *s, FFTComplex *z) {
}
}
#else /* FFT_FIXED_32 */
#else /* !FFT_FLOAT */
#define BUTTERFLIES(a0,a1,a2,a3) {\
BF(t3, t5, t5, t1);\
@ -620,4 +625,4 @@ static void fft_calc_c(FFTContext *s, FFTComplex *z)
{
fft_dispatch[s->nbits-2](z);
}
#endif /* FFT_FIXED_32 */
#endif /* !FFT_FLOAT */

Просмотреть файл

@ -29,15 +29,15 @@
static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 };
static const uint64_t flac_channel_layouts[8] = {
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_QUAD,
AV_CH_LAYOUT_5POINT0,
AV_CH_LAYOUT_5POINT1,
AV_CH_LAYOUT_6POINT1,
AV_CH_LAYOUT_7POINT1
static const AVChannelLayout flac_channel_layouts[8] = {
AV_CHANNEL_LAYOUT_MONO,
AV_CHANNEL_LAYOUT_STEREO,
AV_CHANNEL_LAYOUT_SURROUND,
AV_CHANNEL_LAYOUT_QUAD,
AV_CHANNEL_LAYOUT_5POINT0,
AV_CHANNEL_LAYOUT_5POINT1,
AV_CHANNEL_LAYOUT_6POINT1,
AV_CHANNEL_LAYOUT_7POINT1
};
static int64_t get_utf8(GetBitContext *gb)
@ -193,12 +193,18 @@ int ff_flac_is_extradata_valid(AVCodecContext *avctx,
return 1;
}
void ff_flac_set_channel_layout(AVCodecContext *avctx)
void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels)
{
if (avctx->channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
avctx->channel_layout = flac_channel_layouts[avctx->channels - 1];
if (channels == avctx->ch_layout.nb_channels &&
avctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC)
return;
av_channel_layout_uninit(&avctx->ch_layout);
if (channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
avctx->ch_layout = flac_channel_layouts[channels - 1];
else
avctx->channel_layout = 0;
avctx->ch_layout = (AVChannelLayout){ .order = AV_CHANNEL_ORDER_UNSPEC,
.nb_channels = channels };
}
int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
@ -229,13 +235,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
return AVERROR_INVALIDDATA;
}
avctx->channels = s->channels;
avctx->sample_rate = s->samplerate;
avctx->bits_per_raw_sample = s->bps;
if (!avctx->channel_layout ||
av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels)
ff_flac_set_channel_layout(avctx);
ff_flac_set_channel_layout(avctx, s->channels);
s->samples = get_bits64(&gb, 36);

Просмотреть файл

@ -131,7 +131,7 @@ int ff_flac_get_max_frame_size(int blocksize, int ch, int bps);
int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
FLACFrameInfo *fi, int log_level_offset);
void ff_flac_set_channel_layout(AVCodecContext *avctx);
void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels);
/**
* Parse the metadata block parameters from the header.

Просмотреть файл

@ -19,7 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "internal.h"
#include "flacdata.h"
const int ff_flac_sample_rate_table[16] =
{ 0,

Просмотреть файл

@ -22,7 +22,7 @@
#ifndef AVCODEC_FLACDATA_H
#define AVCODEC_FLACDATA_H
#include "internal.h"
#include <stdint.h>
extern const int ff_flac_sample_rate_table[16];

Просмотреть файл

@ -37,7 +37,7 @@
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "bytestream.h"
#include "golomb.h"
@ -260,7 +260,7 @@ static int decode_residuals(FLACContext *s, int32_t *decoded, int pred_order)
for (; i < samples; i++)
*decoded++ = get_sbits_long(&gb, tmp);
} else {
int real_limit = tmp ? (INT_MAX >> tmp) + 2 : INT_MAX;
int real_limit = (tmp > 1) ? (INT_MAX >> (tmp - 1)) + 2 : INT_MAX;
for (; i < samples; i++) {
int v = get_sr_golomb_flac(&gb, tmp, real_limit, 1);
if (v == 0x80000000){
@ -483,15 +483,14 @@ static int decode_frame(FLACContext *s)
if ( s->flac_stream_info.channels
&& fi.channels != s->flac_stream_info.channels
&& s->got_streaminfo) {
s->flac_stream_info.channels = s->avctx->channels = fi.channels;
ff_flac_set_channel_layout(s->avctx);
s->flac_stream_info.channels = fi.channels;
ff_flac_set_channel_layout(s->avctx, fi.channels);
ret = allocate_buffers(s);
if (ret < 0)
return ret;
}
s->flac_stream_info.channels = s->avctx->channels = fi.channels;
if (!s->avctx->channel_layout)
ff_flac_set_channel_layout(s->avctx);
s->flac_stream_info.channels = fi.channels;
ff_flac_set_channel_layout(s->avctx, fi.channels);
s->ch_mode = fi.ch_mode;
if (!s->flac_stream_info.bps && !fi.bps) {
@ -555,11 +554,9 @@ static int decode_frame(FLACContext *s)
return 0;
}
static int flac_decode_frame(AVCodecContext *avctx, void *data,
static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data;
@ -618,7 +615,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = s->blocksize;
if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
@ -660,22 +657,23 @@ static const AVClass flac_decoder_class = {
.version = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_flac_decoder = {
.name = "flac",
.long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_FLAC,
const FFCodec ff_flac_decoder = {
.p.name = "flac",
.p.long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_FLAC,
.priv_data_size = sizeof(FLACContext),
.init = flac_decode_init,
.close = flac_decode_close,
.decode = flac_decode_frame,
.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
FF_CODEC_DECODE_CB(flac_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },
.priv_class = &flac_decoder_class,
.p.priv_class = &flac_decoder_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};

Просмотреть файл

@ -19,6 +19,7 @@
*/
#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"
#include "flacdsp.h"
#include "config.h"
@ -123,8 +124,9 @@ av_cold void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int cha
break;
}
if (ARCH_ARM)
ff_flacdsp_init_arm(c, fmt, channels, bps);
if (ARCH_X86)
ff_flacdsp_init_x86(c, fmt, channels, bps);
#if ARCH_ARM
ff_flacdsp_init_arm(c, fmt, channels, bps);
#elif ARCH_X86
ff_flacdsp_init_x86(c, fmt, channels, bps);
#endif
}

Просмотреть файл

@ -20,7 +20,7 @@
#define AVCODEC_FLACDSP_H
#include <stdint.h>
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"
typedef struct FLACDSPContext {

Просмотреть файл

@ -17,7 +17,7 @@
*/
#include <stdint.h>
#include "libavutil/avutil.h"
#include "libavutil/common.h"
#include "mathops.h"
#undef FUNC

Просмотреть файл

@ -19,7 +19,7 @@
*/
#include <stdint.h>
#include "libavutil/avutil.h"
#include "libavutil/macros.h"
#undef FUNC
#undef FSUF
@ -66,8 +66,8 @@ static void FUNC(flac_decorrelate_ls_c)(uint8_t **out, int32_t **in,
int i;
for (i = 0; i < len; i++) {
int a = in[0][i];
int b = in[1][i];
unsigned a = in[0][i];
unsigned b = in[1][i];
S(samples, 0, i) = a << shift;
S(samples, 1, i) = (a - b) << shift;
}
@ -80,8 +80,8 @@ static void FUNC(flac_decorrelate_rs_c)(uint8_t **out, int32_t **in,
int i;
for (i = 0; i < len; i++) {
int a = in[0][i];
int b = in[1][i];
unsigned a = in[0][i];
unsigned b = in[1][i];
S(samples, 0, i) = (a + b) << shift;
S(samples, 1, i) = b << shift;
}
@ -94,7 +94,7 @@ static void FUNC(flac_decorrelate_ms_c)(uint8_t **out, int32_t **in,
int i;
for (i = 0; i < len; i++) {
int a = in[0][i];
unsigned a = in[0][i];
int b = in[1][i];
a -= b >> 1;
S(samples, 0, i) = (a + b) << shift;

Просмотреть файл

@ -27,7 +27,7 @@
* Initialize frame thread encoder.
* @note hardware encoders are not supported
*/
int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options);
int ff_frame_thread_encoder_init(AVCodecContext *avctx);
void ff_frame_thread_encoder_free(AVCodecContext *avctx);
int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
AVFrame *frame, int *got_packet_ptr);

Просмотреть файл

@ -33,7 +33,8 @@
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/avassert.h"
#include "avcodec.h"
#include "defs.h"
#include "mathops.h"
#include "vlc.h"
@ -709,8 +710,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
unsigned int index; \
\
index = SHOW_UBITS(name, gb, bits); \
code = table[index][0]; \
n = table[index][1]; \
code = table[index].sym; \
n = table[index].len; \
\
if (max_depth > 1 && n < 0) { \
LAST_SKIP_BITS(name, gb, bits); \
@ -719,8 +720,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
nb_bits = -n; \
\
index = SHOW_UBITS(name, gb, nb_bits) + code; \
code = table[index][0]; \
n = table[index][1]; \
code = table[index].sym; \
n = table[index].len; \
if (max_depth > 2 && n < 0) { \
LAST_SKIP_BITS(name, gb, nb_bits); \
UPDATE_CACHE(name, gb); \
@ -728,8 +729,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
nb_bits = -n; \
\
index = SHOW_UBITS(name, gb, nb_bits) + code; \
code = table[index][0]; \
n = table[index][1]; \
code = table[index].sym; \
n = table[index].len; \
} \
} \
SKIP_BITS(name, gb, n); \
@ -774,15 +775,15 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
/* Return the LUT element for the given bitstream configuration. */
static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
VLC_TYPE (*table)[2])
const VLCElem *table)
{
unsigned idx;
*nb_bits = -*n;
idx = show_bits(s, *nb_bits) + code;
*n = table[idx][1];
*n = table[idx].len;
return table[idx][0];
return table[idx].sym;
}
/**
@ -794,14 +795,14 @@ static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
* = (max_vlc_length + bits - 1) / bits
* @returns the code parsed or -1 if no vlc matches
*/
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table,
int bits, int max_depth)
{
#if CACHED_BITSTREAM_READER
int nb_bits;
unsigned idx = show_bits(s, bits);
int code = table[idx][0];
int n = table[idx][1];
int code = table[idx].sym;
int n = table[idx].len;
if (max_depth > 1 && n < 0) {
skip_remaining(s, bits);

Просмотреть файл

@ -0,0 +1,312 @@
/*
* The default get_buffer2() implementation
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/avassert.h"
#include "libavutil/avutil.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"
#include "libavutil/version.h"
#include "avcodec.h"
#include "internal.h"
/**
 * Cached buffer-pool state backing avcodec_default_get_buffer2().
 * One FramePool is kept per codec context (refcounted via AVBufferRef) and
 * is recreated by update_frame_pool() whenever the frame parameters change.
 */
typedef struct FramePool {
    /**
     * Pools for each data plane. For audio all the planes have the same size,
     * so only pools[0] is used.
     */
    AVBufferPool *pools[4];

    /*
     * Pool parameters
     */
    int format;                              ///< pixel or sample format the pool was built for
    int width, height;                       ///< video dimensions (video only)
    int stride_align[AV_NUM_DATA_POINTERS];  ///< per-plane stride alignment from avcodec_align_dimensions2()
    int linesize[4];                         ///< per-plane linesize (video) / buffer size (audio, [0] only)
    int planes;                              ///< number of planes (audio only)
    int channels;                            ///< channel count (audio only)
    int samples;                             ///< samples per frame (audio only)
} FramePool;
/**
 * AVBufferRef free callback for a FramePool: uninitialize every plane pool,
 * then free the FramePool allocation itself.
 */
static void frame_pool_free(void *opaque, uint8_t *data)
{
    FramePool *pool = (FramePool*)data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
        av_buffer_pool_uninit(&pool->pools[i]);

    av_freep(&data);
}
/**
 * Allocate a zero-initialized FramePool wrapped in an AVBufferRef whose
 * free callback (frame_pool_free) releases the contained plane pools.
 *
 * @return new reference, or NULL on allocation failure
 */
static AVBufferRef *frame_pool_alloc(void)
{
    FramePool *pool = av_mallocz(sizeof(*pool));
    AVBufferRef *buf;

    if (!pool)
        return NULL;

    buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
                           frame_pool_free, NULL, 0);
    if (!buf) {
        // av_buffer_create failed before taking ownership: free manually
        av_freep(&pool);
        return NULL;
    }

    return buf;
}
/**
 * Ensure avctx->internal->pool matches the parameters of the given frame.
 *
 * If the existing pool already matches (same format plus, for video,
 * width/height, or, for audio, planes/channels/samples), nothing is done.
 * Otherwise a new FramePool is built with per-plane AVBufferPools and it
 * replaces the old one on the context.
 *
 * @return 0 on success (pool usable), negative AVERROR on failure
 */
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool ?
                      (FramePool*)avctx->internal->pool->data : NULL;
    AVBufferRef *pool_buf;
    // NOTE: ch and planes are only initialized on the audio path; every
    // later read of them is guarded by a codec_type == AUDIO check.
    int i, ret, ch, planes;

    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        int planar = av_sample_fmt_is_planar(frame->format);
        ch     = frame->ch_layout.nb_channels;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
        // fall back to the deprecated channel count field if the new
        // channel layout API reports no channels
        if (!ch)
            ch = frame->channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        planes = planar ? ch : 1;
    }

    // fast path: current pool already matches this frame's parameters
    if (pool && pool->format == frame->format) {
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;
    }

    pool_buf = frame_pool_alloc();
    if (!pool_buf)
        return AVERROR(ENOMEM);
    pool = (FramePool*)pool_buf->data;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        int linesize[4];
        int w = frame->width;
        int h = frame->height;
        int unaligned;
        ptrdiff_t linesize1[4];
        size_t size[4];

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        // keep widening w until every plane linesize satisfies the
        // required stride alignment
        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            if (ret < 0)
                goto fail;
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        for (i = 0; i < 4; i++)
            linesize1[i] = linesize[i];
        ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
        if (ret < 0)
            goto fail;

        for (i = 0; i < 4; i++) {
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                // guard the "+ 16 + STRIDE_ALIGN - 1" padding below
                // against integer overflow
                if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
                pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
                                                     CONFIG_MEMORY_POISONING ?
                                                        NULL :
                                                        av_buffer_allocz);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        // audio: all planes share one pool sized for one plane's samples
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format     = frame->format;
        pool->planes     = planes;
        pool->channels   = ch;
        pool->samples = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }

    av_buffer_unref(&avctx->internal->pool);
    avctx->internal->pool = pool_buf;

    return 0;
fail:
    av_buffer_unref(&pool_buf);
    return ret;
}
/**
 * Fill an audio AVFrame's buffers from the context's FramePool.
 *
 * All planes draw from pools[0] (uniform plane size for audio). If there
 * are more planes than AV_NUM_DATA_POINTERS, extended_data/extended_buf
 * arrays are allocated for the overflow planes.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (frame is unreffed)
 */
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = (FramePool*)avctx->internal->pool->data;
    int planes = pool->planes;
    int i;

    frame->linesize[0] = pool->linesize[0];

    if (planes > AV_NUM_DATA_POINTERS) {
        // more planes than fit in data[]: allocate the spill-over arrays
        frame->extended_data = av_calloc(planes, sizeof(*frame->extended_data));
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
        frame->extended_buf  = av_calloc(frame->nb_extended_buf,
                                         sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
    } else {
        frame->extended_data = frame->data;
        av_assert0(frame->nb_extended_buf == 0);
    }

    // first AV_NUM_DATA_POINTERS planes go in buf[]/data[]
    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->buf[i])
            goto fail;
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    // remaining planes go in extended_buf[]/extended_data[]
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->extended_buf[i])
            goto fail;
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
/**
 * Fill a video AVFrame's plane buffers from the context's FramePool.
 *
 * Each plane i takes a buffer from pool->pools[i] and the matching cached
 * linesize. The frame must come in with all data pointers NULL.
 *
 * @return 0 on success, negative on error (frame is unreffed on pool failure)
 */
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    FramePool *pool = (FramePool*)s->internal->pool->data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
    int i;

    // caller contract: the frame must not already own plane data
    if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
        av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }

    if (!desc) {
        av_log(s, AV_LOG_ERROR,
            "Unable to get pixel format descriptor for format %s\n",
            av_get_pix_fmt_name(pic->format));
        return AVERROR(EINVAL);
    }

    memset(pic->data, 0, sizeof(pic->data));
    pic->extended_data = pic->data;

    for (i = 0; i < 4 && pool->pools[i]; i++) {
        pic->linesize[i] = pool->linesize[i];

        pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!pic->buf[i])
            goto fail;

        pic->data[i] = pic->buf[i]->data;
    }
    // clear any remaining (unused) plane slots
    for (; i < AV_NUM_DATA_POINTERS; i++) {
        pic->data[i] = NULL;
        pic->linesize[i] = 0;
    }

    if (s->debug & FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);

    return 0;
fail:
    av_frame_unref(pic);
    return AVERROR(ENOMEM);
}
/**
 * Default AVCodecContext.get_buffer2() implementation.
 *
 * Hardware frames contexts are served directly via av_hwframe_get_buffer()
 * (with the frame dimensions forced to the coded size); otherwise the
 * internal frame pool is refreshed and the media-type-specific helper
 * allocates the buffers.
 *
 * @param flags unused by this implementation
 * @return 0 on success, negative on error
 */
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret;

    if (avctx->hw_frames_ctx) {
        ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
        frame->width  = avctx->coded_width;
        frame->height = avctx->coded_height;
        return ret;
    }

    if ((ret = update_frame_pool(avctx, frame)) < 0)
        return ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return video_get_buffer(avctx, frame);
    case AVMEDIA_TYPE_AUDIO:
        return audio_get_buffer(avctx, frame);
    default:
        return -1;
    }
}

Просмотреть файл

@ -26,7 +26,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "libavutil/common.h"
#include <stdint.h>
const uint8_t ff_golomb_vlc_len[512]={
19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,

Просмотреть файл

@ -33,14 +33,12 @@
#include <stdint.h>
#include "get_bits.h"
#include "put_bits.h"
#define INVALID_VLC 0x80000000
extern const uint8_t ff_golomb_vlc_len[512];
extern const uint8_t ff_ue_golomb_vlc_code[512];
extern const int8_t ff_se_golomb_vlc_code[512];
extern const uint8_t ff_ue_golomb_len[256];
extern const uint8_t ff_interleaved_golomb_vlc_len[256];
extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256];
@ -615,135 +613,4 @@ static inline int get_te(GetBitContext *s, int r, char *file, const char *func,
#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
#endif /* TRACE */
/**
 * Write an unsigned Exp-Golomb code. Accepts values up to 2^16 - 2.
 *
 * Values below 256 use the precomputed ff_ue_golomb_len[] length table;
 * larger values derive the code length from av_log2(i + 1).
 */
static inline void set_ue_golomb(PutBitContext *pb, int i)
{
    av_assert2(i >= 0);
    av_assert2(i <= 0xFFFE);

    if (i < 256)
        put_bits(pb, ff_ue_golomb_len[i], i + 1);
    else {
        int e = av_log2(i + 1);
        put_bits(pb, 2 * e + 1, i + 1);
    }
}
/**
 * Write an unsigned Exp-Golomb code. Accepts values up to 2^32 - 2.
 *
 * Same scheme as set_ue_golomb() but uses put_bits64() for the long
 * codewords that large values require.
 */
static inline void set_ue_golomb_long(PutBitContext *pb, uint32_t i)
{
    av_assert2(i <= (UINT32_MAX - 1));

    if (i < 256)
        put_bits(pb, ff_ue_golomb_len[i], i + 1);
    else {
        int e = av_log2(i + 1);
        put_bits64(pb, 2 * e + 1, i + 1);
    }
}
/**
 * Write a truncated unsigned Exp-Golomb code.
 *
 * With range == 2 the value fits in a single inverted bit; otherwise it
 * falls through to the ordinary unsigned Exp-Golomb encoding.
 */
static inline void set_te_golomb(PutBitContext *pb, int i, int range)
{
    av_assert2(range >= 1);
    av_assert2(i <= range);

    if (range == 2)
        put_bits(pb, 1, i ^ 1);
    else
        set_ue_golomb(pb, i);
}
/**
 * Write a signed Exp-Golomb code. 16 bits at most.
 *
 * Maps the signed value onto the unsigned code index
 * (positive i -> 2*i - 1, non-positive i -> -2*i) and writes that
 * with set_ue_golomb().
 */
static inline void set_se_golomb(PutBitContext *pb, int i)
{
    i = 2 * i - 1;
    if (i < 0)
        i ^= -1; //FIXME check if gcc does the right thing
    set_ue_golomb(pb, i);
}
/**
 * Write an unsigned Golomb-Rice code (FFV1 variant).
 *
 * The quotient e = i >> k is written in unary with the k low bits appended;
 * if e reaches @p limit, an escape of limit + esc_len bits carrying
 * i - limit + 1 is written instead.
 */
static inline void set_ur_golomb(PutBitContext *pb, int i, int k, int limit,
                                 int esc_len)
{
    int e;

    av_assert2(i >= 0);

    e = i >> k;
    if (e < limit)
        put_bits(pb, e + k + 1, (1 << k) + av_mod_uintp2(i, k));
    else
        put_bits(pb, limit + esc_len, i - limit + 1);
}
/**
 * Write an unsigned Golomb-Rice code (JPEG-LS variant).
 *
 * Like set_ur_golomb() but the unary prefix is emitted in chunks of at
 * most 31 zero bits (put_bits() cannot write longer runs in one call),
 * and the escape payload is i - 1 in esc_len bits.
 */
static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k,
                                        int limit, int esc_len)
{
    int e;

    av_assert2(i >= 0);

    e = (i >> k) + 1;
    if (e < limit) {
        // unary prefix: (e - 1) zeros followed by a one, split into
        // <=31-bit put_bits() calls
        while (e > 31) {
            put_bits(pb, 31, 0);
            e -= 31;
        }
        put_bits(pb, e, 1);
        if (k)
            put_sbits(pb, k, i);
    } else {
        // escape: 'limit' prefix bits then the raw value minus one
        while (limit > 31) {
            put_bits(pb, 31, 0);
            limit -= 31;
        }
        put_bits(pb, limit, 1);
        put_bits(pb, esc_len, i - 1);
    }
}
/**
 * Write a signed Golomb-Rice code (FFV1 variant).
 *
 * Interleaves sign into the unsigned index (v = -2*i - 1, folded
 * non-negative via v ^= v >> 31) before set_ur_golomb().
 */
static inline void set_sr_golomb(PutBitContext *pb, int i, int k, int limit,
                                 int esc_len)
{
    int v;

    v  = -2 * i - 1;
    v ^= (v >> 31);

    set_ur_golomb(pb, v, k, limit, esc_len);
}
/**
 * Write a signed Golomb-Rice code (FLAC variant).
 *
 * Same sign-interleaving as set_sr_golomb(), but emitted through the
 * JPEG-LS style unsigned writer set_ur_golomb_jpegls().
 */
static inline void set_sr_golomb_flac(PutBitContext *pb, int i, int k,
                                      int limit, int esc_len)
{
    int v;

    v  = -2 * i - 1;
    v ^= (v >> 31);

    set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
}
#endif /* AVCODEC_GOLOMB_H */

Просмотреть файл

@ -36,5 +36,6 @@ void ff_h264chroma_init_arm(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_mips(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_loongarch(H264ChromaContext *c, int bit_depth);
#endif /* AVCODEC_H264CHROMA_H */

Просмотреть файл

@ -89,16 +89,16 @@ typedef struct H264DSPContext {
void (*h264_idct_add16)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
const uint8_t nnzc[5 * 8]);
void (*h264_idct8_add4)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
const uint8_t nnzc[5 * 8]);
void (*h264_idct_add8)(uint8_t **dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
void (*h264_idct_add16intra)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/,
int stride, const uint8_t nnzc[15 * 8]);
int stride, const uint8_t nnzc[5 * 8]);
void (*h264_luma_dc_dequant_idct)(int16_t *output,
int16_t *input /*align 16*/, int qmul);
void (*h264_chroma_dc_dequant_idct)(int16_t *block, int qmul);
@ -129,5 +129,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_mips(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_loongarch(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
#endif /* AVCODEC_H264DSP_H */

Просмотреть файл

@ -25,11 +25,13 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "codec_id.h"
#include "h264pred.h"
#include "mathops.h"
#define BIT_DEPTH 8
#include "h264pred_template.c"
@ -51,6 +53,30 @@
#include "h264pred_template.c"
#undef BIT_DEPTH
/**
 * Fill a 4x4 8-bit block with the constant value 127 (one below the
 * 8-bit midpoint), writing one aligned 32-bit word per row.
 * topright is unused; _stride is in bytes.
 */
static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright,
                             ptrdiff_t _stride)
{
    int stride = _stride;
    const uint32_t a = 0x7F7F7F7FU;

    AV_WN32A(src + 0 * stride, a);
    AV_WN32A(src + 1 * stride, a);
    AV_WN32A(src + 2 * stride, a);
    AV_WN32A(src + 3 * stride, a);
}
/**
 * Fill a 4x4 8-bit block with the constant value 129 (one above the
 * 8-bit midpoint), writing one aligned 32-bit word per row.
 * topright is unused; _stride is in bytes.
 */
static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright,
                             ptrdiff_t _stride)
{
    int stride = _stride;
    const uint32_t a = 0x81818181U;

    AV_WN32A(src + 0 * stride, a);
    AV_WN32A(src + 1 * stride, a);
    AV_WN32A(src + 2 * stride, a);
    AV_WN32A(src + 3 * stride, a);
}
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
ptrdiff_t stride)
{
@ -419,56 +445,19 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
#define FUNCD(a) a ## _c
#define H264_PRED(depth) \
if(codec_id != AV_CODEC_ID_RV40){\
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
h->pred4x4[VERT_PRED ]= FUNCD(pred4x4_vertical_vp8);\
h->pred4x4[HOR_PRED ]= FUNCD(pred4x4_horizontal_vp8);\
} else {\
h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
}\
h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
if(codec_id == AV_CODEC_ID_SVQ3)\
h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_svq3);\
else\
h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left , depth);\
h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_vp8);\
} else\
h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left , depth);\
h->pred4x4[HOR_UP_PRED ]= FUNCC(pred4x4_horizontal_up , depth);\
if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
} else {\
h->pred4x4[TM_VP8_PRED ]= FUNCD(pred4x4_tm_vp8);\
h->pred4x4[DC_127_PRED ]= FUNCC(pred4x4_127_dc , depth);\
h->pred4x4[DC_129_PRED ]= FUNCC(pred4x4_129_dc , depth);\
h->pred4x4[VERT_VP8_PRED ]= FUNCC(pred4x4_vertical , depth);\
h->pred4x4[HOR_VP8_PRED ]= FUNCC(pred4x4_horizontal , depth);\
}\
if (codec_id != AV_CODEC_ID_VP8)\
h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
}else{\
h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_rv40);\
h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_rv40);\
h->pred4x4[HOR_UP_PRED ]= FUNCD(pred4x4_horizontal_up_rv40);\
h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
}\
h->pred4x4[VERT_PRED ] = FUNCC(pred4x4_vertical, depth);\
h->pred4x4[HOR_PRED ] = FUNCC(pred4x4_horizontal, depth);\
h->pred4x4[DC_PRED ] = FUNCC(pred4x4_dc, depth);\
h->pred4x4[DIAG_DOWN_LEFT_PRED ] = FUNCC(pred4x4_down_left, depth);\
h->pred4x4[DIAG_DOWN_RIGHT_PRED] = FUNCC(pred4x4_down_right, depth);\
h->pred4x4[VERT_RIGHT_PRED ] = FUNCC(pred4x4_vertical_right, depth);\
h->pred4x4[HOR_DOWN_PRED ] = FUNCC(pred4x4_horizontal_down, depth);\
h->pred4x4[VERT_LEFT_PRED ] = FUNCC(pred4x4_vertical_left, depth);\
h->pred4x4[HOR_UP_PRED ] = FUNCC(pred4x4_horizontal_up, depth);\
h->pred4x4[LEFT_DC_PRED ] = FUNCC(pred4x4_left_dc, depth);\
h->pred4x4[TOP_DC_PRED ] = FUNCC(pred4x4_top_dc, depth);\
if (depth > 8 || codec_id != AV_CODEC_ID_VP8)\
h->pred4x4[DC_128_PRED ] = FUNCC(pred4x4_128_dc, depth);\
\
h->pred8x8l[VERT_PRED ]= FUNCC(pred8x8l_vertical , depth);\
h->pred8x8l[HOR_PRED ]= FUNCC(pred8x8l_horizontal , depth);\
@ -486,20 +475,15 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
if (chroma_format_idc <= 1) {\
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x8_plane, depth);\
} else {\
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x16_vertical , depth);\
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x16_plane, depth);\
}\
if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
if (chroma_format_idc <= 1) {\
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
} else {\
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane , depth);\
}\
} else\
h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && \
codec_id != AV_CODEC_ID_VP8) {\
if (depth > 8 || (codec_id != AV_CODEC_ID_RV40 && \
codec_id != AV_CODEC_ID_VP7 && \
codec_id != AV_CODEC_ID_VP8)) { \
if (chroma_format_idc <= 1) {\
h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
@ -521,10 +505,6 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred8x8[DC_PRED8x8 ]= FUNCD(pred8x8_dc_rv40);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc , depth);\
h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
}\
}\
if (chroma_format_idc <= 1) {\
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
@ -535,23 +515,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred16x16[DC_PRED8x8 ]= FUNCC(pred16x16_dc , depth);\
h->pred16x16[VERT_PRED8x8 ]= FUNCC(pred16x16_vertical , depth);\
h->pred16x16[HOR_PRED8x8 ]= FUNCC(pred16x16_horizontal , depth);\
switch(codec_id){\
case AV_CODEC_ID_SVQ3:\
h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_svq3);\
break;\
case AV_CODEC_ID_RV40:\
h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_rv40);\
break;\
case AV_CODEC_ID_VP7:\
case AV_CODEC_ID_VP8:\
h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_tm_vp8);\
h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc , depth);\
h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc , depth);\
break;\
default:\
h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane , depth);\
break;\
}\
h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane , depth);\
h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc , depth);\
h->pred16x16[TOP_DC_PRED8x8 ]= FUNCC(pred16x16_top_dc , depth);\
h->pred16x16[DC_128_PRED8x8 ]= FUNCC(pred16x16_128_dc , depth);\
@ -564,8 +528,8 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred8x8l_filter_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_filter_add , depth);\
h->pred8x8l_filter_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_filter_add , depth);\
if (chroma_format_idc <= 1) {\
h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add , depth);\
h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add , depth);\
h->pred8x8_add[VERT_PRED8x8] = FUNCC(pred8x8_vertical_add, depth);\
h->pred8x8_add[ HOR_PRED8x8] = FUNCC(pred8x8_horizontal_add, depth);\
} else {\
h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x16_vertical_add , depth);\
h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add , depth);\
@ -589,15 +553,50 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
default:
av_assert0(bit_depth<=8);
H264_PRED(8)
switch (codec_id) {
case AV_CODEC_ID_SVQ3:
h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_svq3);
h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_svq3);
break;
case AV_CODEC_ID_RV40:
h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_rv40);
h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_rv40);
h->pred4x4[HOR_UP_PRED ] = FUNCD(pred4x4_horizontal_up_rv40);
h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_down_left_rv40_nodown);
h->pred4x4[HOR_UP_PRED_RV40_NODOWN] = FUNCD(pred4x4_horizontal_up_rv40_nodown);
h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_vertical_left_rv40_nodown);
h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_rv40);
break;
case AV_CODEC_ID_VP7:
case AV_CODEC_ID_VP8:
h->pred4x4[VERT_PRED ] = FUNCD(pred4x4_vertical_vp8);
h->pred4x4[HOR_PRED ] = FUNCD(pred4x4_horizontal_vp8);
h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_vp8);
h->pred4x4[TM_VP8_PRED ] = FUNCD(pred4x4_tm_vp8);
h->pred4x4[VERT_VP8_PRED ] = FUNCC(pred4x4_vertical, 8);
h->pred4x4[DC_127_PRED ] = FUNCD(pred4x4_127_dc);
h->pred4x4[DC_129_PRED ] = FUNCD(pred4x4_129_dc);
h->pred4x4[HOR_VP8_PRED ] = FUNCC(pred4x4_horizontal, 8);
h->pred8x8[PLANE_PRED8x8 ] = FUNCD(pred8x8_tm_vp8);
h->pred8x8[DC_127_PRED8x8 ] = FUNCC(pred8x8_127_dc, 8);
h->pred8x8[DC_129_PRED8x8 ] = FUNCC(pred8x8_129_dc, 8);
h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_tm_vp8);
h->pred16x16[DC_127_PRED8x8] = FUNCC(pred16x16_127_dc, 8);
h->pred16x16[DC_129_PRED8x8] = FUNCC(pred16x16_129_dc, 8);
break;
}
break;
}
if (ARCH_AARCH64)
ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
if (ARCH_ARM)
ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
if (ARCH_X86)
ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
if (ARCH_MIPS)
ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
#if ARCH_AARCH64
ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
#elif ARCH_ARM
ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
#elif ARCH_X86
ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
#elif ARCH_MIPS
ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
#elif ARCH_LOONGARCH
ff_h264_pred_init_loongarch(h, codec_id, bit_depth, chroma_format_idc);
#endif
}

Просмотреть файл

@ -86,6 +86,8 @@
#define DC_129_PRED8x8 8
//@}
#define PART_NOT_AVAILABLE -2
/**
* Context for storing H.264 prediction functions
*/
@ -122,5 +124,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_loongarch(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
#endif /* AVCODEC_H264PRED_H */

Просмотреть файл

@ -111,32 +111,6 @@ static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright,
AV_WN4PA(src+3*stride, a);
}
static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright,
ptrdiff_t _stride)
{
pixel *src = (pixel*)_src;
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
AV_WN4PA(src+0*stride, a);
AV_WN4PA(src+1*stride, a);
AV_WN4PA(src+2*stride, a);
AV_WN4PA(src+3*stride, a);
}
static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright,
ptrdiff_t _stride)
{
pixel *src = (pixel*)_src;
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
AV_WN4PA(src+0*stride, a);
AV_WN4PA(src+1*stride, a);
AV_WN4PA(src+2*stride, a);
AV_WN4PA(src+3*stride, a);
}
#define LOAD_TOP_RIGHT_EDGE\
const unsigned av_unused t4 = topright[0];\
@ -427,9 +401,11 @@ static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
}
PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1)
PRED16x16_X(128, (1<<(BIT_DEPTH-1))+0)
#if BIT_DEPTH == 8
PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1)
PRED16x16_X(129, (1<<(BIT_DEPTH-1))+1)
#endif
static inline void FUNCC(pred16x16_plane_compat)(uint8_t *_src,
ptrdiff_t _stride,
@ -551,9 +527,11 @@ static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
}\
}
PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1)
PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0)
#if BIT_DEPTH == 8
PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1)
PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1)
#endif
static void FUNCC(pred8x16_128_dc)(uint8_t *_src, ptrdiff_t stride)
{

Просмотреть файл

@ -102,5 +102,6 @@ void ff_hpeldsp_init_arm(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_mips(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_loongarch(HpelDSPContext *c, int flags);
#endif /* AVCODEC_HPELDSP_H */

Просмотреть файл

@ -47,7 +47,6 @@ extern const AVHWAccel ff_mjpeg_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg1_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg1_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg1_videotoolbox_hwaccel;
extern const AVHWAccel ff_mpeg1_xvmc_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel;
extern const AVHWAccel ff_mpeg2_nvdec_hwaccel;
@ -55,11 +54,11 @@ extern const AVHWAccel ff_mpeg2_dxva2_hwaccel;
extern const AVHWAccel ff_mpeg2_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg2_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel;
extern const AVHWAccel ff_mpeg2_xvmc_hwaccel;
extern const AVHWAccel ff_mpeg4_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg4_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg4_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg4_videotoolbox_hwaccel;
extern const AVHWAccel ff_prores_videotoolbox_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va2_hwaccel;
extern const AVHWAccel ff_vc1_dxva2_hwaccel;
@ -74,6 +73,7 @@ extern const AVHWAccel ff_vp9_dxva2_hwaccel;
extern const AVHWAccel ff_vp9_nvdec_hwaccel;
extern const AVHWAccel ff_vp9_vaapi_hwaccel;
extern const AVHWAccel ff_vp9_vdpau_hwaccel;
extern const AVHWAccel ff_vp9_videotoolbox_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va2_hwaccel;
extern const AVHWAccel ff_wmv3_dxva2_hwaccel;

Просмотреть файл

@ -78,8 +78,6 @@ typedef struct AVCodecHWConfigInternal {
HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
#define HWACCEL_D3D11VA(codec) \
HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)
#define HWACCEL_XVMC(codec) \
HW_CONFIG_HWACCEL(0, 0, 1, XVMC, NONE, ff_ ## codec ## _xvmc_hwaccel)
#define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \
&(const AVCodecHWConfigInternal) { \

Просмотреть файл

@ -17,6 +17,7 @@
*/
#include "config.h"
#include "config_components.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "avcodec.h"
@ -26,7 +27,7 @@
#include "simple_idct.h"
#include "xvididct.h"
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
const uint8_t *src_scantable)
{
int i, end;
@ -52,10 +53,11 @@ av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
{
int i;
if (ARCH_X86)
if (ff_init_scantable_permutation_x86(idct_permutation,
perm_type))
return;
#if ARCH_X86
if (ff_init_scantable_permutation_x86(idct_permutation,
perm_type))
return;
#endif
switch (perm_type) {
case FF_IDCT_PERM_NONE:
@ -237,7 +239,7 @@ static void ff_jref_idct1_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
{
const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
av_unused const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
if (avctx->lowres==1) {
c->idct_put = ff_jref_idct4_put;
@ -287,7 +289,6 @@ av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
c->perm_type = FF_IDCT_PERM_NONE;
#endif /* CONFIG_FAANIDCT */
} else { // accurate/default
/* Be sure FF_IDCT_NONE will select this one, since it uses FF_IDCT_PERM_NONE */
c->idct_put = ff_simple_idct_put_int16_8bit;
c->idct_add = ff_simple_idct_add_int16_8bit;
c->idct = ff_simple_idct_int16_8bit;
@ -303,18 +304,21 @@ av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
if (CONFIG_MPEG4_DECODER && avctx->idct_algo == FF_IDCT_XVID)
ff_xvid_idct_init(c, avctx);
if (ARCH_AARCH64)
ff_idctdsp_init_aarch64(c, avctx, high_bit_depth);
if (ARCH_ALPHA)
ff_idctdsp_init_alpha(c, avctx, high_bit_depth);
if (ARCH_ARM)
ff_idctdsp_init_arm(c, avctx, high_bit_depth);
if (ARCH_PPC)
ff_idctdsp_init_ppc(c, avctx, high_bit_depth);
if (ARCH_X86)
ff_idctdsp_init_x86(c, avctx, high_bit_depth);
if (ARCH_MIPS)
ff_idctdsp_init_mips(c, avctx, high_bit_depth);
#if ARCH_AARCH64
ff_idctdsp_init_aarch64(c, avctx, high_bit_depth);
#elif ARCH_ALPHA
ff_idctdsp_init_alpha(c, avctx, high_bit_depth);
#elif ARCH_ARM
ff_idctdsp_init_arm(c, avctx, high_bit_depth);
#elif ARCH_PPC
ff_idctdsp_init_ppc(c, avctx, high_bit_depth);
#elif ARCH_X86
ff_idctdsp_init_x86(c, avctx, high_bit_depth);
#elif ARCH_MIPS
ff_idctdsp_init_mips(c, avctx, high_bit_depth);
#elif ARCH_LOONGARCH
ff_idctdsp_init_loongarch(c, avctx, high_bit_depth);
#endif
ff_init_scantable_permutation(c->idct_permutation,
c->perm_type);

Просмотреть файл

@ -43,7 +43,7 @@ enum idct_permutation_type {
FF_IDCT_PERM_SSE2,
};
void ff_init_scantable(uint8_t *permutation, ScanTable *st,
void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
const uint8_t *src_scantable);
void ff_init_scantable_permutation(uint8_t *idct_permutation,
enum idct_permutation_type perm_type);
@ -118,5 +118,7 @@ void ff_idctdsp_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_idctdsp_init_mips(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_idctdsp_init_loongarch(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
#endif /* AVCODEC_IDCTDSP_H */

Просмотреть файл

@ -25,46 +25,9 @@
*/
#include "avcodec.h"
#include "internal.h"
#include "mathops.h"
#include "libavutil/avassert.h"
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixfmt.h"
#if FF_API_GETCHROMA
/* Deprecated public helper (compiled only under FF_API_GETCHROMA, per the
 * surrounding #if): report the chroma subsampling of pix_fmt as log2 shifts.
 * Asserts (aborts) on an unknown pixel format rather than returning an error. */
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    *h_shift = desc->log2_chroma_w;
    *v_shift = desc->log2_chroma_h;
}
#endif
#if FF_API_AVCODEC_PIX_FMT
/* Deprecated wrapper (under FF_API_AVCODEC_PIX_FMT): forwards directly to the
 * libavutil replacement av_get_pix_fmt_loss() with identical arguments. */
int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
                             enum AVPixelFormat src_pix_fmt,
                             int has_alpha)
{
    return av_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
}
/* Deprecated wrapper (under FF_API_AVCODEC_PIX_FMT): forwards to the
 * libavutil replacement av_find_best_pix_fmt_of_2(). */
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
                                                  enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
{
    return av_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, has_alpha, loss_ptr);
}
/* Deprecated wrapper (under FF_API_AVCODEC_PIX_FMT). Despite the different
 * name, this is byte-for-byte the same forwarding body as
 * avcodec_find_best_pix_fmt_of_2() above — both map to
 * av_find_best_pix_fmt_of_2(). */
enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
                                              enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
{
    return av_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, has_alpha, loss_ptr);
}
#endif
enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
enum AVPixelFormat src_pix_fmt,
int has_alpha, int *loss_ptr){
@ -83,152 +46,3 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *p
return best;
}
#if FF_API_AVPICTURE
FF_DISABLE_DEPRECATION_WARNINGS
/* return true if yuv planar */
/* Return 1 if the format described by desc is planar YUV, 0 otherwise.
 * A format qualifies only when it is non-RGB, flagged planar, and every
 * plane index 0..nb_components-1 is actually referenced by some component
 * (a gap would mean the layout is not truly planar). */
static inline int is_yuv_planar(const AVPixFmtDescriptor *desc)
{
    int used[4] = { 0 };

    /* RGB formats and packed (non-planar) formats never qualify. */
    if ((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
        !(desc->flags & AV_PIX_FMT_FLAG_PLANAR))
        return 0;

    /* Record which plane each component lives in... */
    for (int i = 0; i < desc->nb_components; i++)
        used[desc->comp[i].plane] = 1;

    /* ...and reject the format if any of the first nb_components planes
     * is left unused. */
    for (int i = 0; i < desc->nb_components; i++)
        if (!used[i])
            return 0;

    return 1;
}
/* Deprecated (FF_API_AVPICTURE) zero-copy crop: point dst's data pointers
 * into src's buffers, offset by top_band rows and left_band columns. No
 * pixel data is copied; dst aliases src.
 * Returns 0 on success, -1 on an out-of-range pix_fmt or, for non-planar
 * formats, when the bands are not aligned to the chroma subsampling. */
int av_picture_crop(AVPicture *dst, const AVPicture *src,
                    enum AVPixelFormat pix_fmt, int top_band, int left_band)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int y_shift;
    int x_shift;
    int max_step[4];

    if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
        return -1;

    y_shift = desc->log2_chroma_h;
    x_shift = desc->log2_chroma_w;
    /* max_step[0] is the byte step between horizontally adjacent pixels
     * in plane 0 (used for the packed path below). */
    av_image_fill_max_pixsteps(max_step, NULL, desc);

    if (is_yuv_planar(desc)) {
        /* Chroma planes are offset with the subsampling shifts applied.
         * NOTE(review): left_band is added to data[0] without a pixel-step
         * multiplier — this assumes 1 byte per sample in the luma plane;
         * confirm for >8-bit planar formats. */
        dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
        dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
        dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
    } else{
        /* Packed path: bands must be multiples of the subsampling factors,
         * otherwise the crop cannot be expressed as a pointer offset. */
        if(top_band % (1<<y_shift) || left_band % (1<<x_shift))
            return -1;
        dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + (left_band * max_step[0]);
    }

    /* Strides are unchanged — the crop only moves the start pointers. */
    dst->linesize[0] = src->linesize[0];
    dst->linesize[1] = src->linesize[1];
    dst->linesize[2] = src->linesize[2];
    return 0;
}
/* Deprecated (FF_API_AVPICTURE) picture padding: fill border bands of dst
 * (padtop/padbottom rows, padleft/padright columns) with the per-plane
 * values in color[], and — when src is non-NULL — copy the interior of src
 * into dst between the bands. width/height are the dimensions of dst.
 * Returns 0 on success, -1 on an out-of-range pix_fmt or when src padding
 * is requested for a non-planar format (not implemented).
 * NOTE(review): the planar path iterates exactly 3 planes and reads
 * color[0..2], so callers are expected to pass 3-plane YUV and a 3-entry
 * color array — confirm at call sites. */
int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
                   enum AVPixelFormat pix_fmt, int padtop, int padbottom, int padleft, int padright,
                   int *color)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    uint8_t *optr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;
    int max_step[4];

    if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
        return -1;

    /* ---- Packed (non-planar) formats: only plane 0, borders only. ---- */
    if (!is_yuv_planar(desc)) {
        if (src)
            return -1; //TODO: Not yet implemented

        av_image_fill_max_pixsteps(max_step, NULL, desc);

        /* Top band plus the left edge of the first interior row, filled in
         * one contiguous memset. */
        if (padtop || padleft) {
            memset(dst->data[0], color[0],
                   dst->linesize[0] * padtop + (padleft * max_step[0]));
        }

        /* Left+right columns of every interior row: each row's right band
         * is contiguous with the next row's left band, hence one memset
         * of (padleft + padright) pixels per row. */
        if (padleft || padright) {
            optr = dst->data[0] + dst->linesize[0] * padtop +
                   (dst->linesize[0] - (padright * max_step[0]));
            yheight = height - 1 - (padtop + padbottom);
            for (y = 0; y < yheight; y++) {
                memset(optr, color[0], (padleft + padright) * max_step[0]);
                optr += dst->linesize[0];
            }
        }

        /* Right edge of the last interior row plus the whole bottom band. */
        if (padbottom || padright) {
            optr = dst->data[0] + dst->linesize[0] * (height - padbottom) -
                   (padright * max_step[0]);
            memset(optr, color[0], dst->linesize[0] * padbottom +
                   (padright * max_step[0]));
        }

        return 0;
    }

    /* ---- Planar YUV: same scheme per plane, with chroma subsampling
     * shifts applied to all row/column counts for planes 1 and 2. ---- */
    for (i = 0; i < 3; i++) {
        x_shift = i ? desc->log2_chroma_w : 0;
        y_shift = i ? desc->log2_chroma_h : 0;

        /* Top band + left edge of first interior row. */
        if (padtop || padleft) {
            memset(dst->data[i], color[i],
                   dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }

        /* Left/right column bands of the interior rows. */
        if (padleft || padright) {
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                optr += dst->linesize[i];
            }
        }

        /* Copy the source interior: the first row separately, then each
         * following row together with the preceding row's right band and
         * its own left band (one memset + one memcpy per row). */
        if (src) { /* first line */
            uint8_t *iptr = src->data[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (padleft >> x_shift);
            memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
            iptr += src->linesize[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                memcpy(optr + ((padleft + padright) >> x_shift), iptr,
                       (width - padleft - padright) >> x_shift);
                iptr += src->linesize[i];
                optr += dst->linesize[i];
            }
        }

        /* Right edge of last interior row + bottom band. */
        if (padbottom || padright) {
            optr = dst->data[i] + dst->linesize[i] *
                   ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i],dst->linesize[i] *
                   (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_AVPICTURE */

Просмотреть файл

@ -28,86 +28,13 @@
#include "libavutil/buffer.h"
#include "libavutil/channel_layout.h"
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "config.h"
/**
* The codec does not modify any global variables in the init function,
* allowing to call the init function without locking any global mutexes.
*/
#define FF_CODEC_CAP_INIT_THREADSAFE (1 << 0)
/**
* The codec allows calling the close function for deallocation even if
* the init function returned a failure. Without this capability flag, a
* codec does such cleanup internally when returning failures from the
* init function and does not expect the close function to be called at
* all.
*/
#define FF_CODEC_CAP_INIT_CLEANUP (1 << 1)
/**
* Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set
* AVFrame.pkt_dts manually. If the flag is set, decode.c won't overwrite
* this field. If it's unset, decode.c tries to guess the pkt_dts field
* from the input AVPacket.
*/
#define FF_CODEC_CAP_SETS_PKT_DTS (1 << 2)
/**
* The decoder extracts and fills its parameters even if the frame is
* skipped due to the skip_frame setting.
*/
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM (1 << 3)
/**
* The decoder sets the cropping fields in the output frames manually.
* If this cap is set, the generic code will initialize output frame
* dimensions to coded rather than display values.
*/
#define FF_CODEC_CAP_EXPORTS_CROPPING (1 << 4)
/**
* Codec initializes slice-based threading with a main function
*/
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF (1 << 5)
/*
* The codec supports frame threading and has inter-frame dependencies, so it
* uses ff_thread_report/await_progress().
*/
#define FF_CODEC_CAP_ALLOCATE_PROGRESS (1 << 6)
/**
* Codec handles avctx->thread_count == 0 (auto) internally.
*/
#define FF_CODEC_CAP_AUTO_THREADS (1 << 7)
/**
* Codec handles output frame properties internally instead of letting the
* internal logic derive them from AVCodecInternal.last_pkt_props.
*/
#define FF_CODEC_CAP_SETS_FRAME_PROPS (1 << 8)
/**
* AVCodec.codec_tags termination value
*/
#define FF_CODEC_TAGS_END -1
#ifdef TRACE
# define ff_tlog(ctx, ...) av_log(ctx, AV_LOG_TRACE, __VA_ARGS__)
#else
# define ff_tlog(ctx, ...) do { } while(0)
#endif
#define FF_DEFAULT_QUANT_BIAS 999999
#define FF_QSCALE_TYPE_MPEG1 0
#define FF_QSCALE_TYPE_MPEG2 1
#define FF_QSCALE_TYPE_H264 2
#define FF_QSCALE_TYPE_VP56 3
#define FF_SANE_NB_CHANNELS 512U
#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
#if HAVE_SIMD_ALIGN_64
# define STRIDE_ALIGN 64 /* AVX-512 */
#elif HAVE_SIMD_ALIGN_32
@ -118,20 +45,10 @@
# define STRIDE_ALIGN 8
#endif
typedef struct DecodeSimpleContext {
AVPacket *in_pkt;
} DecodeSimpleContext;
typedef struct EncodeSimpleContext {
AVFrame *in_frame;
} EncodeSimpleContext;
typedef struct AVCodecInternal {
/**
* Whether the parent AVCodecContext is a copy of the context which had
* init() called on it.
* This is used by multithreading - shared tables and picture pointers
* should be freed from the original context only.
* When using frame-threaded decoding, this field is set for the first
* worker thread (e.g. to decode extradata just once).
*/
int is_copy;
@ -141,23 +58,27 @@ typedef struct AVCodecInternal {
*/
int last_audio_frame;
#if FF_API_OLD_ENCDEC
AVFrame *to_free;
#endif
AVBufferRef *pool;
void *thread_ctx;
DecodeSimpleContext ds;
AVBSFContext *bsf;
/**
* This packet is used to hold the packet given to decoders
* implementing the .decode API; it is unused by the generic
* code for decoders implementing the .receive_frame API and
* may be freely used (but not freed) by them with the caveat
* that the packet will be unreferenced generically in
* avcodec_flush_buffers().
*/
AVPacket *in_pkt;
struct AVBSFContext *bsf;
/**
* Properties (timestamps+side data) extracted from the last packet passed
* for decoding.
*/
AVPacket *last_pkt_props;
AVFifoBuffer *pkt_props;
struct AVFifo *pkt_props;
/**
* temporary buffer used for encoders to store their bitstream
@ -165,9 +86,28 @@ typedef struct AVCodecInternal {
uint8_t *byte_buffer;
unsigned int byte_buffer_size;
/**
* This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only
* formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set).
* This is used to set said flag generically for said encoders.
*/
int intra_only_flag;
void *frame_thread_encoder;
EncodeSimpleContext es;
/**
* The input frame is stored here for encoders implementing the simple
* encode API.
*
* Not allocated in other cases.
*/
AVFrame *in_frame;
/**
* If this is set, then FFCodec->close (if existing) needs to be called
* for the parent AVCodecContext.
*/
int needs_close;
/**
* Number of audio samples to skip at the start of the next decoded frame
@ -191,18 +131,6 @@ typedef struct AVCodecInternal {
AVFrame *buffer_frame;
int draining_done;
#if FF_API_OLD_ENCDEC
int compat_decode_warned;
/* this variable is set by the decoder internals to signal to the old
* API compat wrappers the amount of data consumed from the last packet */
size_t compat_decode_consumed;
/* when a partial packet has been consumed, this stores the remaining size
* of the packet (that should be submitted in the next decode call */
size_t compat_decode_partial_size;
AVFrame *compat_decode_frame;
AVPacket *compat_encode_packet;
#endif
int showed_multi_packet_warning;
int skip_samples_multiplier;
@ -215,24 +143,20 @@ typedef struct AVCodecInternal {
int initial_format;
int initial_width, initial_height;
int initial_sample_rate;
#if FF_API_OLD_CHANNEL_LAYOUT
int initial_channels;
uint64_t initial_channel_layout;
#endif
AVChannelLayout initial_ch_layout;
} AVCodecInternal;
struct AVCodecDefault {
const uint8_t *key;
const uint8_t *value;
};
extern const uint8_t ff_log2_run[41];
/**
* Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
* If there is no such matching pair then size is returned.
*/
int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
unsigned int avpriv_toupper4(unsigned int x);
unsigned int ff_toupper4(unsigned int x);
void ff_color_frame(AVFrame *frame, const int color[4]);
@ -243,34 +167,6 @@ void ff_color_frame(AVFrame *frame, const int color[4]);
*/
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
/**
* Check AVPacket size and/or allocate data.
*
* Encoders supporting AVCodec.encode2() can use this as a convenience to
* ensure the output packet data is large enough, whether provided by the user
* or allocated in this function.
*
* @param avctx the AVCodecContext of the encoder
* @param avpkt the AVPacket
* If avpkt->data is already set, avpkt->size is checked
* to ensure it is large enough.
* If avpkt->data is NULL, a new buffer is allocated.
* avpkt->size is set to the specified size.
* All other AVPacket fields will be reset with av_init_packet().
* @param size the minimum required packet size
* @param min_size This is a hint to the allocation algorithm, which indicates
* to what minimal size the caller might later shrink the packet
* to. Encoders often allocate packets which are larger than the
* amount of data that is written into them as the exact amount is
* not known at the time of allocation. min_size represents the
* size a packet might be shrunk to by the caller. Can be set to
* 0. setting this roughly correctly allows the allocation code
* to choose between several allocation strategies to improve
* speed slightly.
* @return non negative on success, negative error code on failure
*/
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size);
/**
* Rescale from sample rate to AVCodecContext.time_base.
*/
@ -320,10 +216,6 @@ int ff_thread_can_start_frame(AVCodecContext *avctx);
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx);
const uint8_t *avpriv_find_start_code(const uint8_t *p,
const uint8_t *end,
uint32_t *state);
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec);
/**
@ -398,10 +290,4 @@ int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
void ff_dvdsub_parse_palette(uint32_t *palette, const char *p);
#if defined(_WIN32) && CONFIG_SHARED && !defined(BUILDING_avcodec)
# define av_export_avcodec __declspec(dllimport)
#else
# define av_export_avcodec
#endif
#endif /* AVCODEC_INTERNAL_H */

Просмотреть файл

@ -66,9 +66,8 @@
* Independent JPEG Group's fast AAN dct.
*/
#include <stdlib.h>
#include <stdio.h>
#include "libavutil/common.h"
#include <stdint.h>
#include "libavutil/attributes.h"
#include "dct.h"
#define DCTSIZE 8

Просмотреть файл

@ -62,7 +62,9 @@
* Independent JPEG Group's LLM idct.
*/
#include "libavutil/common.h"
#include <stddef.h>
#include <stdint.h>
#include "libavutil/intreadwrite.h"
#include "dct.h"

Просмотреть файл

@ -19,9 +19,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dav1d/dav1d.h"
#include <dav1d/dav1d.h>
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
@ -30,6 +31,7 @@
#include "atsc_a53.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "internal.h"
@ -39,6 +41,9 @@
typedef struct Libdav1dContext {
AVClass *class;
Dav1dContext *c;
/* This packet coincides with AVCodecInternal.in_pkt
* and is not owned by us. */
AVPacket *pkt;
AVBufferPool *pool;
int pool_size;
@ -123,13 +128,98 @@ static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
av_buffer_unref(&buf);
}
/* Copy the decoding parameters carried by a dav1d sequence header into the
 * AVCodecContext: profile/level, chroma location, color description,
 * pixel format, frame rate, and the film-grain property flag. Called both
 * from extradata parsing at init time and on new-sequence events. */
static void libdav1d_init_params(AVCodecContext *c, const Dav1dSequenceHeader *seq)
{
    c->profile = seq->profile;
    /* Pack the operating point's major/minor level into a single level
     * code: (major - 2) in the upper bits, minor in the lower two. */
    c->level = ((seq->operating_points[0].major_level - 2) << 2)
               | seq->operating_points[0].minor_level;

    switch (seq->chr) {
    case DAV1D_CHR_VERTICAL:
        c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    /* dav1d's matrix/primaries/transfer enums are cast directly to the
     * corresponding FFmpeg enums (same numeric values are assumed). */
    c->colorspace = (enum AVColorSpace) seq->mtrx;
    c->color_primaries = (enum AVColorPrimaries) seq->pri;
    c->color_trc = (enum AVColorTransferCharacteristic) seq->trc;
    c->color_range = seq->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

    /* 4:4:4 with identity matrix + BT.709 primaries + sRGB transfer is
     * treated as RGB; otherwise pick from the layout/bit-depth table. */
    if (seq->layout == DAV1D_PIXEL_LAYOUT_I444 &&
        seq->mtrx == DAV1D_MC_IDENTITY &&
        seq->pri == DAV1D_COLOR_PRI_BT709 &&
        seq->trc == DAV1D_TRC_SRGB)
        c->pix_fmt = pix_fmt_rgb[seq->hbd];
    else
        c->pix_fmt = pix_fmt[seq->layout][seq->hbd];

    /* Derive the frame rate from the sequence timing info, if present. */
    if (seq->num_units_in_tick && seq->time_scale) {
        av_reduce(&c->framerate.den, &c->framerate.num,
                  seq->num_units_in_tick, seq->time_scale, INT_MAX);
        if (seq->equal_picture_interval)
            c->ticks_per_frame = seq->num_ticks_per_picture;
    }

    /* Keep the film-grain property bit in sync with the sequence header. */
    if (seq->film_grain_present)
        c->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
    else
        c->properties &= ~FF_CODEC_PROPERTY_FILM_GRAIN;
}
/* Probe the codec extradata for an AV1 sequence header and, if one is
 * found, initialize the AVCodecContext parameters and coded dimensions
 * from it before any packet is decoded.
 * Returns 0 when there is no extradata or no sequence header (not an
 * error), a negative error code on a malformed config record (only when
 * AV_EF_EXPLODE is set) or on a ff_set_dimensions() failure. */
static av_cold int libdav1d_parse_extradata(AVCodecContext *c)
{
    Dav1dSequenceHeader seq;
    size_t offset = 0;
    int res;

    if (!c->extradata || c->extradata_size <= 0)
        return 0;

    /* A set marker bit means the extradata starts with a 4-byte config
     * header (presumably an ISOBMFF av1C record — version must be 1)
     * followed by raw config OBUs; skip the header before parsing. */
    if (c->extradata[0] & 0x80) {
        int version = c->extradata[0] & 0x7F;

        if (version != 1 || c->extradata_size < 4) {
            /* Only hard-fail when the caller asked for AV_EF_EXPLODE;
             * otherwise warn and carry on without extradata. */
            int explode = !!(c->err_recognition & AV_EF_EXPLODE);
            av_log(c, explode ? AV_LOG_ERROR : AV_LOG_WARNING,
                   "Error decoding extradata\n");
            return explode ? AVERROR_INVALIDDATA : 0;
        }

        // Do nothing if there are no configOBUs to parse
        if (c->extradata_size == 4)
            return 0;

        offset = 4;
    }

    res = dav1d_parse_sequence_header(&seq, c->extradata + offset,
                                      c->extradata_size - offset);
    if (res < 0)
        return 0; // Assume no seqhdr OBUs are present

    libdav1d_init_params(c, &seq);
    /* Use the sequence header's maximum dimensions as the coded size. */
    res = ff_set_dimensions(c, seq.max_width, seq.max_height);
    if (res < 0)
        return res;

    return 0;
}
static av_cold int libdav1d_init(AVCodecContext *c)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dSettings s;
#if FF_DAV1D_VERSION_AT_LEAST(6,0)
int threads = c->thread_count;
#else
int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
#endif
int res;
dav1d->pkt = c->internal->in_pkt;
av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());
dav1d_default_settings(&s);
@ -141,19 +231,22 @@ static av_cold int libdav1d_init(AVCodecContext *c)
s.frame_size_limit = c->max_pixels;
if (dav1d->apply_grain >= 0)
s.apply_grain = dav1d->apply_grain;
else if (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN)
s.apply_grain = 0;
else
s.apply_grain = !(c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
s.all_layers = dav1d->all_layers;
if (dav1d->operating_point >= 0)
s.operating_point = dav1d->operating_point;
#if FF_DAV1D_VERSION_AT_LEAST(6,2)
s.strict_std_compliance = c->strict_std_compliance > 0;
#endif
#if FF_DAV1D_VERSION_AT_LEAST(6,0)
if (dav1d->frame_threads || dav1d->tile_threads)
s.n_threads = FFMAX(dav1d->frame_threads, dav1d->tile_threads);
else
s.n_threads = FFMIN(threads, DAV1D_MAX_THREADS);
s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 1 : s.n_threads;
s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 1 : 0;
av_log(c, AV_LOG_DEBUG, "Using %d threads, %d max_frame_delay\n",
s.n_threads, s.max_frame_delay);
#else
@ -167,6 +260,10 @@ static av_cold int libdav1d_init(AVCodecContext *c)
s.n_frame_threads, s.n_tile_threads);
#endif
res = libdav1d_parse_extradata(c);
if (res < 0)
return res;
res = dav1d_open(&dav1d->c, &s);
if (res < 0)
return AVERROR(ENOMEM);
@ -198,37 +295,41 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
Libdav1dContext *dav1d = c->priv_data;
Dav1dData *data = &dav1d->data;
Dav1dPicture pic = { 0 }, *p = &pic;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
enum Dav1dEventFlags event_flags = 0;
#endif
int res;
if (!data->sz) {
AVPacket pkt = { 0 };
AVPacket *const pkt = dav1d->pkt;
res = ff_decode_get_packet(c, &pkt);
res = ff_decode_get_packet(c, pkt);
if (res < 0 && res != AVERROR_EOF)
return res;
if (pkt.size) {
res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
if (pkt->size) {
res = dav1d_data_wrap(data, pkt->data, pkt->size,
libdav1d_data_free, pkt->buf);
if (res < 0) {
av_packet_unref(&pkt);
av_packet_unref(pkt);
return res;
}
data->m.timestamp = pkt.pts;
data->m.offset = pkt.pos;
data->m.duration = pkt.duration;
data->m.timestamp = pkt->pts;
data->m.offset = pkt->pos;
data->m.duration = pkt->duration;
pkt.buf = NULL;
av_packet_unref(&pkt);
pkt->buf = NULL;
av_packet_unref(pkt);
if (c->reordered_opaque != AV_NOPTS_VALUE) {
uint8_t *reordered_opaque = av_malloc(sizeof(c->reordered_opaque));
uint8_t *reordered_opaque = av_memdup(&c->reordered_opaque,
sizeof(c->reordered_opaque));
if (!reordered_opaque) {
dav1d_data_unref(data);
return AVERROR(ENOMEM);
}
memcpy(reordered_opaque, &c->reordered_opaque, sizeof(c->reordered_opaque));
res = dav1d_data_wrap_user_data(data, reordered_opaque,
libdav1d_user_data_free, reordered_opaque);
if (res < 0) {
@ -237,6 +338,9 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
return res;
}
}
} else if (res >= 0) {
av_packet_unref(pkt);
return AVERROR(EAGAIN);
}
}
@ -244,8 +348,10 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
if (res < 0) {
if (res == AVERROR(EINVAL))
res = AVERROR_INVALIDDATA;
if (res != AVERROR(EAGAIN))
if (res != AVERROR(EAGAIN)) {
dav1d_data_unref(data);
return res;
}
}
res = dav1d_get_picture(dav1d->c, p);
@ -274,9 +380,16 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
frame->linesize[1] = p->stride[1];
frame->linesize[2] = p->stride[1];
c->profile = p->seq_hdr->profile;
c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
| p->seq_hdr->operating_points[0].minor_level;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
dav1d_get_event_flags(dav1d->c, &event_flags);
if (c->pix_fmt == AV_PIX_FMT_NONE ||
event_flags & DAV1D_EVENT_FLAG_NEW_SEQUENCE)
#endif
libdav1d_init_params(c, p->seq_hdr);
res = ff_decode_frame_props(c, frame);
if (res < 0)
goto fail;
frame->width = p->p.w;
frame->height = p->p.h;
if (c->width != p->p.w || c->height != p->p.h) {
@ -292,46 +405,13 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
INT_MAX);
ff_set_sar(c, frame->sample_aspect_ratio);
switch (p->seq_hdr->chr) {
case DAV1D_CHR_VERTICAL:
frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
break;
case DAV1D_CHR_COLOCATED:
frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
break;
}
frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
p->seq_hdr->pri == DAV1D_COLOR_PRI_BT709 &&
p->seq_hdr->trc == DAV1D_TRC_SRGB)
frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
else
frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
if (p->m.user_data.data)
memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
else
frame->reordered_opaque = AV_NOPTS_VALUE;
if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
av_reduce(&c->framerate.den, &c->framerate.num,
p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
if (p->seq_hdr->equal_picture_interval)
c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
}
// match timestamps and packet size
frame->pts = p->m.timestamp;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pts = p->m.timestamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
frame->pkt_dts = p->m.timestamp;
frame->pkt_pos = p->m.offset;
frame->pkt_size = p->m.size;
@ -479,8 +559,8 @@ static av_cold int libdav1d_close(AVCodecContext *c)
#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
{ "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
{ "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
{ "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
{ "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
{ "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD | AV_OPT_FLAG_DEPRECATED },
{ "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
{ "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
@ -494,19 +574,19 @@ static const AVClass libdav1d_class = {
.version = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_libdav1d_decoder = {
.name = "libdav1d",
.long_name = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AV1,
const FFCodec ff_libdav1d_decoder = {
.p.name = "libdav1d",
.p.long_name = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AV1,
.priv_data_size = sizeof(Libdav1dContext),
.init = libdav1d_init,
.close = libdav1d_close,
.flush = libdav1d_flush,
.receive_frame = libdav1d_receive_frame,
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SETS_PKT_DTS |
FF_CODEC_CAP_AUTO_THREADS,
.priv_class = &libdav1d_class,
.wrapper_name = "libdav1d",
.p.priv_class = &libdav1d_class,
.p.wrapper_name = "libdav1d",
};

Просмотреть файл

@ -25,12 +25,12 @@
#include <stdint.h>
#include "libavutil/common.h"
#include "libavutil/reverse.h"
#include "config.h"
#define MAX_NEG_CROP 1024
extern const uint32_t ff_inverse[257];
extern const uint8_t ff_log2_run[41];
extern const uint8_t ff_sqrt_tab[256];
extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t ff_zigzag_direct[64];
@ -126,6 +126,8 @@ static inline av_const int median4(int a, int b, int c, int d)
}
#endif
#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
@ -240,12 +242,4 @@ static inline int8_t ff_u8_to_s8(uint8_t a)
return b.s8;
}
static av_always_inline uint32_t bitswap_32(uint32_t x)
{
return (uint32_t)ff_reverse[ x & 0xFF] << 24 |
(uint32_t)ff_reverse[(x >> 8) & 0xFF] << 16 |
(uint32_t)ff_reverse[(x >> 16) & 0xFF] << 8 |
(uint32_t)ff_reverse[ x >> 24];
}
#endif /* AVCODEC_MATHOPS_H */

Просмотреть файл

@ -112,3 +112,12 @@ const uint8_t ff_zigzag_scan[16+1] = {
1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4,
3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
const uint8_t ff_log2_run[41] = {
0, 0, 0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 3, 3, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
24,
};

Просмотреть файл

@ -118,14 +118,14 @@ int ff_pre_estimate_p_frame_motion(struct MpegEncContext *s,
int ff_epzs_motion_search(struct MpegEncContext *s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index,
int16_t (*last_mv)[2], int ref_mv_scale, int size,
int h);
const int16_t (*last_mv)[2], int ref_mv_scale,
int size, int h);
int ff_get_mb_score(struct MpegEncContext *s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate);
int ff_get_best_fcode(struct MpegEncContext *s,
int16_t (*mv_table)[2], int type);
const int16_t (*mv_table)[2], int type);
void ff_fix_long_p_mvs(struct MpegEncContext *s, int type);
void ff_fix_long_mvs(struct MpegEncContext *s, uint8_t *field_select_table,

Просмотреть файл

@ -9,11 +9,12 @@
if CONFIG['FFVPX_ASFLAGS']:
if CONFIG['CPU_ARCH'] == 'x86' or CONFIG['CPU_ARCH'] == 'x86_64':
DIRS += ['x86']
elif CONFIG['CPU_ARCH'] == 'aarch64':
DIRS += ['aarch64']
elif CONFIG['CPU_ARCH'] == 'arm':
DIRS += ['arm']
if CONFIG['CPU_ARCH'] == 'aarch64':
DIRS += ['aarch64']
SharedLibrary('mozavcodec')
SOURCES += [
'allcodecs.c',
@ -40,6 +41,7 @@ SOURCES += [
'flacdata.c',
'flacdec.c',
'flacdsp.c',
'get_buffer.c',
'idctdsp.c',
'jfdctfst.c',
'jfdctint.c',
@ -54,10 +56,12 @@ SOURCES += [
'mpegaudiodsp_data.c',
'mpegaudiodsp_fixed.c',
'mpegaudiodsp_float.c',
'mpegaudiotabs.c',
'null_bsf.c',
'options.c',
'parser.c',
'parsers.c',
'profiles.c',
'pthread.c',
'pthread_frame.c',
'pthread_slice.c',
@ -65,6 +69,8 @@ SOURCES += [
'reverse.c',
'simple_idct.c',
'utils.c',
'version.c',
'vlc.c',
'vorbis_parser.c',
'xiph.c'
]
@ -73,14 +79,12 @@ if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
SOURCES += [
'av1dec.c',
'avpicture.c',
'bitstream_filter.c',
'cbs.c',
'cbs_av1.c',
'golomb.c',
'h264pred.c',
'imgconvert.c',
'mathtables.c',
'profiles.c',
'qsv_api.c',
'raw.c',
'videodsp.c',

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше