Merge "SAD32xh and SAD64xh for AVX2"
This commit is contained in:
Коммит
687c56e802
|

@@ -625,6 +625,20 @@ INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
 #if HAVE_AVX2
 #if CONFIG_VP9_ENCODER
+const SadMxNVp9Func sad_64x64_avx2_vp9 = vp9_sad64x64_avx2;
+const SadMxNVp9Func sad_64x32_avx2_vp9 = vp9_sad64x32_avx2;
+const SadMxNVp9Func sad_32x64_avx2_vp9 = vp9_sad32x64_avx2;
+const SadMxNVp9Func sad_32x32_avx2_vp9 = vp9_sad32x32_avx2;
+const SadMxNVp9Func sad_32x16_avx2_vp9 = vp9_sad32x16_avx2;
+const SadMxNVp9Param avx2_vp9_tests[] = {
+  make_tuple(64, 64, sad_64x64_avx2_vp9),
+  make_tuple(64, 32, sad_64x32_avx2_vp9),
+  make_tuple(32, 64, sad_32x64_avx2_vp9),
+  make_tuple(32, 32, sad_32x32_avx2_vp9),
+  make_tuple(32, 16, sad_32x16_avx2_vp9),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADVP9Test, ::testing::ValuesIn(avx2_vp9_tests));
+
 const SadMxNx4Func sad_64x64x4d_avx2 = vp9_sad64x64x4d_avx2;
 const SadMxNx4Func sad_32x32x4d_avx2 = vp9_sad32x32x4d_avx2;
 INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::Values(
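
The tuples above bind each new block size to its AVX2 kernel so the shared SAD test can compare it against the C reference. For orientation, the operation under test is plain sum-of-absolute-differences; a minimal scalar sketch of it (illustrative only, not the tree's reference implementation):

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences over a width x height block; every
 * optimized vp9_sadWxH function must return exactly this value. */
static unsigned int sad_ref(const uint8_t *src, int src_stride,
                            const uint8_t *ref, int ref_stride,
                            int width, int height) {
  unsigned int sad = 0;
  int row, col;
  for (row = 0; row < height; ++row) {
    for (col = 0; col < width; ++col)
      sad += abs(src[col] - ref[col]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}
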
@@ -931,22 +931,22 @@ add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_pt
 specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sad64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vp9_sad64x64 neon/, "$sse2_x86inc";
+specialize qw/vp9_sad64x64 neon avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vp9_sad32x64/, "$sse2_x86inc";
+specialize qw/vp9_sad32x64 avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vp9_sad64x32/, "$sse2_x86inc";
+specialize qw/vp9_sad64x32 avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vp9_sad32x16/, "$sse2_x86inc";
+specialize qw/vp9_sad32x16 avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
 specialize qw/vp9_sad16x32/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vp9_sad32x32 neon/, "$sse2_x86inc";
+specialize qw/vp9_sad32x32 neon avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride";
 specialize qw/vp9_sad16x16 neon/, "$sse2_x86inc";
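
Each specialize line registers the listed optimized variants with libvpx's run-time CPU detection (rtcd) generator, which emits a function pointer per prototype and picks the best implementation at startup. Roughly, and only as a hand-written sketch of what the generated glue does (setup_rtcd and the HAS_AVX2 flag value here are placeholders, not libvpx's actual generated names):

#include <stdint.h>

#define HAS_AVX2 0x80  /* placeholder feature bit; libvpx defines its own */

unsigned int vp9_sad64x64_c(const uint8_t *, int, const uint8_t *, int);
unsigned int vp9_sad64x64_avx2(const uint8_t *, int, const uint8_t *, int);

/* rtcd-style dispatch: start from the portable C version and upgrade
 * to the AVX2 kernel when the CPU reports support. */
unsigned int (*vp9_sad64x64)(const uint8_t *src_ptr, int source_stride,
                             const uint8_t *ref_ptr, int ref_stride);

static void setup_rtcd(int cpu_flags) {
  vp9_sad64x64 = vp9_sad64x64_c;
  if (cpu_flags & HAS_AVX2) vp9_sad64x64 = vp9_sad64x64_avx2;
}
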
@@ -970,22 +970,22 @@ add_proto qw/unsigned int vp9_sad4x4/, "const uint8_t *src_ptr, int source_strid
 specialize qw/vp9_sad4x4/, "$sse_x86inc";
 
 add_proto qw/unsigned int vp9_sad64x64_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vp9_sad64x64_avg/, "$sse2_x86inc";
+specialize qw/vp9_sad64x64_avg avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x64_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vp9_sad32x64_avg/, "$sse2_x86inc";
+specialize qw/vp9_sad32x64_avg avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad64x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vp9_sad64x32_avg/, "$sse2_x86inc";
+specialize qw/vp9_sad64x32_avg avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x16_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vp9_sad32x16_avg/, "$sse2_x86inc";
+specialize qw/vp9_sad32x16_avg avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad16x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
 specialize qw/vp9_sad16x32_avg/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad32x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vp9_sad32x32_avg/, "$sse2_x86inc";
+specialize qw/vp9_sad32x32_avg avx2/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_sad16x16_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
 specialize qw/vp9_sad16x16_avg/, "$sse2_x86inc";
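
The _avg prototypes take an extra second_pred block: the reference is first averaged (with rounding) against second_pred, and the SAD is then taken against the source, which is exactly what the _mm256_avg_epu8 step in the new AVX2 file below does. A scalar sketch of that semantic, assuming second_pred is packed with a stride equal to the block width as in the AVX2 code:

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad_avg_ref(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                const uint8_t *second_pred,
                                int width, int height) {
  unsigned int sad = 0;
  int row, col;
  for (row = 0; row < height; ++row) {
    for (col = 0; col < width; ++col) {
      const int avg = (ref[col] + second_pred[col] + 1) >> 1;  /* rounded */
      sad += abs(src[col] - avg);
    }
    src += src_stride;
    ref += ref_stride;
    second_pred += width;  /* packed prediction buffer */
  }
  return sad;
}
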
@@ -0,0 +1,180 @@
/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <immintrin.h>
#include <stdint.h>  /* uint8_t */
#include "vpx_ports/mem.h"

#define FSAD64_H(h) \
unsigned int vp9_sad64x##h##_avx2(const uint8_t *src_ptr, \
                                  int src_stride, \
                                  const uint8_t *ref_ptr, \
                                  int ref_stride) { \
  int i, res; \
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
  __m256i sum_sad = _mm256_setzero_si256(); \
  __m256i sum_sad_h; \
  __m128i sum_sad128; \
  for (i = 0; i < h; i++) { \
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
    sad1_reg = _mm256_sad_epu8(ref1_reg, \
                               _mm256_loadu_si256((__m256i const *)src_ptr)); \
    sad2_reg = _mm256_sad_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
    sum_sad = _mm256_add_epi32(sum_sad, \
                               _mm256_add_epi32(sad1_reg, sad2_reg)); \
    ref_ptr += ref_stride; \
    src_ptr += src_stride; \
  } \
  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
  res = _mm_cvtsi128_si32(sum_sad128); \
  return res; \
}
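
/* Reduction note (applies to every kernel in this file): _mm256_sad_epu8
 * yields four 64-bit partial sums, one per 8-byte group. The epilogue
 * folds them: _mm256_srli_si256 shifts each 128-bit lane right by 8 bytes
 * so _mm256_add_epi32 combines the two sums within each lane, the upper
 * lane is added onto the lower via _mm256_extracti128_si256/_mm_add_epi32,
 * and _mm_cvtsi128_si32 reads out the final 32-bit total. */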

#define FSAD32_H(h) \
unsigned int vp9_sad32x##h##_avx2(const uint8_t *src_ptr, \
                                  int src_stride, \
                                  const uint8_t *ref_ptr, \
                                  int ref_stride) { \
  int i, res; \
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
  __m256i sum_sad = _mm256_setzero_si256(); \
  __m256i sum_sad_h; \
  __m128i sum_sad128; \
  int ref2_stride = ref_stride << 1; \
  int src2_stride = src_stride << 1; \
  int max = h >> 1; \
  for (i = 0; i < max; i++) { \
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
    sad1_reg = _mm256_sad_epu8(ref1_reg, \
                               _mm256_loadu_si256((__m256i const *)src_ptr)); \
    sad2_reg = _mm256_sad_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
    sum_sad = _mm256_add_epi32(sum_sad, \
                               _mm256_add_epi32(sad1_reg, sad2_reg)); \
    ref_ptr += ref2_stride; \
    src_ptr += src2_stride; \
  } \
  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
  res = _mm_cvtsi128_si32(sum_sad128); \
  return res; \
}

#define FSAD64 \
FSAD64_H(64); \
FSAD64_H(32);

#define FSAD32 \
FSAD32_H(64); \
FSAD32_H(32); \
FSAD32_H(16);

FSAD64;
FSAD32;

#undef FSAD64
#undef FSAD32
#undef FSAD64_H
#undef FSAD32_H
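
/* FSAD64/FSAD32 above expand into vp9_sad64x64_avx2, vp9_sad64x32_avx2,
 * vp9_sad32x64_avx2, vp9_sad32x32_avx2 and vp9_sad32x16_avx2; the 32-wide
 * kernels cover two rows per loop iteration (max = h >> 1, doubled
 * strides) to fill the full 64-byte AVX2 load width. The *_avg kernels
 * below repeat the same structure with one extra step: the reference rows
 * are averaged against second_pred before the SAD is taken. */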

#define FSADAVG64_H(h) \
unsigned int vp9_sad64x##h##_avg_avx2(const uint8_t *src_ptr, \
                                      int src_stride, \
                                      const uint8_t *ref_ptr, \
                                      int ref_stride, \
                                      const uint8_t *second_pred) { \
  int i, res; \
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
  __m256i sum_sad = _mm256_setzero_si256(); \
  __m256i sum_sad_h; \
  __m128i sum_sad128; \
  for (i = 0; i < h; i++) { \
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
    ref1_reg = _mm256_avg_epu8(ref1_reg, \
               _mm256_loadu_si256((__m256i const *)second_pred)); \
    ref2_reg = _mm256_avg_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
    sad1_reg = _mm256_sad_epu8(ref1_reg, \
                               _mm256_loadu_si256((__m256i const *)src_ptr)); \
    sad2_reg = _mm256_sad_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
    sum_sad = _mm256_add_epi32(sum_sad, \
                               _mm256_add_epi32(sad1_reg, sad2_reg)); \
    ref_ptr += ref_stride; \
    src_ptr += src_stride; \
    second_pred += 64; \
  } \
  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
  res = _mm_cvtsi128_si32(sum_sad128); \
  return res; \
}

#define FSADAVG32_H(h) \
unsigned int vp9_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \
                                      int src_stride, \
                                      const uint8_t *ref_ptr, \
                                      int ref_stride, \
                                      const uint8_t *second_pred) { \
  int i, res; \
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
  __m256i sum_sad = _mm256_setzero_si256(); \
  __m256i sum_sad_h; \
  __m128i sum_sad128; \
  int ref2_stride = ref_stride << 1; \
  int src2_stride = src_stride << 1; \
  int max = h >> 1; \
  for (i = 0; i < max; i++) { \
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
    ref1_reg = _mm256_avg_epu8(ref1_reg, \
               _mm256_loadu_si256((__m256i const *)second_pred)); \
    ref2_reg = _mm256_avg_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
    sad1_reg = _mm256_sad_epu8(ref1_reg, \
                               _mm256_loadu_si256((__m256i const *)src_ptr)); \
    sad2_reg = _mm256_sad_epu8(ref2_reg, \
               _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
    sum_sad = _mm256_add_epi32(sum_sad, \
                               _mm256_add_epi32(sad1_reg, sad2_reg)); \
    ref_ptr += ref2_stride; \
    src_ptr += src2_stride; \
    second_pred += 64; \
  } \
  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
  res = _mm_cvtsi128_si32(sum_sad128); \
  return res; \
}

#define FSADAVG64 \
FSADAVG64_H(64); \
FSADAVG64_H(32);

#define FSADAVG32 \
FSADAVG32_H(64); \
FSADAVG32_H(32); \
FSADAVG32_H(16);

FSADAVG64;
FSADAVG32;

#undef FSADAVG64
#undef FSADAVG32
#undef FSADAVG64_H
#undef FSADAVG32_H
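
A quick standalone check of one kernel against the scalar definition (illustrative; the tree's sad_test.cc covers this systematically). It assumes the file above is compiled in and that the host CPU supports AVX2:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

unsigned int vp9_sad64x64_avx2(const uint8_t *, int, const uint8_t *, int);

int main(void) {
  enum { STRIDE = 128, SIZE = 64 };  /* stride deliberately != width */
  static uint8_t src[STRIDE * SIZE], ref[STRIDE * SIZE];
  unsigned int expected = 0;
  int r, c;
  for (r = 0; r < SIZE; ++r) {
    for (c = 0; c < SIZE; ++c) {
      src[r * STRIDE + c] = (uint8_t)rand();
      ref[r * STRIDE + c] = (uint8_t)rand();
      expected += abs(src[r * STRIDE + c] - ref[r * STRIDE + c]);
    }
  }
  printf("avx2 = %u, scalar = %u\n",
         vp9_sad64x64_avx2(src, STRIDE, ref, STRIDE), expected);
  return 0;
}
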
@@ -118,6 +118,7 @@ VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3_x86_64.asm
 endif
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
 VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
+VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_sad_intrin_avx2.c
 VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt_x86_64.asm
 
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c