Merge "Revert "Optimize wedge partition selection."" into nextgenv2

James Zern 2016-06-10 03:49:28 +00:00 committed by Gerrit Code Review
Parents: 9d924a0c4a 95340fccb3
Commit: 667db87a1b
9 changed files: 121 additions and 1035 deletions

View file

@@ -185,7 +185,6 @@ ifeq ($(CONFIG_EXT_INTER),yes)
LIBVPX_TEST_SRCS-$(HAVE_SSSE3) += masked_variance_test.cc
LIBVPX_TEST_SRCS-$(HAVE_SSSE3) += masked_sad_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += blend_mask6_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_wedge_utils_test.cc
endif
ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)

View file

@@ -1,399 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "vpx_ports/mem.h"
#include "./vpx_dsp_rtcd.h"
#include "./vp10_rtcd.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vp10/common/enums.h"
#include "test/array_utils.h"
#include "test/assertion_helpers.h"
#include "test/function_equivalence_test.h"
#include "test/randomise.h"
#include "test/register_state_check.h"
#include "test/snapshot.h"
#define WEDGE_WEIGHT_BITS 6
#define MAX_MASK_VALUE (1 << (WEDGE_WEIGHT_BITS))
using std::tr1::make_tuple;
using libvpx_test::FunctionEquivalenceTest;
using libvpx_test::Snapshot;
using libvpx_test::Randomise;
using libvpx_test::array_utils::arraySet;
using libvpx_test::assertion_helpers::ArraysEq;
using libvpx_test::assertion_helpers::ArraysEqWithin;
namespace {
static const int16_t int13_max = (1<<12) - 1;
//////////////////////////////////////////////////////////////////////////////
// vp10_wedge_sse_from_residuals - functionality
//////////////////////////////////////////////////////////////////////////////
class WedgeUtilsSSEFuncTest : public testing::Test {
protected:
Snapshot snapshot;
Randomise randomise;
};
static void equiv_blend_residuals(int16_t *r,
const int16_t *r0,
const int16_t *r1,
const uint8_t *m,
int N) {
for (int i = 0 ; i < N ; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int16_t R = m0 * r0[i] + m1 * r1[i];
// Note that this rounding is designed to match the result
// you would get when actually blending the 2 predictors and computing
// the residuals.
r[i] = ROUND_POWER_OF_TWO(R - 1, WEDGE_WEIGHT_BITS);
}
}
static uint64_t equiv_sse_from_residuals(const int16_t *r0,
const int16_t *r1,
const uint8_t *m,
int N) {
uint64_t acc = 0;
for (int i = 0 ; i < N ; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int16_t R = m0 * r0[i] + m1 * r1[i];
const int32_t r = ROUND_POWER_OF_TWO(R - 1, WEDGE_WEIGHT_BITS);
acc += r * r;
}
return acc;
}
TEST_F(WedgeUtilsSSEFuncTest, ResidualBlendingEquiv) {
for (int i = 0 ; i < 1000 && !HasFatalFailure(); i++) {
uint8_t s[MAX_SB_SQUARE];
uint8_t p0[MAX_SB_SQUARE];
uint8_t p1[MAX_SB_SQUARE];
uint8_t p[MAX_SB_SQUARE];
int16_t r0[MAX_SB_SQUARE];
int16_t r1[MAX_SB_SQUARE];
int16_t r_ref[MAX_SB_SQUARE];
int16_t r_tst[MAX_SB_SQUARE];
uint8_t m[MAX_SB_SQUARE];
randomise(s);
randomise(m, 0, MAX_MASK_VALUE + 1);
const int w = 1 << randomise.uniform<uint32_t>(3, MAX_SB_SIZE_LOG2);
const int h = 1 << randomise.uniform<uint32_t>(3, MAX_SB_SIZE_LOG2);
const int N = w * h;
for (int j = 0 ; j < N ; j++) {
p0[j] = clamp(s[j] + randomise.uniform<int>(-16, 17), 0, UINT8_MAX);
p1[j] = clamp(s[j] + randomise.uniform<int>(-16, 17), 0, UINT8_MAX);
}
vpx_blend_mask6(p, w, p0, w, p1, w, m, w, h, w, 0, 0);
vpx_subtract_block(h, w, r0, w, s, w, p0, w);
vpx_subtract_block(h, w, r1, w, s, w, p1, w);
vpx_subtract_block(h, w, r_ref, w, s, w, p, w);
equiv_blend_residuals(r_tst, r0, r1, m, N);
ASSERT_TRUE(ArraysEqWithin(r_ref, r_tst, 0, N));
uint64_t ref_sse = vpx_sum_squares_i16(r_ref, N);
uint64_t tst_sse = equiv_sse_from_residuals(r0, r1, m, N);
ASSERT_EQ(ref_sse, tst_sse);
}
}
static uint64_t sse_from_residuals(const int16_t *r0,
const int16_t *r1,
const uint8_t *m,
int N) {
uint64_t acc = 0;
for (int i = 0 ; i < N ; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int32_t r = m0 * r0[i] + m1 * r1[i];
acc += r * r;
}
return ROUND_POWER_OF_TWO(acc, 2 * WEDGE_WEIGHT_BITS);
}
TEST_F(WedgeUtilsSSEFuncTest, ResidualBlendingMethod) {
for (int i = 0 ; i < 1000 && !HasFatalFailure(); i++) {
int16_t r0[MAX_SB_SQUARE];
int16_t r1[MAX_SB_SQUARE];
int16_t d[MAX_SB_SQUARE];
uint8_t m[MAX_SB_SQUARE];
randomise(r1, 2 * INT8_MIN, 2 * INT8_MAX + 1);
randomise(d, 2 * INT8_MIN, 2 * INT8_MAX + 1);
randomise(m, 0, MAX_MASK_VALUE + 1);
const int N = 64 * randomise.uniform<uint32_t>(1, MAX_SB_SQUARE/64);
for (int j = 0 ; j < N ; j++)
r0[j] = r1[j] + d[j];
uint64_t ref_res, tst_res;
ref_res = sse_from_residuals(r0, r1, m, N);
tst_res = vp10_wedge_sse_from_residuals(r1, d, m, N);
ASSERT_EQ(ref_res, tst_res);
}
}
//////////////////////////////////////////////////////////////////////////////
// vp10_wedge_sse_from_residuals - optimizations
//////////////////////////////////////////////////////////////////////////////
typedef uint64_t (*FSSE)(const int16_t *r1,
const int16_t *d,
const uint8_t *m,
int N);
class WedgeUtilsSSEOptTest : public FunctionEquivalenceTest<FSSE> {
protected:
void Common() {
const int N = 64 * randomise.uniform<uint32_t>(1, MAX_SB_SQUARE/64);
snapshot(r1);
snapshot(d);
snapshot(m);
uint64_t ref_res, tst_res;
ref_res = ref_func_(r1, d, m, N);
ASM_REGISTER_STATE_CHECK(tst_res = tst_func_(r1, d, m, N));
ASSERT_EQ(ref_res, tst_res);
ASSERT_TRUE(ArraysEq(snapshot.get(r1), r1));
ASSERT_TRUE(ArraysEq(snapshot.get(d), d));
ASSERT_TRUE(ArraysEq(snapshot.get(m), m));
}
Snapshot snapshot;
Randomise randomise;
DECLARE_ALIGNED(16, int16_t, r1[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, d[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, m[MAX_SB_SQUARE]);
};
TEST_P(WedgeUtilsSSEOptTest, RandomValues) {
for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
randomise(r1, -int13_max, int13_max + 1);
randomise(d, -int13_max, int13_max + 1);
randomise(m, 0, 65);
Common();
}
}
TEST_P(WedgeUtilsSSEOptTest, ExtremeValues) {
for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
if (randomise.uniform<bool>())
arraySet(r1, int13_max);
else
arraySet(r1, -int13_max);
if (randomise.uniform<bool>())
arraySet(d, int13_max);
else
arraySet(d, -int13_max);
arraySet(m, MAX_MASK_VALUE);
Common();
}
}
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsSSEOptTest,
::testing::Values(
make_tuple(&vp10_wedge_sse_from_residuals_c,
&vp10_wedge_sse_from_residuals_sse2)
)
);
#endif // HAVE_SSE2
//////////////////////////////////////////////////////////////////////////////
// vp10_wedge_sign_from_residuals
//////////////////////////////////////////////////////////////////////////////
typedef int (*FSign)(const int16_t *ds,
const uint8_t *m,
int N,
int64_t limit);
class WedgeUtilsSignOptTest : public FunctionEquivalenceTest<FSign> {
protected:
static const int maxSize = 8192;  // Size limited by SIMD implementation.
void Common() {
const int maxN = VPXMIN(maxSize, MAX_SB_SQUARE);
const int N = 64 * randomise.uniform<uint32_t>(1, maxN/64);
int64_t limit;
limit = (int64_t)vpx_sum_squares_i16(r0, N);
limit -= (int64_t)vpx_sum_squares_i16(r1, N);
limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
for (int i = 0 ; i < N ; i++)
ds[i] = clamp(r0[i]*r0[i] - r1[i]*r1[i], INT16_MIN, INT16_MAX);
snapshot(r0);
snapshot(r1);
snapshot(ds);
snapshot(m);
int ref_res, tst_res;
ref_res = ref_func_(ds, m, N, limit);
ASM_REGISTER_STATE_CHECK(tst_res = tst_func_(ds, m, N, limit));
ASSERT_EQ(ref_res, tst_res);
ASSERT_TRUE(ArraysEq(snapshot.get(r0), r0));
ASSERT_TRUE(ArraysEq(snapshot.get(r1), r1));
ASSERT_TRUE(ArraysEq(snapshot.get(ds), ds));
ASSERT_TRUE(ArraysEq(snapshot.get(m), m));
}
Snapshot snapshot;
Randomise randomise;
DECLARE_ALIGNED(16, int16_t, r0[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, r1[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, ds[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, m[MAX_SB_SQUARE]);
};
TEST_P(WedgeUtilsSignOptTest, RandomValues) {
for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
randomise(r0, -int13_max, int13_max+1);
randomise(r1, -int13_max, int13_max+1);
randomise(m, 0, MAX_MASK_VALUE + 1);
Common();
}
}
TEST_P(WedgeUtilsSignOptTest, ExtremeValues) {
for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
switch (randomise.uniform<int>(4)) {
case 0:
arraySet(r0, 0);
arraySet(r1, int13_max);
break;
case 1:
arraySet(r0, int13_max);
arraySet(r1, 0);
break;
case 2:
arraySet(r0, 0);
arraySet(r1, -int13_max);
break;
default:
arraySet(r0, -int13_max);
arraySet(r1, 0);
break;
}
arraySet(m, MAX_MASK_VALUE);
Common();
}
}
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsSignOptTest,
::testing::Values(
make_tuple(&vp10_wedge_sign_from_residuals_c,
&vp10_wedge_sign_from_residuals_sse2)
)
);
#endif // HAVE_SSE2
//////////////////////////////////////////////////////////////////////////////
// vp10_wedge_compute_delta_squares
//////////////////////////////////////////////////////////////////////////////
typedef void (*FDS)(int16_t *d,
const int16_t *a,
const int16_t *b,
int N);
class WedgeUtilsDeltaSquaresOptTest : public FunctionEquivalenceTest<FDS> {
protected:
void Common() {
const int N = 64 * randomise.uniform<uint32_t>(1, MAX_SB_SQUARE/64);
randomise(d_ref);
randomise(d_tst);
snapshot(a);
snapshot(b);
ref_func_(d_ref, a, b, N);
ASM_REGISTER_STATE_CHECK(tst_func_(d_tst, a, b, N));
ASSERT_TRUE(ArraysEqWithin(d_ref, d_tst, 0, N));
ASSERT_TRUE(ArraysEq(snapshot.get(a), a));
ASSERT_TRUE(ArraysEq(snapshot.get(b), b));
}
Snapshot snapshot;
Randomise randomise;
DECLARE_ALIGNED(16, int16_t, a[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, b[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, d_ref[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int16_t, d_tst[MAX_SB_SQUARE]);
};
TEST_P(WedgeUtilsDeltaSquaresOptTest, RandomValues) {
for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
randomise(a);
randomise(b, -INT16_MAX, INT16_MAX + 1);
Common();
}
}
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsDeltaSquaresOptTest,
::testing::Values(
make_tuple(&vp10_wedge_compute_delta_squares_c,
&vp10_wedge_compute_delta_squares_sse2)
)
);
#endif // HAVE_SSE2
} // namespace

View file

@@ -2440,6 +2440,7 @@ static void build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, int plane,
                                                 int wedge_offset_x,
                                                 int wedge_offset_y,
#endif  // CONFIG_SUPERTX
+                                                int mi_x, int mi_y,
                                                 uint8_t *ext_dst0,
                                                 int ext_dst_stride0,
                                                 uint8_t *ext_dst1,
@@ -2453,6 +2454,8 @@ static void build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, int plane,
  (void) block;
  (void) bw;
  (void) bh;
+ (void) mi_x;
+ (void) mi_y;

  if (is_compound
      && is_interinter_wedge_used(mbmi->sb_type)
@@ -2516,9 +2519,12 @@ static void build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, int plane,
void vp10_build_wedge_inter_predictor_from_buf(
    MACROBLOCKD *xd, BLOCK_SIZE bsize,
    int plane_from, int plane_to,
+   int mi_row, int mi_col,
    uint8_t *ext_dst0[3], int ext_dst_stride0[3],
    uint8_t *ext_dst1[3], int ext_dst_stride1[3]) {
  int plane;
+ const int mi_x = mi_col * MI_SIZE;
+ const int mi_y = mi_row * MI_SIZE;
  for (plane = plane_from; plane <= plane_to; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
@@ -2537,6 +2543,7 @@ void vp10_build_wedge_inter_predictor_from_buf(
#if CONFIG_SUPERTX
        0, 0,
#endif
+       mi_x, mi_y,
        ext_dst0[plane],
        ext_dst_stride0[plane],
        ext_dst1[plane],
@@ -2547,6 +2554,7 @@ void vp10_build_wedge_inter_predictor_from_buf(
#if CONFIG_SUPERTX
        0, 0,
#endif
+       mi_x, mi_y,
        ext_dst0[plane],
        ext_dst_stride0[plane],
        ext_dst1[plane],

View file

@@ -646,6 +646,7 @@ void vp10_build_inter_predictors_for_planes_single_buf(
void vp10_build_wedge_inter_predictor_from_buf(
    MACROBLOCKD *xd, BLOCK_SIZE bsize,
    int plane_from, int plane_to,
+   int mi_row, int mi_col,
    uint8_t *ext_dst0[3], int ext_dst_stride0[3],
    uint8_t *ext_dst1[3], int ext_dst_stride1[3]);
#endif  // CONFIG_EXT_INTER

View file

@@ -690,15 +690,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
}
# End vp10_high encoder functions

-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-  add_proto qw/uint64_t vp10_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
-  specialize qw/vp10_wedge_sse_from_residuals sse2/;
-  add_proto qw/int vp10_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
-  specialize qw/vp10_wedge_sign_from_residuals sse2/;
-  add_proto qw/void vp10_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
-  specialize qw/vp10_wedge_compute_delta_squares sse2/;
-}
}
# end encoder functions

1;

View file

@@ -6535,8 +6535,8 @@ static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
static int estimate_wedge_sign(const VP10_COMP *cpi,
                               const MACROBLOCK *x,
                               const BLOCK_SIZE bsize,
-                              const uint8_t *pred0, int stride0,
-                              const uint8_t *pred1, int stride1) {
+                              uint8_t *pred0, int stride0,
+                              uint8_t *pred1, int stride1) {
  const struct macroblock_plane *const p = &x->plane[0];
  const uint8_t *src = p->src.buf;
  int src_stride = p->src.stride;
@@ -6702,195 +6702,6 @@ static INTERP_FILTER predict_interp_filter(const VP10_COMP *cpi,
}
#endif
#if CONFIG_EXT_INTER
// Choose the best wedge index and sign
static int64_t pick_wedge(const VP10_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1,
int *const best_wedge_sign,
int *const best_wedge_index) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const src = &x->plane[0].src;
const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[bsize];
const int N = bw * bh;
int rate;
int64_t dist;
int64_t rd, best_rd = INT64_MAX;
int wedge_index;
int wedge_sign;
int wedge_types = (1 << get_wedge_bits_lookup(bsize));
const uint8_t *mask;
uint64_t sse;
#if CONFIG_VP9_HIGHBITDEPTH
const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
#else
const int bd_round = 0;
#endif // CONFIG_VP9_HIGHBITDEPTH
int16_t r0[MAX_SB_SQUARE];
int16_t r1[MAX_SB_SQUARE];
int16_t d10[MAX_SB_SQUARE];
int16_t ds[MAX_SB_SQUARE];
int64_t sign_limit;
#if CONFIG_VP9_HIGHBITDEPTH
if (hbd) {
vpx_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
vpx_highbd_subtract_block(bh, bw, d10, bw,
CONVERT_TO_BYTEPTR(p1), bw,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
} else // NOLINT
#endif // CONFIG_VP9_HIGHBITDEPTH
{
vpx_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
}
sign_limit = ((int64_t)vpx_sum_squares_i16(r0, N)
- (int64_t)vpx_sum_squares_i16(r1, N))
* (1 << WEDGE_WEIGHT_BITS) / 2;
vp10_wedge_compute_delta_squares(ds, r0, r1, N);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mask = vp10_get_soft_mask(wedge_index, 0, bsize, 0, 0);
wedge_sign = vp10_wedge_sign_from_residuals(ds, mask, N, sign_limit);
mask = vp10_get_soft_mask(wedge_index, wedge_sign, bsize, 0, 0);
sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
sse = ROUNDZ_POWER_OF_TWO(sse, bd_round);
model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
if (rd < best_rd) {
*best_wedge_index = wedge_index;
*best_wedge_sign = wedge_sign;
best_rd = rd;
}
}
return best_rd;
}
// Choose the best wedge index for the specified sign
static int64_t pick_wedge_fixed_sign(const VP10_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1,
const int wedge_sign,
int *const best_wedge_index) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const src = &x->plane[0].src;
const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[bsize];
const int N = bw * bh;
int rate;
int64_t dist;
int64_t rd, best_rd = INT64_MAX;
int wedge_index;
int wedge_types = (1 << get_wedge_bits_lookup(bsize));
const uint8_t *mask;
uint64_t sse;
#if CONFIG_VP9_HIGHBITDEPTH
const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
#else
const int bd_round = 0;
#endif // CONFIG_VP9_HIGHBITDEPTH
int16_t r1[MAX_SB_SQUARE];
int16_t d10[MAX_SB_SQUARE];
#if CONFIG_VP9_HIGHBITDEPTH
if (hbd) {
vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
vpx_highbd_subtract_block(bh, bw, d10, bw,
CONVERT_TO_BYTEPTR(p1), bw,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
} else // NOLINT
#endif // CONFIG_VP9_HIGHBITDEPTH
{
vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
}
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mask = vp10_get_soft_mask(wedge_index, wedge_sign, bsize, 0, 0);
sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
sse = ROUNDZ_POWER_OF_TWO(sse, bd_round);
model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
if (rd < best_rd) {
*best_wedge_index = wedge_index;
best_rd = rd;
}
}
return best_rd;
}
static int64_t pick_interinter_wedge(const VP10_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1) {
const MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
int64_t rd;
int wedge_index = -1;
int wedge_sign = 0;
assert(is_interinter_wedge_used(bsize));
if (cpi->sf.fast_wedge_sign_estimate) {
wedge_sign = estimate_wedge_sign(cpi, x, bsize, p0, bw, p1, bw);
rd = pick_wedge_fixed_sign(cpi, x, bsize, p0, p1, wedge_sign, &wedge_index);
} else {
rd = pick_wedge(cpi, x, bsize, p0, p1, &wedge_sign, &wedge_index);
}
mbmi->interinter_wedge_sign = wedge_sign;
mbmi->interinter_wedge_index = wedge_index;
return rd;
}
static int64_t pick_interintra_wedge(const VP10_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1) {
const MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int64_t rd;
int wedge_index = -1;
assert(is_interintra_wedge_used(bsize));
rd = pick_wedge_fixed_sign(cpi, x, bsize, p0, p1, 0, &wedge_index);
mbmi->interintra_wedge_sign = 0;
mbmi->interintra_wedge_index = wedge_index;
return rd;
}
#endif // CONFIG_EXT_INTER
static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
                                 BLOCK_SIZE bsize,
                                 int *rate2, int64_t *distortion,
@@ -6930,7 +6741,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
  int_mv cur_mv[2];
  int rate_mv = 0;
#if CONFIG_EXT_INTER
-  const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
  int mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
  int_mv single_newmv[MAX_REF_FRAMES];
  const unsigned int *const interintra_mode_cost =
@@ -6941,11 +6751,11 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#endif
#endif  // CONFIG_EXT_INTER
#if CONFIG_VP9_HIGHBITDEPTH
-  DECLARE_ALIGNED(16, uint8_t, tmp_buf_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
-#else
-  DECLARE_ALIGNED(16, uint8_t, tmp_buf_[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  uint8_t *tmp_buf;
+  DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * MAX_SB_SQUARE]);
+  uint8_t *tmp_buf;
+#else
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * MAX_SB_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_OBMC
  int allow_obmc =
@@ -7019,11 +6829,12 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#endif
#if CONFIG_VP9_HIGHBITDEPTH
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-    tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf_);
-  else
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
+  } else {
+    tmp_buf = (uint8_t *)tmp_buf16;
+  }
#endif  // CONFIG_VP9_HIGHBITDEPTH
-  tmp_buf = tmp_buf_;

  if (is_comp_pred) {
    if (frame_mv[refs[0]].as_int == INVALID_MV ||
@@ -7436,10 +7247,13 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#endif  // CONFIG_OBMC
  if (is_comp_pred && is_interinter_wedge_used(bsize)) {
+   int wedge_index, best_wedge_index = WEDGE_NONE;
+   int wedge_sign, best_wedge_sign = 0;
    int rate_sum, rs;
    int64_t dist_sum;
    int64_t best_rd_nowedge = INT64_MAX;
    int64_t best_rd_wedge = INT64_MAX;
+   int wedge_types;
    int tmp_skip_txfm_sb;
    int64_t tmp_skip_sse_sb;
@@ -7457,15 +7271,21 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
    // Disable wedge search if source variance is small
    if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
        best_rd_nowedge / 3 < ref_best_rd) {
-     uint8_t pred0[2 * MAX_SB_SQUARE];
-     uint8_t pred1[2 * MAX_SB_SQUARE];
-     uint8_t *preds0[1] = {pred0};
-     uint8_t *preds1[1] = {pred1};
-     int strides[1] = {bw};
+     uint8_t pred0[2 * MAX_SB_SQUARE * 3];
+     uint8_t pred1[2 * MAX_SB_SQUARE * 3];
+     uint8_t *preds0[3] = {pred0,
+                           pred0 + 2 * MAX_SB_SQUARE,
+                           pred0 + 4 * MAX_SB_SQUARE};
+     uint8_t *preds1[3] = {pred1,
+                           pred1 + 2 * MAX_SB_SQUARE,
+                           pred1 + 4 * MAX_SB_SQUARE};
+     int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+     int est_wedge_sign;
      mbmi->use_wedge_interinter = 1;
      rs = vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+     wedge_types = (1 << get_wedge_bits_lookup(bsize));
      vp10_build_inter_predictors_for_planes_single_buf(
          xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
@@ -7473,8 +7293,49 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
          xd, bsize, 0, 0, mi_row, mi_col, 1, preds1, strides);

      // Choose the best wedge
-     best_rd_wedge = pick_interinter_wedge(cpi, x, bsize, pred0, pred1);
-     best_rd_wedge += RDCOST(x->rdmult, x->rddiv, rs + rate_mv, 0);
+     if (cpi->sf.fast_wedge_sign_estimate) {
+       est_wedge_sign = estimate_wedge_sign(
+           cpi, x, bsize, pred0, MAX_SB_SIZE, pred1, MAX_SB_SIZE);
+       best_wedge_sign = mbmi->interinter_wedge_sign = est_wedge_sign;
+       for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+         mbmi->interinter_wedge_index = wedge_index;
+         vp10_build_wedge_inter_predictor_from_buf(xd, bsize,
+                                                   0, 0, mi_row, mi_col,
+                                                   preds0, strides,
+                                                   preds1, strides);
+         model_rd_for_sb(cpi, bsize, x, xd, 0, 0,
+                         &rate_sum, &dist_sum,
+                         &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+         rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+         if (rd < best_rd_wedge) {
+           best_wedge_index = wedge_index;
+           best_rd_wedge = rd;
+         }
+       }
+     } else {
+       for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+         for (wedge_sign = 0; wedge_sign < 2; ++wedge_sign) {
+           mbmi->interinter_wedge_index = wedge_index;
+           mbmi->interinter_wedge_sign = wedge_sign;
+           vp10_build_wedge_inter_predictor_from_buf(xd, bsize,
+                                                     0, 0, mi_row, mi_col,
+                                                     preds0, strides,
+                                                     preds1, strides);
+           model_rd_for_sb(cpi, bsize, x, xd, 0, 0,
+                           &rate_sum, &dist_sum,
+                           &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+           rd = RDCOST(x->rdmult, x->rddiv,
+                       rs + rate_mv + rate_sum, dist_sum);
+           if (rd < best_rd_wedge) {
+             best_wedge_index = wedge_index;
+             best_wedge_sign = wedge_sign;
+             best_rd_wedge = rd;
+           }
+         }
+       }
+     }
+     mbmi->interinter_wedge_index = best_wedge_index;
+     mbmi->interinter_wedge_sign = best_wedge_sign;

      if (have_newmv_in_inter_mode(this_mode)) {
        int_mv tmp_mv[2];
@@ -7519,6 +7380,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
          mbmi->mv[1].as_int = cur_mv[1].as_int;
          tmp_rate_mv = rate_mv;
          vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0,
+                                                   mi_row, mi_col,
                                                    preds0, strides,
                                                    preds1, strides);
        }
@@ -7533,6 +7395,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
        if (best_rd_wedge < best_rd_nowedge) {
          mbmi->use_wedge_interinter = 1;
+         mbmi->interinter_wedge_index = best_wedge_index;
+         mbmi->interinter_wedge_sign = best_wedge_sign;
          xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
          xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
          *rate2 += tmp_rate_mv - rate_mv;
@@ -7546,7 +7410,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
        }
      } else {
        vp10_build_wedge_inter_predictor_from_buf(xd, bsize,
-                                                 0, 0,
+                                                 0, 0, mi_row, mi_col,
                                                  preds0, strides,
                                                  preds1, strides);
        vp10_subtract_plane(x, bsize, 0);
@@ -7558,6 +7422,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
          best_rd_wedge = rd;
        if (best_rd_wedge < best_rd_nowedge) {
          mbmi->use_wedge_interinter = 1;
+         mbmi->interinter_wedge_index = best_wedge_index;
+         mbmi->interinter_wedge_sign = best_wedge_sign;
        } else {
          mbmi->use_wedge_interinter = 0;
        }
@@ -7580,11 +7446,13 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
  }

  if (is_comp_interintra_pred) {
+   const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
    INTERINTRA_MODE best_interintra_mode = II_DC_PRED;
    int64_t best_interintra_rd = INT64_MAX;
    int rmode, rate_sum;
    int64_t dist_sum;
    int j;
+   int wedge_types, wedge_index, best_wedge_index = -1;
    int64_t best_interintra_rd_nowedge = INT64_MAX;
    int64_t best_interintra_rd_wedge = INT64_MAX;
    int rwedge;
@@ -7592,7 +7460,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
    int tmp_rate_mv = 0;
    int tmp_skip_txfm_sb;
    int64_t tmp_skip_sse_sb;
-   DECLARE_ALIGNED(16, uint8_t, intrapred_[2 * MAX_SB_SQUARE]);
+   DECLARE_ALIGNED(16, uint8_t,
+                   intrapred_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
    uint8_t *intrapred;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -7605,7 +7474,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
    mbmi->ref_frame[1] = NONE;
    for (j = 0; j < MAX_MB_PLANE; j++) {
      xd->plane[j].dst.buf = tmp_buf + j * MAX_SB_SQUARE;
-     xd->plane[j].dst.stride = bw;
+     xd->plane[j].dst.stride = MAX_SB_SIZE;
    }
    vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
    restore_dst_buf(xd, orig_dst, orig_dst_stride);
@@ -7616,9 +7485,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
      mbmi->interintra_mode = (INTERINTRA_MODE)j;
      rmode = interintra_mode_cost[mbmi->interintra_mode];
      vp10_build_intra_predictors_for_interintra(
-         xd, bsize, 0, intrapred, bw);
-     vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw,
-                             intrapred, bw);
+         xd, bsize, 0, intrapred, MAX_SB_SIZE);
+     vp10_combine_interintra(xd, bsize, 0, tmp_buf, MAX_SB_SIZE,
+                             intrapred, MAX_SB_SIZE);
      model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                      &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
      rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
@@ -7630,9 +7499,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
    mbmi->interintra_mode = best_interintra_mode;
    rmode = interintra_mode_cost[mbmi->interintra_mode];
    vp10_build_intra_predictors_for_interintra(
-       xd, bsize, 0, intrapred, bw);
-   vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw,
-                           intrapred, bw);
+       xd, bsize, 0, intrapred, MAX_SB_SIZE);
+   vp10_combine_interintra(xd, bsize, 0, tmp_buf, MAX_SB_SIZE,
+                           intrapred, MAX_SB_SIZE);
    vp10_subtract_plane(x, bsize, 0);
    rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                             &tmp_skip_txfm_sb, &tmp_skip_sse_sb,
@@ -7655,20 +7524,32 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
    // Disable wedge search if source variance is small
    if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
      mbmi->use_wedge_interintra = 1;
+     wedge_types = (1 << get_wedge_bits_lookup(bsize));
      rwedge = vp10_cost_literal(get_interintra_wedge_bits(bsize)) +
          vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
-     best_interintra_rd_wedge = pick_interintra_wedge(cpi, x, bsize,
-                                                      intrapred_, tmp_buf_);
-     best_interintra_rd_wedge += RDCOST(x->rdmult, x->rddiv,
-                                        rmode + rate_mv + rwedge, 0);
+     for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+       mbmi->interintra_wedge_index = wedge_index;
+       mbmi->interintra_wedge_sign = 0;
+       vp10_combine_interintra(xd, bsize, 0,
+                               tmp_buf, MAX_SB_SIZE,
+                               intrapred, MAX_SB_SIZE);
+       model_rd_for_sb(cpi, bsize, x, xd, 0, 0,
+                       &rate_sum, &dist_sum,
+                       &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+       rd = RDCOST(x->rdmult, x->rddiv,
+                   rmode + rate_mv + rwedge + rate_sum, dist_sum);
+       if (rd < best_interintra_rd_wedge) {
+         best_interintra_rd_wedge = rd;
+         best_wedge_index = wedge_index;
+       }
+     }
      // Refine motion vector.
-     if (have_newmv_in_inter_mode(this_mode)) {
+     if (have_newmv_in_inter_mode(this_mode) && best_wedge_index > -1) {
        // get negative of mask
        const uint8_t* mask = vp10_get_soft_mask(
-           mbmi->interintra_wedge_index, 1, bsize, 0, 0);
+           best_wedge_index, 1, bsize, 0, 0);
+       mbmi->interintra_wedge_index = best_wedge_index;
+       mbmi->interintra_wedge_sign = 0;
        do_masked_motion_search(cpi, x, mask, bw, bsize,
                                mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
                                0, mv_idx);
@@ -7685,11 +7566,13 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
        tmp_rate_mv = rate_mv;
      }
    } else {
+     mbmi->interintra_wedge_index = best_wedge_index;
+     mbmi->interintra_wedge_sign = 0;
      tmp_mv.as_int = cur_mv[0].as_int;
      tmp_rate_mv = rate_mv;
      vp10_combine_interintra(xd, bsize, 0,
-                             tmp_buf, bw,
-                             intrapred, bw);
+                             tmp_buf, MAX_SB_SIZE,
+                             intrapred, MAX_SB_SIZE);
    }
    // Evaluate closer to true rd
    vp10_subtract_plane(x, bsize, 0);
@@ -7702,6 +7585,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
        best_interintra_rd_wedge = rd;
      if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
        mbmi->use_wedge_interintra = 1;
+       mbmi->interintra_wedge_index = best_wedge_index;
+       mbmi->interintra_wedge_sign = 0;
        best_interintra_rd = best_interintra_rd_wedge;
        mbmi->mv[0].as_int = tmp_mv.as_int;
        *rate2 += tmp_rate_mv - rate_mv;

View file

@@ -1,135 +0,0 @@
/*
* Copyright (c) 2016 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vp10/common/reconinter.h"
#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
/**
* Computes SSE of a compound predictor constructed from 2 fundamental
* predictors p0 and p1 using blending with mask.
*
* r1: Residuals of p1.
* (source - p1)
* d: Difference of p1 and p0.
* (p1 - p0)
* m: The blending mask
* N: Number of pixels
*
* 'r1', 'd', and 'm' are contiguous.
*
* Computes:
* Sum((MAX_MASK_VALUE*r1 + mask*d)**2), which is equivalent to:
* Sum((mask*r0 + (MAX_MASK_VALUE-mask)*r1)**2),
* where r0 is (source - p0), and r1 is (source - p1), which in turn
* is equivalent to:
* Sum((source*MAX_MASK_VALUE - (mask*p0 + (MAX_MASK_VALUE-mask)*p1))**2),
* which is the SSE of the residuals of the compound predictor scaled up by
* MAX_MASK_VALUE**2.
*
* Note that we clamp the partial term in the loop to 16 bits signed. This is
* to facilitate equivalent SIMD implementation. It should have no effect if
* residuals are within 16 - WEDGE_WEIGHT_BITS (= 10) bits signed, which always
* holds for 8 bit input, and on real input, it should hold practically always,
* as residuals are expected to be small.
*/
uint64_t vp10_wedge_sse_from_residuals_c(const int16_t *r1,
const int16_t *d,
const uint8_t *m,
int N) {
uint64_t csse = 0;
int i;
assert(N % 64 == 0);
for (i = 0 ; i < N ; i++) {
int32_t t = MAX_MASK_VALUE*r1[i] + m[i]*d[i];
t = clamp(t, INT16_MIN, INT16_MAX);
csse += t*t;
}
return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
}
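A minimal standalone sketch (editorial, not part of this change) checking the per-pixel identity quoted in the comment above: with r0 = s - p0, r1 = s - p1 and d = p1 - p0, the blended residual m*r0 + (MAX_MASK_VALUE - m)*r1 equals MAX_MASK_VALUE*r1 + m*d exactly. All names below are local to the sketch; only WEDGE_WEIGHT_BITS == 6 (so MAX_MASK_VALUE == 64) is assumed.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define M 64  /* MAX_MASK_VALUE when WEDGE_WEIGHT_BITS == 6 */

int main(void) {
  int i;
  for (i = 0; i < 100000; i++) {
    const int32_t s = rand() % 256;      /* source pixel */
    const int32_t p0 = rand() % 256;     /* first predictor */
    const int32_t p1 = rand() % 256;     /* second predictor */
    const int32_t m = rand() % (M + 1);  /* mask value in [0, M] */
    const int32_t r0 = s - p0;
    const int32_t r1 = s - p1;
    const int32_t d = p1 - p0;
    /* The scaled blended residual, in both of the equivalent forms. */
    assert(m * r0 + (M - m) * r1 == M * r1 + m * d);
  }
  return 0;
}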
/**
* Choose the mask sign for a compound predictor.
*
* ds: Difference of the squares of the residuals.
* r0**2 - r1**2
* m: The blending mask
* N: Number of pixels
* limit: Pre-computed threshold value.
* MAX_MASK_VALUE/2 * (sum(r0**2) - sum(r1**2))
*
* 'ds' and 'm' are contiguous.
*
* Returns true if the negated mask has lower SSE compared to the positive
* mask. Computation is based on:
* Sum((mask*r0 + (MAX_MASK_VALUE-mask)*r1)**2)
* >
* Sum(((MAX_MASK_VALUE-mask)*r0 + mask*r1)**2)
*
* which can be simplified to:
*
* Sum(mask*(r0**2 - r1**2)) > MAX_MASK_VALUE/2 * (sum(r0**2) - sum(r1**2))
*
* The right hand side does not depend on the mask, and needs to be passed as
* the 'limit' parameter.
*
* After pre-computing (r0**2 - r1**2), which is passed in as 'ds', the left
* hand side is simply a scalar product between an int16_t and uint8_t vector.
*
* Note that for efficiency, ds is stored on 16 bits. Real input residuals
* being small, this should not cause a noticeable issue.
*/
int vp10_wedge_sign_from_residuals_c(const int16_t *ds,
const uint8_t *m,
int N,
int64_t limit) {
int64_t acc = 0;
assert(N % 64 == 0);
do {
acc += *ds++ * *m++;
} while (--N);
return acc > limit;
}
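For reference, the 'limit' argument is the mask-independent right-hand side of the inequality above. A sketch of how the callers in this patch derive it (cf. pick_wedge() and the unit test; wedge_sign_limit is a hypothetical helper name, and vpx_sum_squares_i16() is the vpx_dsp helper those callers use):

static int64_t wedge_sign_limit(const int16_t *r0, const int16_t *r1, int N) {
  /* MAX_MASK_VALUE/2 * (sum(r0**2) - sum(r1**2)) */
  int64_t limit = (int64_t)vpx_sum_squares_i16(r0, N);
  limit -= (int64_t)vpx_sum_squares_i16(r1, N);
  limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
  return limit;
}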
/**
* Compute the element-wise difference of the squares of 2 arrays.
*
* d: Difference of the squares of the inputs: a**2 - b**2
* a: First input array
* b: Second input array
* N: Number of elements
*
* 'd', 'a', and 'b' are contiguous.
*
* The result is saturated to signed 16 bits.
*/
void vp10_wedge_compute_delta_squares_c(int16_t *d,
const int16_t *a,
const int16_t *b,
int N) {
int i;
assert(N % 64 == 0);
for (i = 0 ; i < N ; i++)
d[i] = clamp(a[i]*a[i] - b[i]*b[i], INT16_MIN, INT16_MAX);
}
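A small standalone illustration (editorial; names are local to this sketch) of why the saturation matters: squares of residuals quickly exceed the int16_t range, and the clamp above pins the stored difference at the type limits.

#include <assert.h>
#include <stdint.h>

static int16_t delta_square_one(int16_t a, int16_t b) {
  const int32_t v = (int32_t)a * a - (int32_t)b * b;
  return (int16_t)(v < INT16_MIN ? INT16_MIN : v > INT16_MAX ? INT16_MAX : v);
}

int main(void) {
  assert(delta_square_one(181, 0) == 32761);      /* 181*181 still fits */
  assert(delta_square_one(200, 0) == INT16_MAX);  /* 40000 saturates */
  assert(delta_square_one(0, 200) == INT16_MIN);  /* -40000 saturates */
  return 0;
}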

View file

@@ -1,260 +0,0 @@
/*
* Copyright (c) 2016 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <immintrin.h>
#include "vpx_dsp/x86/synonyms.h"
#include "vpx/vpx_integer.h"
#include "vp10/common/reconinter.h"
#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
/**
* See vp10_wedge_sse_from_residuals_c
*/
uint64_t vp10_wedge_sse_from_residuals_sse2(const int16_t *r1,
const int16_t *d,
const uint8_t *m,
int N) {
int n = -N;
int n8 = n + 8;
uint64_t csse;
const __m128i v_mask_max_w = _mm_set1_epi16(MAX_MASK_VALUE);
const __m128i v_zext_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
__m128i v_acc0_q = _mm_setzero_si128();
assert(N % 64 == 0);
r1 += N;
d += N;
m += N;
do {
const __m128i v_r0_w = xx_load_128(r1 + n);
const __m128i v_r1_w = xx_load_128(r1 + n8);
const __m128i v_d0_w = xx_load_128(d + n);
const __m128i v_d1_w = xx_load_128(d + n8);
const __m128i v_m01_b = xx_load_128(m + n);
const __m128i v_rd0l_w = _mm_unpacklo_epi16(v_d0_w, v_r0_w);
const __m128i v_rd0h_w = _mm_unpackhi_epi16(v_d0_w, v_r0_w);
const __m128i v_rd1l_w = _mm_unpacklo_epi16(v_d1_w, v_r1_w);
const __m128i v_rd1h_w = _mm_unpackhi_epi16(v_d1_w, v_r1_w);
const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
const __m128i v_m0l_w = _mm_unpacklo_epi16(v_m0_w, v_mask_max_w);
const __m128i v_m0h_w = _mm_unpackhi_epi16(v_m0_w, v_mask_max_w);
const __m128i v_m1l_w = _mm_unpacklo_epi16(v_m1_w, v_mask_max_w);
const __m128i v_m1h_w = _mm_unpackhi_epi16(v_m1_w, v_mask_max_w);
const __m128i v_t0l_d = _mm_madd_epi16(v_rd0l_w, v_m0l_w);
const __m128i v_t0h_d = _mm_madd_epi16(v_rd0h_w, v_m0h_w);
const __m128i v_t1l_d = _mm_madd_epi16(v_rd1l_w, v_m1l_w);
const __m128i v_t1h_d = _mm_madd_epi16(v_rd1h_w, v_m1h_w);
const __m128i v_t0_w = _mm_packs_epi32(v_t0l_d, v_t0h_d);
const __m128i v_t1_w = _mm_packs_epi32(v_t1l_d, v_t1h_d);
const __m128i v_sq0_d = _mm_madd_epi16(v_t0_w, v_t0_w);
const __m128i v_sq1_d = _mm_madd_epi16(v_t1_w, v_t1_w);
const __m128i v_sum0_q = _mm_add_epi64(_mm_and_si128(v_sq0_d, v_zext_q),
_mm_srli_epi64(v_sq0_d, 32));
const __m128i v_sum1_q = _mm_add_epi64(_mm_and_si128(v_sq1_d, v_zext_q),
_mm_srli_epi64(v_sq1_d, 32));
v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum0_q);
v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum1_q);
n8 += 16;
n += 16;
} while (n);
v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));
#if ARCH_X86_64
csse = (uint64_t)_mm_cvtsi128_si64(v_acc0_q);
#else
xx_storel_64(&csse, v_acc0_q);
#endif
return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
}
/**
* See vp10_wedge_sign_from_residuals_c
*/
int vp10_wedge_sign_from_residuals_sse2(const int16_t *ds,
const uint8_t *m,
int N,
int64_t limit) {
int64_t acc;
__m128i v_sign_d;
__m128i v_acc0_d = _mm_setzero_si128();
__m128i v_acc1_d = _mm_setzero_si128();
__m128i v_acc_q;
// Input size limited to 8192 by the use of 32 bit accumulators and m
// being between [0, 64]. Overflow might happen at larger sizes,
// though it is practically impossible on real video input.
assert(N < 8192);
assert(N % 64 == 0);
do {
const __m128i v_m01_b = xx_load_128(m);
const __m128i v_m23_b = xx_load_128(m + 16);
const __m128i v_m45_b = xx_load_128(m + 32);
const __m128i v_m67_b = xx_load_128(m + 48);
const __m128i v_d0_w = xx_load_128(ds);
const __m128i v_d1_w = xx_load_128(ds + 8);
const __m128i v_d2_w = xx_load_128(ds + 16);
const __m128i v_d3_w = xx_load_128(ds + 24);
const __m128i v_d4_w = xx_load_128(ds + 32);
const __m128i v_d5_w = xx_load_128(ds + 40);
const __m128i v_d6_w = xx_load_128(ds + 48);
const __m128i v_d7_w = xx_load_128(ds + 56);
const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
const __m128i v_m2_w = _mm_unpacklo_epi8(v_m23_b, _mm_setzero_si128());
const __m128i v_m3_w = _mm_unpackhi_epi8(v_m23_b, _mm_setzero_si128());
const __m128i v_m4_w = _mm_unpacklo_epi8(v_m45_b, _mm_setzero_si128());
const __m128i v_m5_w = _mm_unpackhi_epi8(v_m45_b, _mm_setzero_si128());
const __m128i v_m6_w = _mm_unpacklo_epi8(v_m67_b, _mm_setzero_si128());
const __m128i v_m7_w = _mm_unpackhi_epi8(v_m67_b, _mm_setzero_si128());
const __m128i v_p0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
const __m128i v_p1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
const __m128i v_p2_d = _mm_madd_epi16(v_d2_w, v_m2_w);
const __m128i v_p3_d = _mm_madd_epi16(v_d3_w, v_m3_w);
const __m128i v_p4_d = _mm_madd_epi16(v_d4_w, v_m4_w);
const __m128i v_p5_d = _mm_madd_epi16(v_d5_w, v_m5_w);
const __m128i v_p6_d = _mm_madd_epi16(v_d6_w, v_m6_w);
const __m128i v_p7_d = _mm_madd_epi16(v_d7_w, v_m7_w);
const __m128i v_p01_d = _mm_add_epi32(v_p0_d, v_p1_d);
const __m128i v_p23_d = _mm_add_epi32(v_p2_d, v_p3_d);
const __m128i v_p45_d = _mm_add_epi32(v_p4_d, v_p5_d);
const __m128i v_p67_d = _mm_add_epi32(v_p6_d, v_p7_d);
const __m128i v_p0123_d = _mm_add_epi32(v_p01_d, v_p23_d);
const __m128i v_p4567_d = _mm_add_epi32(v_p45_d, v_p67_d);
v_acc0_d = _mm_add_epi32(v_acc0_d, v_p0123_d);
v_acc1_d = _mm_add_epi32(v_acc1_d, v_p4567_d);
ds += 64;
m += 64;
N -= 64;
} while (N);
v_sign_d = _mm_cmplt_epi32(v_acc0_d, _mm_setzero_si128());
v_acc0_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc0_d, v_sign_d),
_mm_unpackhi_epi32(v_acc0_d, v_sign_d));
v_sign_d = _mm_cmplt_epi32(v_acc1_d, _mm_setzero_si128());
v_acc1_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc1_d, v_sign_d),
_mm_unpackhi_epi32(v_acc1_d, v_sign_d));
v_acc_q = _mm_add_epi64(v_acc0_d, v_acc1_d);
v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));
#if ARCH_X86_64
acc = (uint64_t)_mm_cvtsi128_si64(v_acc_q);
#else
xx_storel_64(&acc, v_acc_q);
#endif
return acc > limit;
}
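A rough worked bound behind the N < 8192 assert above (an editorial note, assuming only what the in-function comment states: |ds[i]| < 2**15 and m[i] <= 64): each 16-bit product is below 2**21 in magnitude, one _mm_madd_epi16 lane sums two of them (< 2**22), and every 64-element iteration folds four such lanes into each 32-bit accumulator lane (< 2**24 added per iteration). A signed 32-bit lane (~2**31) is therefore safe for about 2**31 / 2**24 = 128 iterations, i.e. 128 * 64 = 8192 elements.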
// Negate under mask
static INLINE __m128i negm_epi16(__m128i v_v_w, __m128i v_mask_w) {
return _mm_sub_epi16(_mm_xor_si128(v_v_w, v_mask_w), v_mask_w);
}
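A scalar illustration (editorial; negm_scalar is a hypothetical name) of the two's-complement identity used by negm_epi16 above: (v ^ m) - m is v when m == 0 and -v when m is all ones, since XOR with all ones gives ~v and subtracting -1 adds the final +1. Feeding _mm_madd_epi16 the pair (a, b) against (a, -b) then yields a*a - b*b per 32-bit lane.

#include <assert.h>
#include <stdint.h>

static int16_t negm_scalar(int16_t v, int16_t m) {
  return (int16_t)((v ^ m) - m);  /* m is 0 or 0xffff (i.e. -1) */
}

int main(void) {
  assert(negm_scalar(123, 0) == 123);
  assert(negm_scalar(123, -1) == -123);
  assert(negm_scalar(-7, -1) == 7);
  return 0;
}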
/**
* See vp10_wedge_compute_delta_squares_c
*/
void vp10_wedge_compute_delta_squares_sse2(int16_t *d,
const int16_t *a,
const int16_t *b,
int N) {
const __m128i v_neg_w = _mm_set_epi16(0xffff, 0, 0xffff, 0,
0xffff, 0, 0xffff, 0);
assert(N % 64 == 0);
do {
const __m128i v_a0_w = xx_load_128(a);
const __m128i v_b0_w = xx_load_128(b);
const __m128i v_a1_w = xx_load_128(a + 8);
const __m128i v_b1_w = xx_load_128(b + 8);
const __m128i v_a2_w = xx_load_128(a + 16);
const __m128i v_b2_w = xx_load_128(b + 16);
const __m128i v_a3_w = xx_load_128(a + 24);
const __m128i v_b3_w = xx_load_128(b + 24);
const __m128i v_ab0l_w = _mm_unpacklo_epi16(v_a0_w, v_b0_w);
const __m128i v_ab0h_w = _mm_unpackhi_epi16(v_a0_w, v_b0_w);
const __m128i v_ab1l_w = _mm_unpacklo_epi16(v_a1_w, v_b1_w);
const __m128i v_ab1h_w = _mm_unpackhi_epi16(v_a1_w, v_b1_w);
const __m128i v_ab2l_w = _mm_unpacklo_epi16(v_a2_w, v_b2_w);
const __m128i v_ab2h_w = _mm_unpackhi_epi16(v_a2_w, v_b2_w);
const __m128i v_ab3l_w = _mm_unpacklo_epi16(v_a3_w, v_b3_w);
const __m128i v_ab3h_w = _mm_unpackhi_epi16(v_a3_w, v_b3_w);
// Negate top word of pairs
const __m128i v_abl0n_w = negm_epi16(v_ab0l_w, v_neg_w);
const __m128i v_abh0n_w = negm_epi16(v_ab0h_w, v_neg_w);
const __m128i v_abl1n_w = negm_epi16(v_ab1l_w, v_neg_w);
const __m128i v_abh1n_w = negm_epi16(v_ab1h_w, v_neg_w);
const __m128i v_abl2n_w = negm_epi16(v_ab2l_w, v_neg_w);
const __m128i v_abh2n_w = negm_epi16(v_ab2h_w, v_neg_w);
const __m128i v_abl3n_w = negm_epi16(v_ab3l_w, v_neg_w);
const __m128i v_abh3n_w = negm_epi16(v_ab3h_w, v_neg_w);
const __m128i v_r0l_w = _mm_madd_epi16(v_ab0l_w, v_abl0n_w);
const __m128i v_r0h_w = _mm_madd_epi16(v_ab0h_w, v_abh0n_w);
const __m128i v_r1l_w = _mm_madd_epi16(v_ab1l_w, v_abl1n_w);
const __m128i v_r1h_w = _mm_madd_epi16(v_ab1h_w, v_abh1n_w);
const __m128i v_r2l_w = _mm_madd_epi16(v_ab2l_w, v_abl2n_w);
const __m128i v_r2h_w = _mm_madd_epi16(v_ab2h_w, v_abh2n_w);
const __m128i v_r3l_w = _mm_madd_epi16(v_ab3l_w, v_abl3n_w);
const __m128i v_r3h_w = _mm_madd_epi16(v_ab3h_w, v_abh3n_w);
const __m128i v_r0_w = _mm_packs_epi32(v_r0l_w, v_r0h_w);
const __m128i v_r1_w = _mm_packs_epi32(v_r1l_w, v_r1h_w);
const __m128i v_r2_w = _mm_packs_epi32(v_r2l_w, v_r2h_w);
const __m128i v_r3_w = _mm_packs_epi32(v_r3l_w, v_r3h_w);
xx_store_128(d, v_r0_w);
xx_store_128(d + 8, v_r1_w);
xx_store_128(d + 16, v_r2_w);
xx_store_128(d + 24, v_r3_w);
a += 32;
b += 32;
d += 32;
N -= 32;
} while (N);
}

View file

@@ -124,10 +124,6 @@ endif
ifeq ($(CONFIG_VP9_TEMPORAL_DENOISING),yes)
VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoiser_sse2.c
endif
-ifeq ($(CONFIG_EXT_INTER),yes)
-VP10_CX_SRCS-yes += encoder/wedge_utils.c
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
-endif
VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c