Port interintra experiment from nextgen.

The interintra experiment, which combines an inter prediction with an
intra prediction, has been ported from the nextgen branch. The
experiment is merged into ext_inter, so there is no separate configure
option to enable it.
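
For context, the ported predictor forms each pixel as a weighted average of the inter and intra predictions, with the intra weight decaying with distance from the intra boundary (see combine_interintra in the diff below). A minimal sketch of the per-pixel blend, with the weight lookup left out and the function name invented purely for illustration:

static unsigned char interintra_blend_pixel(unsigned char inter,
                                            unsigned char intra, int scale) {
  /* scale is an 8-bit fixed-point weight for the intra pixel; the ported
     weight table keeps it between 67 and 128, i.e. at most a 50/50 blend. */
  return (unsigned char)(((256 - scale) * inter + scale * intra + 127) >> 8);
}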

Change-Id: I0cc20cefd29e9b77ab7bbbb709abc11512320325
Geza Lore 2016-02-22 10:55:32 +00:00 committed by Debargha Mukherjee
Parent 3287f5519e
Commit 7ded038af5
15 changed files with 911 additions and 92 deletions

View file

@ -170,6 +170,11 @@ typedef struct {
INTRA_FILTER intra_filter;
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
PREDICTION_MODE interintra_mode;
PREDICTION_MODE interintra_uv_mode;
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
int8_t obmc;
#endif // CONFIG_OBMC
@ -624,6 +629,30 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
int aoff, int loff);
#if CONFIG_EXT_INTER
static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
return (bsize >= BLOCK_8X8) && (bsize < BLOCK_64X64);
}
static INLINE int is_interintra_allowed_mode(const PREDICTION_MODE mode) {
return (mode >= NEARESTMV) && (mode <= NEWMV);
}
static INLINE int is_interintra_allowed_ref(const MV_REFERENCE_FRAME rf[2]) {
return (rf[0] > INTRA_FRAME) && (rf[1] <= INTRA_FRAME);
}
static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
return is_interintra_allowed_bsize(mbmi->sb_type)
&& is_interintra_allowed_mode(mbmi->mode)
&& is_interintra_allowed_ref(mbmi->ref_frame);
}
static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
return (mbmi->ref_frame[1] == INTRA_FRAME) && is_interintra_allowed(mbmi);
}
#endif // CONFIG_EXT_INTER
#ifdef __cplusplus
} // extern "C"
#endif

View file

@ -226,6 +226,10 @@ static const vpx_prob default_inter_compound_mode_probs
{17, 81, 52, 192, 192, 128, 180, 180}, // 5 = one intra neighbour
{25, 29, 50, 192, 192, 128, 180, 180}, // 6 = two intra neighbours
};
static const vpx_prob default_interintra_prob[BLOCK_SIZES] = {
192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
};
#endif // CONFIG_EXT_INTER
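A note on the default above: assuming the usual vpx convention that a probability p means the coded bit is 0 with probability p/256, a default of 192 starts every block size biased roughly 3:1 against signalling interintra; vp10_adapt_inter_frame_probs below then pulls the probability toward the per-frame counts.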
#if CONFIG_OBMC
@ -1326,6 +1330,7 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
vp10_copy(fc->interintra_prob, default_interintra_prob);
#endif // CONFIG_EXT_INTER
#if CONFIG_SUPERTX
vp10_copy(fc->supertx_prob, default_supertx_prob);
@ -1434,6 +1439,12 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
pre_fc->inter_compound_mode_probs[i],
counts->inter_compound_mode[i],
fc->inter_compound_mode_probs[i]);
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interintra_allowed_bsize(i))
fc->interintra_prob[i] = mode_mv_merge_probs(pre_fc->interintra_prob[i],
counts->interintra[i]);
}
#endif // CONFIG_EXT_INTER
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)

View file

@ -80,6 +80,7 @@ typedef struct frame_contexts {
#if CONFIG_EXT_INTER
vpx_prob inter_compound_mode_probs[INTER_MODE_CONTEXTS]
[INTER_COMPOUND_MODES - 1];
vpx_prob interintra_prob[BLOCK_SIZES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
vpx_prob obmc_prob[BLOCK_SIZES];
@ -141,6 +142,7 @@ typedef struct FRAME_COUNTS {
unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
#if CONFIG_EXT_INTER
unsigned int inter_compound_mode[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
unsigned int interintra[BLOCK_SIZES][2];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
unsigned int obmc[BLOCK_SIZES][2];
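A note on the new interintra counter above: as the encoder hunk in update_stats below shows, interintra[bsize][1] counts blocks that are allowed to use interintra and do, while interintra[bsize][0] counts allowed blocks that do not; these counts drive the probability adaptation added to vp10_adapt_inter_frame_probs above.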

View file

@ -22,6 +22,10 @@
#include "vp10/common/onyxc_int.h"
#endif // CONFIG_OBMC
// TODO(geza.lore) Update this when the extended coding unit size experiment
// has been ported.
#define CU_SIZE 64
#if CONFIG_VP9_HIGHBITDEPTH
void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
@ -232,23 +236,65 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors_sby(xd,
xd->plane[0].dst.buf,
xd->plane[0].dst.stride,
bsize);
#endif // CONFIG_EXT_INTER
}
void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize, int plane) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi)) {
if (plane == 0) {
vp10_build_interintra_predictors_sby(xd,
xd->plane[0].dst.buf,
xd->plane[0].dst.stride,
bsize);
} else {
vp10_build_interintra_predictors_sbc(xd,
xd->plane[plane].dst.buf,
xd->plane[plane].dst.stride,
plane, bsize);
}
}
#endif // CONFIG_EXT_INTER
}
void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
MAX_MB_PLANE - 1);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors_sbuv(xd,
xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
xd->plane[1].dst.stride,
xd->plane[2].dst.stride,
bsize);
#endif // CONFIG_EXT_INTER
}
void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
MAX_MB_PLANE - 1);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors(xd,
xd->plane[0].dst.buf,
xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
xd->plane[0].dst.stride,
xd->plane[1].dst.stride,
xd->plane[2].dst.stride,
bsize);
#endif // CONFIG_EXT_INTER
}
void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
@ -509,6 +555,17 @@ void vp10_build_inter_predictors_sb_sub8x8(MACROBLOCKD *xd,
0, 0, bw, bh,
mi_x, mi_y);
}
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors(xd,
xd->plane[0].dst.buf,
xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
xd->plane[0].dst.stride,
xd->plane[1].dst.stride,
xd->plane[2].dst.stride,
bsize);
#endif // CONFIG_EXT_INTER
}
#endif // CONFIG_SUPERTX
@ -755,3 +812,387 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
} // each mi in the left column
}
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
static void combine_interintra(PREDICTION_MODE mode,
BLOCK_SIZE plane_bsize,
uint8_t *comppred,
int compstride,
uint8_t *interpred,
int interstride,
uint8_t *intrapred,
int intrastride) {
static const int scale_bits = 8;
static const int scale_max = 256;
static const int scale_round = 127;
static const int weights1d[64] = {
128, 125, 122, 119, 116, 114, 111, 109,
107, 105, 103, 101, 99, 97, 96, 94,
93, 91, 90, 89, 88, 86, 85, 84,
83, 82, 81, 81, 80, 79, 78, 78,
77, 76, 76, 75, 75, 74, 74, 73,
73, 72, 72, 71, 71, 71, 70, 70,
70, 70, 69, 69, 69, 69, 68, 68,
68, 68, 68, 67, 67, 67, 67, 67,
};
const int bw = 4 << b_width_log2_lookup[plane_bsize];
const int bh = 4 << b_height_log2_lookup[plane_bsize];
int size = VPXMAX(bw, bh);
int size_scale = (size >= 64 ? 1 :
size == 32 ? 2 :
size == 16 ? 4 :
size == 8 ? 8 : 16);
int i, j;
switch (mode) {
case V_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[i * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case H_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[j * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D63_PRED:
case D117_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[i * size_scale] * 3 +
weights1d[j * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D207_PRED:
case D153_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[j * size_scale] * 3 +
weights1d[i * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D135_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D45_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[i * size_scale] +
weights1d[j * size_scale]) >> 1;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case TM_PRED:
case DC_PRED:
default:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
comppred[i * compstride + j] = (interpred[i * interstride + j] +
intrapred[i * intrastride + j]) >> 1;
}
}
break;
}
}
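As a rough worked example (illustrative values only, not part of the change): for an 8x8 block, bw = bh = 8 gives size_scale = 8, so with V_PRED the top row uses weights1d[0] = 128 (an even blend) while the bottom row uses weights1d[7 * 8] = 68, dropping the intra contribution to about 68/256, roughly 27%, far from the intra edge. Spot-checking one bottom-row pixel with assumed inputs inter = 100 and intra = 200:

/* ((256 - 68) * 100 + 68 * 200 + 127) >> 8
     = (18800 + 13600 + 127) >> 8
     = 32527 >> 8
     = 127 */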
#if CONFIG_VP9_HIGHBITDEPTH
static void combine_interintra_highbd(PREDICTION_MODE mode,
BLOCK_SIZE plane_bsize,
uint8_t *comppred8,
int compstride,
uint8_t *interpred8,
int interstride,
uint8_t *intrapred8,
int intrastride, int bd) {
static const int scale_bits = 8;
static const int scale_max = 256;
static const int scale_round = 127;
static const int weights1d[64] = {
128, 125, 122, 119, 116, 114, 111, 109,
107, 105, 103, 101, 99, 97, 96, 94,
93, 91, 90, 89, 88, 86, 85, 84,
83, 82, 81, 81, 80, 79, 78, 78,
77, 76, 76, 75, 75, 74, 74, 73,
73, 72, 72, 71, 71, 71, 70, 70,
70, 70, 69, 69, 69, 69, 68, 68,
68, 68, 68, 67, 67, 67, 67, 67,
};
const int bw = 4 << b_width_log2_lookup[plane_bsize];
const int bh = 4 << b_height_log2_lookup[plane_bsize];
int size = VPXMAX(bw, bh);
int size_scale = (size >= 64 ? 1 :
size == 32 ? 2 :
size == 16 ? 4 :
size == 8 ? 8 : 16);
int i, j;
uint16_t *comppred = CONVERT_TO_SHORTPTR(comppred8);
uint16_t *interpred = CONVERT_TO_SHORTPTR(interpred8);
uint16_t *intrapred = CONVERT_TO_SHORTPTR(intrapred8);
(void) bd;
switch (mode) {
case V_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[i * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case H_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[j * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D63_PRED:
case D117_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[i * size_scale] * 3 +
weights1d[j * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D207_PRED:
case D153_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[j * size_scale] * 3 +
weights1d[i * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D135_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case D45_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int scale = (weights1d[i * size_scale] +
weights1d[j * size_scale]) >> 1;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
>> scale_bits;
}
}
break;
case TM_PRED:
case DC_PRED:
default:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
comppred[i * compstride + j] = (interpred[i * interstride + j] +
intrapred[i * intrastride + j]) >> 1;
}
}
break;
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
// Break down rectangular intra prediction for joint spatio-temporal prediction
// into two square intra predictions.
static void build_intra_predictors_for_interintra(
MACROBLOCKD *xd,
uint8_t *ref, int ref_stride,
uint8_t *dst, int dst_stride,
PREDICTION_MODE mode,
BLOCK_SIZE bsize,
int plane) {
BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]);
const int bwl = b_width_log2_lookup[plane_bsize];
const int bhl = b_height_log2_lookup[plane_bsize];
TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
if (bwl == bhl) {
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
ref, ref_stride, dst, dst_stride,
0, 0, plane);
} else if (bwl < bhl) {
uint8_t *src_2 = ref + (4 << bwl)*ref_stride;
uint8_t *dst_2 = dst + (4 << bwl)*dst_stride;
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
ref, ref_stride, dst, dst_stride,
0, 0, plane);
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
src_2, ref_stride, dst_2, dst_stride,
0, 1 << bwl, plane);
} else {
uint8_t *src_2 = ref + (4 << bhl);
uint8_t *dst_2 = dst + (4 << bhl);
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
ref, ref_stride, dst, dst_stride,
0, 0, plane);
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
src_2, ref_stride, dst_2, dst_stride,
1 << bhl, 0, plane);
}
}
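To make the split concrete (assuming the usual block-size lookups): for a BLOCK_8X16 plane, bwl = 1 and bhl = 2, so the bwl < bhl branch predicts the top 8x8 square at the block origin and then the bottom 8x8 square starting at ref + (4 << bwl) * ref_stride, i.e. 8 rows further down, passing a row offset of 1 << bwl = 2 (in units of 4 pixels) to vp10_predict_intra_block.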
void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd,
uint8_t *ypred,
int ystride,
BLOCK_SIZE bsize) {
const int bw = 4 << b_width_log2_lookup[bsize];
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t,
intrapredictor[CU_SIZE * CU_SIZE]);
build_intra_predictors_for_interintra(
xd, xd->plane[0].dst.buf, xd->plane[0].dst.stride,
CONVERT_TO_BYTEPTR(intrapredictor), bw,
xd->mi[0]->mbmi.interintra_mode, bsize, 0);
combine_interintra_highbd(xd->mi[0]->mbmi.interintra_mode,
bsize,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
ypred, ystride,
CONVERT_TO_BYTEPTR(intrapredictor), bw, xd->bd);
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
{
uint8_t intrapredictor[CU_SIZE * CU_SIZE];
build_intra_predictors_for_interintra(
xd, xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, bw,
xd->mi[0]->mbmi.interintra_mode, bsize, 0);
combine_interintra(xd->mi[0]->mbmi.interintra_mode,
bsize,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
ypred, ystride, intrapredictor, bw);
}
}
void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd,
uint8_t *upred,
int ustride,
int plane,
BLOCK_SIZE bsize) {
const BLOCK_SIZE uvbsize = get_plane_block_size(bsize, &xd->plane[plane]);
const int bw = 4 << b_width_log2_lookup[uvbsize];
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t,
uintrapredictor[CU_SIZE * CU_SIZE]);
build_intra_predictors_for_interintra(
xd, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride,
CONVERT_TO_BYTEPTR(uintrapredictor), bw,
xd->mi[0]->mbmi.interintra_uv_mode, bsize, plane);
combine_interintra_highbd(xd->mi[0]->mbmi.interintra_uv_mode,
uvbsize,
xd->plane[plane].dst.buf,
xd->plane[plane].dst.stride,
upred, ustride,
CONVERT_TO_BYTEPTR(uintrapredictor), bw, xd->bd);
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
{
uint8_t uintrapredictor[CU_SIZE * CU_SIZE];
build_intra_predictors_for_interintra(
xd, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride,
uintrapredictor, bw,
xd->mi[0]->mbmi.interintra_uv_mode, bsize, 1);
combine_interintra(xd->mi[0]->mbmi.interintra_uv_mode,
uvbsize,
xd->plane[plane].dst.buf,
xd->plane[plane].dst.stride,
upred, ustride, uintrapredictor, bw);
}
}
void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
uint8_t *upred,
uint8_t *vpred,
int ustride, int vstride,
BLOCK_SIZE bsize) {
vp10_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
vp10_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
}
void vp10_build_interintra_predictors(MACROBLOCKD *xd,
uint8_t *ypred,
uint8_t *upred,
uint8_t *vpred,
int ystride, int ustride, int vstride,
BLOCK_SIZE bsize) {
vp10_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
vp10_build_interintra_predictors_sbuv(xd, upred, vpred,
ustride, vstride, bsize);
}
#endif // CONFIG_EXT_INTER

View file

@ -369,6 +369,31 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
int tmp_stride2[MAX_MB_PLANE]);
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
void vp10_build_interintra_predictors(MACROBLOCKD *xd,
uint8_t *ypred,
uint8_t *upred,
uint8_t *vpred,
int ystride,
int ustride,
int vstride,
BLOCK_SIZE bsize);
void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd,
uint8_t *ypred,
int ystride,
BLOCK_SIZE bsize);
void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd,
uint8_t *upred,
int ustride,
int plane,
BLOCK_SIZE bsize);
void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
uint8_t *upred,
uint8_t *vpred,
int ustride, int vstride,
BLOCK_SIZE bsize);
#endif // CONFIG_EXT_INTER
#ifdef __cplusplus
} // extern "C"
#endif

View file

@ -403,6 +403,10 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
for (j = 0; j < INTER_COMPOUND_MODES; j++)
cm->counts.inter_compound_mode[i][j] +=
counts->inter_compound_mode[i][j];
for (i = 0; i < BLOCK_SIZES; i++)
for (j = 0; j < 2; j++)
cm->counts.interintra[i][j] += counts->interintra[i][j];
#endif // CONFIG_EXT_INTER
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)

View file

@ -871,6 +871,17 @@ static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
}
}
}
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors(xd,
xd->plane[0].dst.buf,
xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
xd->plane[0].dst.stride,
xd->plane[1].dst.stride,
xd->plane[2].dst.stride,
sb_type);
#endif // CONFIG_EXT_INTER
}
#if CONFIG_SUPERTX
@ -915,6 +926,17 @@ static void dec_build_inter_predictors_sb_sub8x8(VP10Decoder *const pbi,
&mv, ref_frame_buf, is_scaled, ref);
}
}
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
vp10_build_interintra_predictors(xd,
xd->plane[0].dst.buf,
xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
xd->plane[0].dst.stride,
xd->plane[1].dst.stride,
xd->plane[2].dst.stride,
xd->mi[0]->mbmi.sb_type);
#endif // CONFIG_EXT_INTER
}
#endif // CONFIG_SUPERTX
@ -3569,6 +3591,14 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
#if CONFIG_EXT_INTER
read_inter_compound_mode_probs(fc, &r);
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i)) {
vp10_diff_update_prob(&r, &fc->interintra_prob[i]);
}
}
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
@ -3634,6 +3664,8 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
assert(!memcmp(cm->counts.inter_compound_mode,
zero_counts.inter_compound_mode,
sizeof(cm->counts.inter_compound_mode)));
assert(!memcmp(cm->counts.interintra, zero_counts.interintra,
sizeof(cm->counts.interintra)));
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
assert(!memcmp(cm->counts.obmc, zero_counts.obmc,

View file

@ -1133,7 +1133,7 @@ static void fpm_sync(void *const data, int mi_row) {
static void read_inter_block_mode_info(VP10Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
#if CONFIG_OBMC && CONFIG_SUPERTX
#if (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
int mi_row, int mi_col, vpx_reader *r,
int supertx_enabled) {
#else
@ -1430,6 +1430,37 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
#endif // CONFIG_EXT_INTER
nearestmv, nearmv, is_compound, allow_hp, r);
}
#if CONFIG_EXT_INTER
if (cm->reference_mode != COMPOUND_REFERENCE &&
#if CONFIG_SUPERTX
!supertx_enabled &&
#endif
is_interintra_allowed(mbmi)) {
const int interintra = vpx_read(r, cm->fc->interintra_prob[bsize]);
if (xd->counts)
xd->counts->interintra[bsize][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
const PREDICTION_MODE interintra_mode =
read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
mbmi->ref_frame[1] = INTRA_FRAME;
mbmi->interintra_mode = interintra_mode;
mbmi->interintra_uv_mode = interintra_mode;
#if CONFIG_EXT_INTRA
// TODO(debargha|geza.lore):
// Should we use ext_intra modes for interintra?
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
mbmi->angle_delta[0] = 0;
mbmi->angle_delta[1] = 0;
mbmi->intra_filter = INTRA_FILTER_LINEAR;
#endif // CONFIG_EXT_INTRA
}
}
#endif // CONFIG_EXT_INTER
#if CONFIG_EXT_INTERP
mbmi->interp_filter = (cm->interp_filter == SWITCHABLE)
? read_switchable_interp_filter(cm, xd, r)
@ -1514,7 +1545,8 @@ static void read_inter_frame_mode_info(VP10Decoder *const pbi,
if (inter_block)
read_inter_block_mode_info(pbi, xd,
#if CONFIG_OBMC && CONFIG_SUPERTX
#if (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
mi, mi_row, mi_col, r, supertx_enabled);
#else
mi, mi_row, mi_col, r);

View file

@ -1028,7 +1028,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
#endif // CONFIG_EXT_INTER
write_inter_mode(cm, w, mode,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
has_second_ref(mbmi),
is_compound,
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
mode_ctx);
@ -1163,6 +1163,22 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
}
}
#if CONFIG_EXT_INTER
if (cpi->common.reference_mode != COMPOUND_REFERENCE &&
#if CONFIG_SUPERTX
!supertx_enabled &&
#endif // CONFIG_SUPERTX
is_interintra_allowed(mbmi)) {
const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
vpx_write(w, interintra, cm->fc->interintra_prob[bsize]);
if (interintra) {
write_intra_mode(w, mbmi->interintra_mode,
cm->fc->y_mode_prob[size_group_lookup[bsize]]);
assert(mbmi->interintra_mode == mbmi->interintra_uv_mode);
}
}
#endif // CONFIG_EXT_INTER
#if CONFIG_EXT_INTERP
write_switchable_interp_filter(cpi, xd, w);
#endif // CONFIG_EXT_INTERP
@ -2444,6 +2460,16 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
#if CONFIG_EXT_INTER
update_inter_compound_mode_probs(cm, &header_bc);
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i)) {
vp10_cond_prob_diff_update(&header_bc,
&fc->interintra_prob[i],
cm->counts.interintra[i]);
}
}
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC

View file

@ -1810,6 +1810,22 @@ static void update_stats(VP10_COMMON *cm, ThreadData *td
#endif // CONFIG_OBMC
}
}
#if CONFIG_EXT_INTER
if (cm->reference_mode != COMPOUND_REFERENCE &&
#if CONFIG_SUPERTX
!supertx_enabled &&
#endif
is_interintra_allowed(mbmi)) {
if (mbmi->ref_frame[1] == INTRA_FRAME) {
counts->y_mode[size_group_lookup[bsize]][mbmi->interintra_mode]++;
counts->interintra[bsize][1]++;
} else {
counts->interintra[bsize][0]++;
}
}
#endif // CONFIG_EXT_INTER
if (inter_block &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
@ -4573,7 +4589,13 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
#if CONFIG_SUPERTX
static int check_intra_b(PICK_MODE_CONTEXT *ctx) {
return !is_inter_mode((&ctx->mic)->mbmi.mode);
if (!is_inter_mode((&ctx->mic)->mbmi.mode))
return 1;
#if CONFIG_EXT_INTER
if (ctx->mic.mbmi.ref_frame[1] == INTRA_FRAME)
return 1;
#endif // CONFIG_EXT_INTER
return 0;
}
static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,

View file

@ -73,7 +73,11 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
else
#endif // CONFIG_EXT_INTER
xd->mi[0]->mbmi.mode = NEWMV;
xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;
#if CONFIG_EXT_INTER
xd->mi[0]->mbmi.ref_frame[1] = NONE;
#endif // CONFIG_EXT_INTER
vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);

View file

@ -859,6 +859,44 @@ void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
rd->thresh_mult[THR_D153_PRED] += 2500;
rd->thresh_mult[THR_D207_PRED] += 2500;
rd->thresh_mult[THR_D63_PRED] += 2500;
#if CONFIG_EXT_INTER
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL ] += 1500;
#if CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL2 ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL3 ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL4 ] += 1500;
#endif // CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROG ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROA ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL] += 1500;
#if CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL2] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL3] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL4] += 1500;
#endif // CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTG] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTA] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARL ] += 1500;
#if CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEARL2 ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARL3 ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARL4 ] += 1500;
#endif // CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEARG ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARA ] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEWL ] += 2000;
#if CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEWL2 ] += 2000;
rd->thresh_mult[THR_COMP_INTERINTRA_NEWL3 ] += 2000;
rd->thresh_mult[THR_COMP_INTERINTRA_NEWL4 ] += 2000;
#endif // CONFIG_EXT_REFS
rd->thresh_mult[THR_COMP_INTERINTRA_NEWG ] += 2000;
rd->thresh_mult[THR_COMP_INTERINTRA_NEWA ] += 2000;
#endif // CONFIG_EXT_INTER
}
void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {

View file

@ -42,13 +42,13 @@ extern "C" {
#if CONFIG_EXT_REFS
#if CONFIG_EXT_INTER
#define MAX_MODES 85
#define MAX_MODES 109
#else
#define MAX_MODES 54
#endif // CONFIG_EXT_INTER
#else
#if CONFIG_EXT_INTER
#define MAX_MODES 43
#define MAX_MODES 55
#else
#define MAX_MODES 30
#endif // CONFIG_EXT_INTER
@ -212,6 +212,40 @@ typedef enum {
THR_D63_PRED,
THR_D117_PRED,
THR_D45_PRED,
#if CONFIG_EXT_INTER
THR_COMP_INTERINTRA_ZEROL,
THR_COMP_INTERINTRA_NEARESTL,
THR_COMP_INTERINTRA_NEARL,
THR_COMP_INTERINTRA_NEWL,
#if CONFIG_EXT_REFS
THR_COMP_INTERINTRA_ZEROL2,
THR_COMP_INTERINTRA_NEARESTL2,
THR_COMP_INTERINTRA_NEARL2,
THR_COMP_INTERINTRA_NEWL2,
THR_COMP_INTERINTRA_ZEROL3,
THR_COMP_INTERINTRA_NEARESTL3,
THR_COMP_INTERINTRA_NEARL3,
THR_COMP_INTERINTRA_NEWL3,
THR_COMP_INTERINTRA_ZEROL4,
THR_COMP_INTERINTRA_NEARESTL4,
THR_COMP_INTERINTRA_NEARL4,
THR_COMP_INTERINTRA_NEWL4,
#endif // CONFIG_EXT_REFS
THR_COMP_INTERINTRA_ZEROG,
THR_COMP_INTERINTRA_NEARESTG,
THR_COMP_INTERINTRA_NEARG,
THR_COMP_INTERINTRA_NEWG,
THR_COMP_INTERINTRA_ZEROA,
THR_COMP_INTERINTRA_NEARESTA,
THR_COMP_INTERINTRA_NEARA,
THR_COMP_INTERINTRA_NEWA,
#endif // CONFIG_EXT_INTER
} THR_MODES;
typedef enum {

View file

@ -44,6 +44,10 @@
#include "vp10/encoder/rdopt.h"
#include "vp10/encoder/aq_variance.h"
// TODO(geza.lore) Update this when the extended coding unit size experiment
// has been ported.
#define CU_SIZE 64
#if CONFIG_EXT_REFS
#define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
@ -260,6 +264,40 @@ static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
{D63_PRED, {INTRA_FRAME, NONE}},
{D117_PRED, {INTRA_FRAME, NONE}},
{D45_PRED, {INTRA_FRAME, NONE}},
#if CONFIG_EXT_INTER
{ZEROMV, {LAST_FRAME, INTRA_FRAME}},
{NEARESTMV, {LAST_FRAME, INTRA_FRAME}},
{NEARMV, {LAST_FRAME, INTRA_FRAME}},
{NEWMV, {LAST_FRAME, INTRA_FRAME}},
#if CONFIG_EXT_REFS
{ZEROMV, {LAST2_FRAME, INTRA_FRAME}},
{NEARESTMV, {LAST2_FRAME, INTRA_FRAME}},
{NEARMV, {LAST2_FRAME, INTRA_FRAME}},
{NEWMV, {LAST2_FRAME, INTRA_FRAME}},
{ZEROMV, {LAST3_FRAME, INTRA_FRAME}},
{NEARESTMV, {LAST3_FRAME, INTRA_FRAME}},
{NEARMV, {LAST3_FRAME, INTRA_FRAME}},
{NEWMV, {LAST3_FRAME, INTRA_FRAME}},
{ZEROMV, {LAST4_FRAME, INTRA_FRAME}},
{NEARESTMV, {LAST4_FRAME, INTRA_FRAME}},
{NEARMV, {LAST4_FRAME, INTRA_FRAME}},
{NEWMV, {LAST4_FRAME, INTRA_FRAME}},
#endif // CONFIG_EXT_REFS
{ZEROMV, {GOLDEN_FRAME, INTRA_FRAME}},
{NEARESTMV, {GOLDEN_FRAME, INTRA_FRAME}},
{NEARMV, {GOLDEN_FRAME, INTRA_FRAME}},
{NEWMV, {GOLDEN_FRAME, INTRA_FRAME}},
{ZEROMV, {ALTREF_FRAME, INTRA_FRAME}},
{NEARESTMV, {ALTREF_FRAME, INTRA_FRAME}},
{NEARMV, {ALTREF_FRAME, INTRA_FRAME}},
{NEWMV, {ALTREF_FRAME, INTRA_FRAME}},
#endif // CONFIG_EXT_INTER
};
static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
@ -1056,33 +1094,6 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
rate, distortion, skip,
sse, ref_best_rd, 0, bs,
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
#if CONFIG_EXT_TX
if (get_ext_tx_types(mbmi->tx_size, bs, is_inter) > 1 &&
!xd->lossless[mbmi->segment_id] && *rate != INT_MAX) {
int ext_tx_set = get_ext_tx_set(mbmi->tx_size, bs, is_inter);
if (is_inter) {
if (ext_tx_set > 0)
*rate += cpi->inter_tx_type_costs[ext_tx_set][mbmi->tx_size]
[mbmi->tx_type];
} else {
if (ext_tx_set > 0 && ALLOW_INTRA_EXT_TX)
*rate +=
cpi->intra_tx_type_costs[ext_tx_set][mbmi->tx_size]
[mbmi->mode][mbmi->tx_type];
}
}
#else
if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id] &&
*rate != INT_MAX) {
if (is_inter)
*rate += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
else
*rate += cpi->intra_tx_type_costs[mbmi->tx_size]
[intra_mode_to_tx_type_context[mbmi->mode]]
[mbmi->tx_type];
}
#endif // CONFIG_EXT_TX
}
static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
@ -3818,9 +3829,14 @@ static int check_best_zero_mv(
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
const MV_REFERENCE_FRAME ref_frames[2],
const BLOCK_SIZE bsize, int block) {
#if !CONFIG_EXT_INTER
assert(ref_frames[1] != INTRA_FRAME); // Just sanity check
#endif
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
(ref_frames[1] == NONE ||
(ref_frames[1] <= INTRA_FRAME ||
frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
#if CONFIG_REF_MV
int16_t rfc = vp10_mode_context_analyzer(mode_context,
@ -3849,7 +3865,7 @@ static int check_best_zero_mv(
if (c2 > c3) return 0;
} else {
assert(this_mode == ZEROMV);
if (ref_frames[1] == NONE) {
if (ref_frames[1] <= INTRA_FRAME) {
if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
(c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
return 0;
@ -3889,15 +3905,6 @@ static int check_best_zero_mv(
if (c4 > c3) return 0;
} else {
assert(this_mode == ZERO_ZEROMV);
if (ref_frames[1] == NONE) {
if ((c3 >= c2 &&
frame_mv[NEAREST_NEARESTMV][ref_frames[0]].as_int == 0) ||
(c3 >= c1 &&
frame_mv[NEAREST_NEARMV][ref_frames[0]].as_int == 0) ||
(c3 >= c4 &&
frame_mv[NEAR_NEARESTMV][ref_frames[0]].as_int == 0))
return 0;
} else {
if ((c3 >= c2 &&
frame_mv[NEAREST_NEARESTMV][ref_frames[0]].as_int == 0 &&
frame_mv[NEAREST_NEARESTMV][ref_frames[1]].as_int == 0) ||
@ -3910,7 +3917,6 @@ static int check_best_zero_mv(
return 0;
}
}
}
#endif // CONFIG_EXT_INTER
return 1;
}
@ -5160,6 +5166,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
int_mv single_newmvs[2][MAX_REF_FRAMES],
int single_newmvs_rate[2][MAX_REF_FRAMES],
int *compmode_interintra_cost,
#else
int_mv single_newmv[MAX_REF_FRAMES],
#endif // CONFIG_EXT_INTER
@ -5183,6 +5191,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#if CONFIG_EXT_INTER
int mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
int_mv single_newmv[MAX_REF_FRAMES];
const int * const intra_mode_cost =
cpi->mbmode_cost[size_group_lookup[bsize]];
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
#if CONFIG_REF_MV
uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
#endif
@ -5193,6 +5204,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_EXT_INTER
const int tmp_buf_sz = CU_SIZE * CU_SIZE;
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
int allow_obmc = is_obmc_allowed(mbmi);
int best_obmc_flag = 0;
@ -5243,6 +5257,15 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
int64_t distortion_y = 0, distortion_uv = 0;
int16_t mode_ctx = mbmi_ext->mode_context[refs[0]];
#if CONFIG_EXT_INTER
*compmode_interintra_cost = 0;
// is_comp_interintra_pred implies !is_comp_pred
assert(!is_comp_interintra_pred || (!is_comp_pred));
// is_comp_interintra_pred implies is_interintra_allowed(mbmi->sb_type)
assert(!is_comp_interintra_pred || is_interintra_allowed(mbmi));
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
tmp_rd = 0;
#endif // CONFIG_OBMC
@ -5345,36 +5368,40 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
#endif // CONFIG_EXT_INTER
*rate2 += rate_mv;
} else {
int_mv tmp_mv;
single_motion_search(cpi, x, bsize, mi_row, mi_col,
#if CONFIG_EXT_INTER
0, mv_idx,
if (is_comp_interintra_pred) {
tmp_mv = single_newmvs[mv_idx][refs[0]];
rate_mv = single_newmvs_rate[mv_idx][refs[0]];
} else {
single_motion_search(cpi, x, bsize, mi_row, mi_col,
0, mv_idx, &tmp_mv, &rate_mv);
single_newmvs[mv_idx][refs[0]] = tmp_mv;
single_newmvs_rate[mv_idx][refs[0]] = rate_mv;
}
#else
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
single_newmv[refs[0]] = tmp_mv;
#endif // CONFIG_EXT_INTER
&tmp_mv, &rate_mv);
if (tmp_mv.as_int == INVALID_MV)
return INT64_MAX;
frame_mv[refs[0]].as_int =
xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
#if CONFIG_EXT_INTER
single_newmvs[mv_idx][refs[0]].as_int = tmp_mv.as_int;
#else
single_newmv[refs[0]].as_int = tmp_mv.as_int;
#endif // CONFIG_EXT_INTER
frame_mv[refs[0]] = tmp_mv;
xd->mi[0]->bmi[0].as_mv[0] = tmp_mv;
// Estimate the rate implications of a new mv but discount this
// under certain circumstances where we want to help initiate a weak
// motion field, where the distortion gain for a single block may not
// be enough to overcome the cost of a new mv.
if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
*rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
} else {
rate_mv = VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
}
*rate2 += rate_mv;
}
}
}
for (i = 0; i < is_comp_pred + 1; ++i) {
cur_mv[i] = frame_mv[refs[i]];
@ -5586,6 +5613,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
if ((cm->interp_filter == SWITCHABLE &&
(!i || best_needs_copy)) ||
#if CONFIG_EXT_INTER
is_comp_interintra_pred ||
#endif // CONFIG_EXT_INTER
(cm->interp_filter != SWITCHABLE &&
(cm->interp_filter == mbmi->interp_filter ||
(i == 0 && intpel_mv && IsInterpolatingFilter(i))))) {
@ -5714,6 +5744,63 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
cm->interp_filter : best_filter;
rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
#if CONFIG_EXT_INTER
if (is_comp_interintra_pred) {
PREDICTION_MODE interintra_mode, best_interintra_mode = DC_PRED;
int64_t best_interintra_rd = INT64_MAX;
int rmode, rate_sum;
int64_t dist_sum;
int j;
mbmi->ref_frame[1] = NONE;
for (j = 0; j < MAX_MB_PLANE; j++) {
xd->plane[j].dst.buf = tmp_buf + j * tmp_buf_sz;
xd->plane[j].dst.stride = CU_SIZE;
}
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
restore_dst_buf(xd, orig_dst, orig_dst_stride);
mbmi->ref_frame[1] = INTRA_FRAME;
for (interintra_mode = DC_PRED; interintra_mode <= TM_PRED;
++interintra_mode) {
mbmi->interintra_mode = interintra_mode;
mbmi->interintra_uv_mode = interintra_mode;
rmode = intra_mode_cost[mbmi->interintra_mode];
vp10_build_interintra_predictors(xd,
tmp_buf,
tmp_buf + tmp_buf_sz,
tmp_buf + 2 * tmp_buf_sz,
CU_SIZE,
CU_SIZE,
CU_SIZE,
bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
&skip_txfm_sb, &skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_sum, dist_sum);
if (rd < best_interintra_rd) {
best_interintra_rd = rd;
best_interintra_mode = interintra_mode;
}
}
mbmi->interintra_mode = best_interintra_mode;
mbmi->interintra_uv_mode = best_interintra_mode;
if (ref_best_rd < INT64_MAX &&
best_interintra_rd / 2 > ref_best_rd) {
return INT64_MAX;
}
pred_exists = 0;
tmp_rd = best_interintra_rd;
*compmode_interintra_cost =
vp10_cost_bit(cm->fc->interintra_prob[bsize], 1);
*compmode_interintra_cost += intra_mode_cost[mbmi->interintra_mode];
} else if (is_interintra_allowed(mbmi)) {
*compmode_interintra_cost =
vp10_cost_bit(cm->fc->interintra_prob[bsize], 0);
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
if (allow_obmc)
mbmi->obmc = best_obmc_flag;
@ -6303,6 +6390,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
struct buf_2d yv12_mb[MAX_REF_FRAMES][MAX_MB_PLANE];
#if CONFIG_EXT_INTER
int_mv single_newmvs[2][MAX_REF_FRAMES] = { { { 0 } }, { { 0 } } };
int single_newmvs_rate[2][MAX_REF_FRAMES] = { { 0 }, { 0 } };
#else
int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
#endif // CONFIG_EXT_INTER
@ -6325,9 +6413,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
MB_MODE_INFO best_mbmode;
#if CONFIG_REF_MV
uint8_t best_ref_mv_idx[MODE_CTX_REF_FRAMES] = { 0 };
#endif
int best_mode_skippable = 0;
int midx, best_mode_index = -1;
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
@ -6594,6 +6679,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
int64_t this_rd = INT64_MAX;
int disable_skip = 0;
int compmode_cost = 0;
#if CONFIG_EXT_INTER
int compmode_interintra_cost = 0;
#endif // CONFIG_EXT_INTER
int rate2 = 0, rate_y = 0, rate_uv = 0;
int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
int skippable = 0;
@ -6609,6 +6697,14 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
#if CONFIG_EXT_INTER
if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
// Mode must be compatible
assert(is_interintra_allowed_mode(this_mode));
if (!is_interintra_allowed_bsize(bsize))
continue;
}
if (this_mode == NEAREST_NEARESTMV) {
frame_mv[NEAREST_NEARESTMV][ref_frame].as_int =
frame_mv[NEARESTMV][ref_frame].as_int;
@ -6792,6 +6888,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
#if CONFIG_EXT_INTER
mbmi->interintra_mode = (PREDICTION_MODE)(DC_PRED - 1);
mbmi->interintra_uv_mode = (PREDICTION_MODE)(DC_PRED - 1);
#endif // CONFIG_EXT_INTER
if (ref_frame == INTRA_FRAME) {
TX_SIZE uv_tx;
struct macroblockd_plane *const pd = &xd->plane[1];
@ -6936,6 +7037,21 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
} else {
#if CONFIG_EXT_INTER
if (second_ref_frame == INTRA_FRAME) {
mbmi->interintra_mode = best_intra_mode;
mbmi->interintra_uv_mode = best_intra_mode;
#if CONFIG_EXT_INTRA
// TODO(debargha|geza.lore):
// Should we use ext_intra modes for interintra?
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
mbmi->angle_delta[0] = 0;
mbmi->angle_delta[1] = 0;
mbmi->intra_filter = INTRA_FILTER_LINEAR;
#endif // CONFIG_EXT_INTRA
}
#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
mbmi->ref_mv_idx = 0;
ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
@ -6951,11 +7067,14 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
single_newmvs,
single_newmvs_rate,
&compmode_interintra_cost,
#else
single_newmv,
#endif // CONFIG_EXT_INTER
single_inter_filter,
single_skippable, &total_sse, best_rd,
single_skippable,
&total_sse, best_rd,
&mask_filter, filter_cache);
#if CONFIG_REF_MV
@ -7008,6 +7127,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
#if CONFIG_EXT_INTER
int_mv dummy_single_newmvs[2][MAX_REF_FRAMES] =
{ { { 0 } }, { { 0 } } };
int dummy_single_newmvs_rate[2][MAX_REF_FRAMES] =
{ { 0 }, { 0 } };
int dummy_compmode_interintra_cost = 0;
#else
int_mv dummy_single_newmv[MAX_REF_FRAMES] = { { 0 } };
#endif
@ -7027,6 +7149,8 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
dummy_single_newmvs,
dummy_single_newmvs_rate,
&dummy_compmode_interintra_cost,
#else
dummy_single_newmv,
#endif
@ -7071,8 +7195,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
rate_uv = tmp_rate_uv;
total_sse = tmp_sse;
this_rd = tmp_alt_rd;
// Indicator of the effective nearmv reference motion vector.
best_ref_mv_idx[ref_frame_type] = 1 + ref_idx;
tmp_ref_rd = tmp_alt_rd;
backup_mbmi = *mbmi;
} else {
@ -7082,7 +7204,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
frame_mv[NEARMV][ref_frame] = backup_mv;
}
#endif
#endif // CONFIG_REF_MV
if (this_rd == INT64_MAX)
continue;
@ -7093,6 +7215,10 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
rate2 += compmode_cost;
}
#if CONFIG_EXT_INTER
rate2 += compmode_interintra_cost;
#endif // CONFIG_EXT_INTER
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
if (comp_pred) {
@ -7308,12 +7434,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
best_mbmode.ref_frame[1]};
int comp_pred_mode = refs[1] > INTRA_FRAME;
#if CONFIG_REF_MV
const uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
if (!comp_pred_mode) {
if (best_ref_mv_idx[best_mbmode.ref_frame[0]] > 0 &&
best_mbmode.ref_frame[1] == NONE) {
int idx = best_ref_mv_idx[best_mbmode.ref_frame[0]] + 1;
int_mv cur_mv =
mbmi_ext->ref_mv_stack[best_mbmode.ref_frame[0]][idx].this_mv;
if (best_mbmode.ref_mv_idx > 0 && refs[1] == NONE) {
int idx = best_mbmode.ref_mv_idx + 1;
int_mv cur_mv = mbmi_ext->ref_mv_stack[refs[0]][idx].this_mv;
lower_mv_precision(&cur_mv.as_mv, cm->allow_high_precision_mv);
frame_mv[NEARMV][refs[0]] = cur_mv;
}
@ -7325,7 +7450,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
else if (best_mbmode.mv[0].as_int == 0)
best_mbmode.mode = ZEROMV;
} else {
uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
int i;
const int allow_hp = cm->allow_high_precision_mv;
int_mv nearestmv[2] = { frame_mv[NEARESTMV][refs[0]],
@ -7340,7 +7464,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
}
if (mbmi_ext->ref_mv_count[rf_type] > 1) {
int ref_mv_idx = best_ref_mv_idx[rf_type] + 1;
int ref_mv_idx = best_mbmode.ref_mv_idx + 1;
nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][ref_mv_idx].this_mv;
nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][ref_mv_idx].comp_mv;
}
@ -7417,10 +7541,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
if (best_mbmode.ref_frame[0] > INTRA_FRAME &&
best_mbmode.mv[0].as_int == 0 &&
#if CONFIG_EXT_INTER
best_mbmode.ref_frame[1] == NONE) {
(best_mbmode.ref_frame[1] <= INTRA_FRAME)
#else
(best_mbmode.ref_frame[1] == NONE || best_mbmode.mv[1].as_int == 0)) {
(best_mbmode.ref_frame[1] == NONE || best_mbmode.mv[1].as_int == 0)
#endif // CONFIG_EXT_INTER
) {
int16_t mode_ctx = mbmi_ext->mode_context[best_mbmode.ref_frame[0]];
#if !CONFIG_EXT_INTER
if (best_mbmode.ref_frame[1] > NONE)
@ -7430,11 +7555,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET))
best_mbmode.mode = ZEROMV;
}
if (best_mbmode.mode == NEARMV) {
uint8_t ref_frame_type = vp10_ref_frame_type(best_mbmode.ref_frame);
best_mbmode.ref_mv_idx = best_ref_mv_idx[ref_frame_type];
}
#endif
if (best_mode_index < 0 || best_rd >= best_rd_so_far) {

View file

@ -168,7 +168,6 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
if (speed >= 2) {
sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD
: USE_LARGESTALL;
sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
FLAG_SKIP_INTRA_DIRMISMATCH |
FLAG_SKIP_INTRA_BESTINTER |