Enable recursive partition down to 4x4

This commit allows the rate-distortion optimization recursion in the
encoder to go down to 4x4 block size. It deprecates the use of the
I4X4_PRED and SPLITMV syntax elements in bit-stream writing/reading.
The now-unused probability models will be removed in the next patch.
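
The gist is that once the recursion itself can reach 4x4, a sub-8x8
block size already implies the old sub-block modes, so they no longer
need to be signalled. A minimal sketch of that convention (the helper
name is hypothetical; MB_MODE_INFO, sb_type and the block-size enum
are the ones used in the diff below):

// Sketch only: with CONFIG_AB4X4 the sub-8x8 modes are implied by
// sb_type rather than read/written as syntax elements.
static INLINE int uses_implied_subblock_mode(const MB_MODE_INFO *mbmi) {
  // Blocks below 8x8 take I4X4_PRED (intra) or SPLITMV (inter)
  // without spending bits on the mode itself.
  return mbmi->sb_type < BLOCK_SIZE_SB8X8;
}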

The partition type search and the bit-stream are now capable of
supporting the rectangular partitions of an 8x8 block, i.e., 8x4 and
4x8. The rate-distortion parts still need to be revised so that these
two partition types are tested in the RD loop.
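
For the 8x8 parent size this extends the subsize derivation the same
way it already works for the larger square sizes. A condensed,
hypothetical sketch of that mapping (the real get_subsize in the diff
below handles every parent size; the enum values are taken from
vp9_enums.h as shown in this change):

// Sketch: subsize of an 8x8 block for each partition type.
static BLOCK_SIZE_TYPE subsize_of_8x8(PARTITION_TYPE partition) {
  switch (partition) {
    case PARTITION_NONE:  return BLOCK_SIZE_SB8X8;  // no split
    case PARTITION_HORZ:  return BLOCK_SIZE_SB8X4;  // two 8x4 halves
    case PARTITION_VERT:  return BLOCK_SIZE_SB4X8;  // two 4x8 halves
    case PARTITION_SPLIT: return BLOCK_SIZE_AB4X4;  // four 4x4 quarters
    default: assert(0);   return BLOCK_SIZE_SB8X8;
  }
}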

Change-Id: I0dfe3b90a1507ad6138db10cc58e6e237a06a9d6
This commit is contained in:
Jingning Han 2013-05-10 17:06:37 -07:00
Parent dee12bdf8f
Commit 1f26840fbf
10 changed files with 350 additions and 42 deletions

View file

@ -222,12 +222,21 @@ static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) {
int a = b_width_log2(sb_type) - 1;
#if CONFIG_AB4X4
// align 4x4 block to mode_info
if (a < 0)
a = 0;
#endif
assert(a >= 0);
return a;
}
static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) {
int a = b_height_log2(sb_type) - 1;
#if CONFIG_AB4X4
if (a < 0)
a = 0;
#endif
assert(a >= 0);
return a;
}
@ -442,9 +451,12 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
int bhl = mi_height_log2(sb_type);
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
int i;
// skip macroblock partition
#if !CONFIG_AB4X4
// skip 8x8 block partition
if (bsl == 0)
return;
#endif
// update the partition context at the end notes. set partition bits
// of block sizes larger than the current one to be one, and partition
@ -492,7 +504,11 @@ static INLINE int partition_plane_context(MACROBLOCKD *xd,
above = (above > 0);
left = (left > 0);
#if CONFIG_AB4X4
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
#else
return (left * 2 + above) + (bsl - 1) * PARTITION_PLOFFSET;
#endif
}
static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
@ -509,6 +525,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB32X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB16X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB8X4;
#endif
else
assert(0);
break;
@ -519,6 +539,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB16X32;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X16;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB4X8;
#endif
else
assert(0);
break;
@ -529,6 +553,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_MB16X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_AB4X4;
#endif
else
assert(0);
break;

View file

@ -106,6 +106,12 @@ const vp9_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP9_SUBMVREFS - 1] = {
const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// FIXME(jingning,rbultje) put real probabilities here
#if CONFIG_AB4X4
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
{104, 90, 134},
#endif
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
@ -513,6 +519,7 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
vp9_sub_mv_ref_tree, fc->sub_mv_ref_counts[i],
fc->pre_sub_mv_ref_prob[i], fc->sub_mv_ref_prob[i],
LEFT4X4);
for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
fc->partition_counts[i], fc->pre_partition_prob[i],

View file

@ -48,6 +48,10 @@ typedef enum PARTITION_TYPE {
} PARTITION_TYPE;
#define PARTITION_PLOFFSET 4 // number of probability models per block size
#if CONFIG_AB4X4
#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
#else
#define NUM_PARTITION_CONTEXTS (3 * PARTITION_PLOFFSET)
#endif
#endif // VP9_COMMON_VP9_ENUMS_H_

View file

@ -119,13 +119,25 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
// luma mode
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
m->mbmi.mode = read_kf_sb_ymode(r,
cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]);
else
m->mbmi.mode = I4X4_PRED;
#else
m->mbmi.mode = m->mbmi.sb_type > BLOCK_SIZE_SB8X8 ?
read_kf_sb_ymode(r, cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]):
read_kf_mb_ymode(r, cm->kf_ymode_prob[cm->kf_ymode_probs_index]);
#endif
m->mbmi.ref_frame = INTRA_FRAME;
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (m->mbmi.mode == I4X4_PRED) {
#endif
int i;
for (i = 0; i < 4; ++i) {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
@ -139,7 +151,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
if (cm->txfm_mode == TX_MODE_SELECT &&
!m->mbmi.mb_skip_coeff && m->mbmi.mode != I4X4_PRED) {
!m->mbmi.mb_skip_coeff &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
@ -150,7 +168,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 &&
m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != I4X4_PRED) {
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
m->mbmi.txfm_size = TX_8X8;
} else {
m->mbmi.txfm_size = TX_4X4;
@ -618,9 +642,16 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
} else {
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8)
mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
else
mbmi->mode = SPLITMV;
#else
mbmi->mode = mbmi->sb_type > BLOCK_SIZE_SB8X8 ?
read_sb_mv_ref(r, mv_ref_p)
: read_mv_ref(r, mv_ref_p);
#endif
vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]);
}
@ -820,6 +851,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// required for left and above block mv
mv0->as_int = 0;
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
} else {
mbmi->mode = I4X4_PRED;
}
#else
if (mbmi->sb_type > BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
@ -827,9 +866,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->mode = read_ymode(r, cm->fc.ymode_prob);
cm->fc.ymode_counts[mbmi->mode]++;
}
#endif
// If MB mode is I4X4_PRED read the block modes
#if CONFIG_AB4X4
if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mbmi->mode == I4X4_PRED) {
#endif
int j = 0;
do {
int m = read_bmode(r, cm->fc.bmode_prob);
@ -842,9 +886,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
}
#if CONFIG_AB4X4
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
#else
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != I4X4_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
#endif
const int allow_16x16 = mbmi->sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = mbmi->sb_type >= BLOCK_SIZE_SB32X32;
mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
@ -852,13 +901,21 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
} else if (cm->txfm_mode >= ALLOW_16X16 &&
mbmi->sb_type >= BLOCK_SIZE_MB16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->sb_type >= BLOCK_SIZE_MB16X16
#if !CONFIG_AB4X4
&& ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))
#endif
) {
mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
(mbmi->sb_type >= BLOCK_SIZE_SB8X8))
#else
(!(mbmi->ref_frame == INTRA_FRAME && mbmi->mode == I4X4_PRED) &&
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV))) {
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV)))
#endif
{
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;

View file

@ -417,10 +417,14 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_decode_mb_mode_mv(pbi, xd, mi_row, mi_col, r);
set_refs(pbi, mi_row, mi_col);
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
#else
if (bsize == BLOCK_SIZE_SB8X8 &&
(xd->mode_info_context->mbmi.mode == SPLITMV ||
xd->mode_info_context->mbmi.mode == I4X4_PRED))
decode_atom(pbi, xd, mi_row, mi_col, r, bsize);
#endif
decode_atom(pbi, xd, mi_row, mi_col, r, BLOCK_SIZE_SB8X8);
else
decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
@ -439,7 +443,17 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
return;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
// read the partition information
xd->left_seg_context = pc->left_seg_context + (mi_row & MI_MASK);
@ -451,6 +465,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
}
subsize = get_subsize(bsize, partition);
switch (partition) {
case PARTITION_NONE:
decode_modes_b(pbi, mi_row, mi_col, r, subsize);
@ -476,8 +491,13 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
assert(0);
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(pc, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}

View file

@ -629,12 +629,21 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
active_section = 6;
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
else
write_ymode(bc, mode, pc->fc.ymode_prob);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mode == I4X4_PRED) {
#endif
int j = 0;
do {
write_bmode(bc, m->bmi[j].as_mode.first,
@ -654,11 +663,16 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
#if CONFIG_AB4X4
if (mi->sb_type >= BLOCK_SIZE_SB8X8)
write_sb_mv_ref(bc, mode, mv_ref_p);
#else
if (mi->sb_type > BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
} else {
write_mv_ref(bc, mode, mv_ref_p);
}
#endif
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
}
@ -744,11 +758,20 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
}
#if CONFIG_AB4X4
if (((rf == INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8) ||
(rf != INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP)))
#else
if (((rf == INTRA_FRAME && mode != I4X4_PRED) ||
(rf != INTRA_FRAME && mode != SPLITMV)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP))) {
SEG_LVL_SKIP)))
#endif
{
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
@ -780,12 +803,21 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
else
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (ym == I4X4_PRED) {
#endif
int i = 0;
do {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
@ -803,8 +835,13 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#else
if (ym != I4X4_PRED && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#endif
TX_SIZE sz = m->mbmi.txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
@ -876,7 +913,19 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
else
assert(0);
#if CONFIG_AB4X4
if (bsize == BLOCK_SIZE_SB8X8 && m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
partition = PARTITION_SPLIT;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
xd->above_seg_context = cm->above_seg_context + mi_col;
@ -915,8 +964,13 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}

View file

@ -140,7 +140,12 @@ struct macroblock {
// TODO(jingning): Need to refactor the structure arrays that buffers the
// coding mode decisions of each partition type.
PICK_MODE_CONTEXT sb8_context[4][4][4];
#if CONFIG_AB4X4
PICK_MODE_CONTEXT ab4x4_context[4][4][4];
PICK_MODE_CONTEXT sb8x4_context[4][4][4];
PICK_MODE_CONTEXT sb4x8_context[4][4][4];
#endif
PICK_MODE_CONTEXT sb8x8_context[4][4][4];
PICK_MODE_CONTEXT sb8x16_context[4][4][2];
PICK_MODE_CONTEXT sb16x8_context[4][4][2];
PICK_MODE_CONTEXT mb_context[4][4];
@ -153,6 +158,9 @@ struct macroblock {
PICK_MODE_CONTEXT sb64_context;
int partition_cost[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
#if CONFIG_AB4X4
BLOCK_SIZE_TYPE b_partitioning[4][4][4];
#endif
BLOCK_SIZE_TYPE mb_partitioning[4][4];
BLOCK_SIZE_TYPE sb_partitioning[4];
BLOCK_SIZE_TYPE sb64_partitioning;

View file

@ -361,8 +361,8 @@ static void update_state(VP9_COMP *cpi,
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
#endif
assert(mi->mbmi.sb_type == bsize);
assert(mi->mbmi.sb_type == bsize);
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < bh; y++) {
@ -640,6 +640,12 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
set_offsets(cpi, mi_row, mi_col, bsize);
xd->mode_info_context->mbmi.sb_type = bsize;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
@ -718,7 +724,14 @@ static void set_block_index(MACROBLOCKD *xd, int idx,
} else if (bsize >= BLOCK_SIZE_MB16X16) {
xd->mb_index = idx;
} else {
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8)
xd->b_index = idx;
else
xd->ab_index = idx;
#else
xd->b_index = idx;
#endif
}
}
@ -749,7 +762,15 @@ static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
case BLOCK_SIZE_SB8X16:
return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X8:
return &x->sb8_context[xd->sb_index][xd->mb_index][xd->b_index];
return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
#if CONFIG_AB4X4
case BLOCK_SIZE_SB8X4:
return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB4X8:
return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_AB4X4:
return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
#endif
default:
assert(0);
return NULL;
@ -766,6 +787,10 @@ static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
return &x->sb_partitioning[xd->sb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_partitioning[xd->sb_index][xd->mb_index];
#if CONFIG_AB4X4
case BLOCK_SIZE_SB8X8:
return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
#endif
default:
assert(0);
return NULL;
@ -833,14 +858,20 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
const int bsl = mi_width_log2(bsize), bs = (1 << bsl) / 2;
int bwl, bhl;
int UNINITIALIZED_IS_SAFE(pl);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize > BLOCK_SIZE_SB8X8) {
#if CONFIG_AB4X4
c1 = BLOCK_SIZE_AB4X4;
if (bsize >= BLOCK_SIZE_SB8X8)
#else
if (bsize > BLOCK_SIZE_SB8X8)
#endif
{
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
c1 = *(get_sb_partitioning(x, bsize));
@ -849,8 +880,18 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
bwl = mi_width_log2(c1), bhl = mi_height_log2(c1);
if (bsl == bwl && bsl == bhl) {
#if CONFIG_AB4X4
if (output_enabled && bsize >= BLOCK_SIZE_SB8X8) {
if (bsize > BLOCK_SIZE_SB8X8 ||
(bsize == BLOCK_SIZE_SB8X8 && c1 == bsize))
cpi->partition_count[pl][PARTITION_NONE]++;
else
cpi->partition_count[pl][PARTITION_SPLIT]++;
}
#else
if (output_enabled && bsize > BLOCK_SIZE_SB8X8)
cpi->partition_count[pl][PARTITION_NONE]++;
#endif
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
} else if (bsl == bhl && bsl > bwl) {
if (output_enabled)
@ -867,14 +908,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
int i;
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
}
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cpi->partition_count[pl][PARTITION_SPLIT]++;
@ -888,8 +922,13 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
}
}
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || bsl == bwl || bsl == bhl)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, c1, bsize);
}
@ -907,7 +946,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int bsl = b_width_log2(bsize), bs = 1 << bsl;
int msl = mi_height_log2(bsize), ms = 1 << msl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
@ -915,6 +954,15 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
BLOCK_SIZE_TYPE subsize;
int srate = INT_MAX, sdist = INT_MAX;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
#endif
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
// buffer the above/left context information of the block in search.
@ -932,7 +980,11 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
sizeof(PARTITION_CONTEXT) * ms);
// PARTITION_SPLIT
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize >= BLOCK_SIZE_MB16X16) {
#endif
int r4 = 0, d4 = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
*(get_sb_partitioning(x, bsize)) = subsize;
@ -948,18 +1000,26 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
&r, &d);
r4 += r;
d4 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
#if CONFIG_AB4X4
if (r4 < INT_MAX)
r4 += x->partition_cost[pl][PARTITION_SPLIT];
#else
r4 += x->partition_cost[pl][PARTITION_SPLIT];
#endif
assert(r4 >= 0);
assert(d4 >= 0);
srate = r4;
sdist = d4;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// TODO(jingning): need to enable 4x8 and 8x4 partition coding
// PARTITION_HORZ
if ((mi_col + ms <= cm->mi_cols) && (mi_row + (ms >> 1) <= cm->mi_rows) &&
(bsize >= BLOCK_SIZE_MB16X16)) {
@ -1036,7 +1096,11 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
int r, d;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
get_block_context(x, bsize));
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize >= BLOCK_SIZE_MB16X16) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
@ -1046,21 +1110,28 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r;
sdist = d;
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8)
#else
if (bsize >= BLOCK_SIZE_MB16X16)
#endif
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
assert(srate < INT_MAX && sdist < INT_MAX);
*rate = srate;
*dist = sdist;
if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
if (bsize == BLOCK_SIZE_SB64X64)
if (bsize == BLOCK_SIZE_SB64X64) {
assert(tp_orig < *tp);
else
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
assert(tp_orig == *tp);
}
}
static void encode_sb_row(VP9_COMP *cpi, int mi_row,
@ -1587,7 +1658,11 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
}
#endif
#if CONFIG_AB4X4
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
#else
if (xd->mode_info_context->mbmi.sb_type > BLOCK_SIZE_SB8X8) {
#endif
++cpi->sb_ymode_count[m];
} else {
++cpi->ymode_count[m];
@ -1672,13 +1747,17 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_update_zbin_extra(cpi, x);
}
#if CONFIG_AB4X4
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
bsize < BLOCK_SIZE_SB8X8) {
#else
if (xd->mode_info_context->mbmi.mode == I4X4_PRED) {
assert(bsize == BLOCK_SIZE_SB8X8 &&
xd->mode_info_context->mbmi.txfm_size == TX_4X4);
vp9_encode_intra4x4mby(x, bsize);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
vp9_encode_sbuv(cm, x, bsize);
#endif
vp9_encode_intra4x4mby(x, BLOCK_SIZE_SB8X8);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, BLOCK_SIZE_SB8X8);
vp9_encode_sbuv(cm, x, BLOCK_SIZE_SB8X8);
if (output_enabled)
sum_intra_stats(cpi, x);
@ -1714,15 +1793,22 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
? &cpi->common.yv12_fb[second_ref_fb_idx] : NULL,
mi_row, mi_col, xd->scale_factor, xd->scale_factor_uv);
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
vp9_build_inter_predictors_sb(xd, mi_row, mi_col,
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
}
#if CONFIG_AB4X4
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
bsize < BLOCK_SIZE_SB8X8) {
#else
if (xd->mode_info_context->mbmi.mode == I4X4_PRED) {
assert(bsize == BLOCK_SIZE_SB8X8);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
#endif
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, BLOCK_SIZE_SB8X8);
} else if (!x->skip) {
vp9_encode_sb(cm, x, bsize);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled,
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
} else {
// FIXME(rbultje): not tile-aware (mi - 1)
int mb_skip_context =
@ -1731,7 +1817,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_reset_sb_tokens_context(xd, bsize);
vp9_reset_sb_tokens_context(xd,
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
}
// copy skip flag on all mb_mode_info contexts in this SB
@ -1761,8 +1848,12 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
sz = TX_16X16;
if (sz == TX_16X16 && bsize < BLOCK_SIZE_MB16X16)
sz = TX_8X8;
#if CONFIG_AB4X4
if (sz == TX_8X8 && bsize < BLOCK_SIZE_SB8X8)
#else
if (sz == TX_8X8 && (xd->mode_info_context->mbmi.mode == SPLITMV ||
xd->mode_info_context->mbmi.mode == I4X4_PRED))
#endif
sz = TX_4X4;
for (y = 0; y < bh; y++) {

View file

@ -689,7 +689,11 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
int *Distortion, int64_t best_rd) {
int i;
MACROBLOCKD *const xd = &mb->e_mbd;
#if CONFIG_AB4X4
int cost = 0;
#else
int cost = mb->mbmode_cost[xd->frame_type][I4X4_PRED];
#endif
int distortion = 0;
int tot_rate_y = 0;
int64_t total_rd = 0;
@ -719,7 +723,6 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
total_rd += rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
t_above + x_idx, t_left + y_idx,
&r, &ry, &d);
cost += r;
distortion += d;
tot_rate_y += ry;
@ -753,6 +756,13 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
TX_SIZE UNINITIALIZED_IS_SAFE(best_tx);
int i;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
return best_rd;
}
#endif
for (i = 0; i < NB_TXFM_MODES; i++)
txfm_cache[i] = INT64_MAX;
@ -2308,7 +2318,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv, &uv_skip,
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
bsize);
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
#else
if (bsize == BLOCK_SIZE_SB8X8)
#endif
err4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4_y,
&rate4x4_y_tokenonly,
&dist4x4_y, err);
@ -2321,7 +2335,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
sizeof(x->sb32_context[xd->sb_index].txfm_rd_diff));
xd->mode_info_context->mbmi.mode = mode;
xd->mode_info_context->mbmi.txfm_size = txfm_size;
#if CONFIG_AB4X4
} else if (bsize < BLOCK_SIZE_SB8X8 && err4x4 < err) {
#else
} else if (bsize == BLOCK_SIZE_SB8X8 && err4x4 < err) {
#endif
*returnrate = rate4x4_y + rate_uv +
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
*returndist = dist4x4_y + (dist_uv >> 2);
@ -2463,7 +2481,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
i++) {
mbmi->txfm_size = i;
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[i], &rate_uv_tokenonly[i],
&dist_uv[i], &skip_uv[i], bsize);
&dist_uv[i], &skip_uv[i],
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
bsize);
mode_uv[i] = mbmi->uv_mode;
}
}
@ -2493,6 +2513,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
|| (cpi->ref_frame_flags & flag_list[ref_frame]))) {
continue;
}
if (cpi->speed > 0) {
if (!(ref_frame_mask & (1 << ref_frame))) {
continue;
@ -2539,10 +2560,18 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->interp_filter = cm->mcomp_filter_type;
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
if (bsize < BLOCK_SIZE_SB8X8 &&
!(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
#else
if (bsize != BLOCK_SIZE_SB8X8 &&
(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
#endif
if (comp_pred) {
if (ref_frame == ALTREF_FRAME) {
@ -2605,7 +2634,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// Note the rate value returned here includes the cost of coding
// the I4X4_PRED mode : x->mbmode_cost[xd->frame_type][I4X4_PRED];
assert(bsize == BLOCK_SIZE_SB8X8);
mbmi->txfm_size = TX_4X4;
rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y,
&distortion_y, INT64_MAX);
@ -3001,7 +3029,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
}
#if CONFIG_AB4X4
if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
*returnrate = INT_MAX;
*returndistortion = INT_MAX;
return best_rd;
}
#endif
assert((cm->mcomp_filter_type == SWITCHABLE) ||
(cm->mcomp_filter_type == best_mbmode.interp_filter) ||

View file

@ -119,7 +119,12 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
TOKENEXTRA *t = *tp; /* store tokens starting here */
const int eob = xd->plane[plane].eobs[block];
const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
#if CONFIG_AB4X4
const BLOCK_SIZE_TYPE sb_type = (mbmi->sb_type < BLOCK_SIZE_SB8X8) ?
BLOCK_SIZE_SB8X8 : mbmi->sb_type;
#else
const BLOCK_SIZE_TYPE sb_type = mbmi->sb_type;
#endif
const int bwl = b_width_log2(sb_type);
const int off = block >> (2 * tx_size);
const int mod = bwl - tx_size - xd->plane[plane].subsampling_x;