Merge "sb8x8 integration in rd loop." into experimental

Ronald S. Bultje 2013-04-30 17:29:46 -07:00 committed by Gerrit Code Review
Parents 3af212eb35 d068d869b9
Commit 3cf77ccef7
26 changed files with 1580 additions and 146 deletions

View file

@ -83,7 +83,9 @@ typedef enum {
D27_PRED, /* Directional 22 deg prediction [anti-clockwise from 0 deg hor] */
D63_PRED, /* Directional 67 deg prediction [anti-clockwise from 0 deg hor] */
TM_PRED, /* Truemotion prediction */
#if !CONFIG_SB8X8
I8X8_PRED, /* 8x8 based prediction, each 8x8 has its own mode */
#endif
I4X4_PRED, /* 4x4 based prediction, each 4x4 has its own mode */
NEARESTMV,
NEARMV,
@ -126,7 +128,9 @@ typedef enum {
#define VP9_YMODES (I4X4_PRED + 1)
#define VP9_UV_MODES (TM_PRED + 1)
#if !CONFIG_SB8X8
#define VP9_I8X8_MODES (TM_PRED + 1)
#endif
#define VP9_I32X32_MODES (TM_PRED + 1)
#define VP9_MVREFS (1 + SPLITMV - NEARESTMV)
@ -169,6 +173,7 @@ typedef enum {
#define VP9_NKF_BINTRAMODES (VP9_BINTRAMODES) /* 10 */
#endif
#if !CONFIG_SB8X8
typedef enum {
PARTITIONING_16X8 = 0,
PARTITIONING_8X16,
@ -176,6 +181,7 @@ typedef enum {
PARTITIONING_4X4,
NB_PARTITIONINGS,
} SPLITMV_PARTITIONING_TYPE;
#endif
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
@ -271,7 +277,9 @@ typedef struct {
int mb_mode_context[MAX_REF_FRAMES];
#if !CONFIG_SB8X8
SPLITMV_PARTITIONING_TYPE partitioning;
#endif
unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
unsigned char need_to_clamp_mvs;
unsigned char need_to_clamp_secondmv;
@ -293,7 +301,7 @@ typedef struct {
typedef struct {
MB_MODE_INFO mbmi;
union b_mode_info bmi[16];
union b_mode_info bmi[16 >> (CONFIG_SB8X8 * 2)];
} MODE_INFO;
struct scale_factors {
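Aside (illustrative sketch, not part of the commit): the 16 >> (CONFIG_SB8X8 * 2) expression above is the pattern used throughout this change to rescale per-MODE_INFO sub-block arrays. With the experiment off, a MODE_INFO describes a 16x16 macroblock holding sixteen 4x4 sub-blocks; with it on, a MODE_INFO describes an 8x8 block holding four, which is also why later hunks address the last entry as bmi[15 >> (2 * CONFIG_SB8X8)]. A minimal check of the arithmetic:

#include <stdio.h>

#ifndef CONFIG_SB8X8
#define CONFIG_SB8X8 1  /* assumption: built with the sb8x8 experiment enabled */
#endif

int main(void) {
  const int bmi_entries = 16 >> (CONFIG_SB8X8 * 2);  /* 16 -> 4 */
  const int last_index  = 15 >> (2 * CONFIG_SB8X8);  /* 15 -> 3 */
  const int grid_edge   = 4 >> CONFIG_SB8X8;         /*  4 -> 2 */
  printf("bmi[] entries per MODE_INFO: %d (last index %d, %dx%d grid)\n",
         bmi_entries, last_index, grid_edge, grid_edge);
  return 0;
}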
@ -433,8 +441,11 @@ typedef struct macroblockd {
int corrupted;
int sb_index;
int mb_index; // Index of the MB in the SB (0..3)
int sb_index; // index of 32x32 block inside the 64x64 block
int mb_index; // index of 16x16 block inside the 32x32 block
#if CONFIG_SB8X8
int b_index; // index of 8x8 block inside the 16x16 block
#endif
int q_index;
} MACROBLOCKD;
@ -442,10 +453,10 @@ typedef struct macroblockd {
static INLINE void update_partition_context(MACROBLOCKD *xd,
BLOCK_SIZE_TYPE sb_type,
BLOCK_SIZE_TYPE sb_size) {
int bsl = mi_width_log2(sb_size) - CONFIG_SB8X8, bs = 1 << bsl;
int bwl = mi_width_log2(sb_type) - CONFIG_SB8X8;
int bhl = mi_height_log2(sb_type) - CONFIG_SB8X8;
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - CONFIG_SB8X8 - bsl;
int bsl = mi_width_log2(sb_size), bs = 1 << bsl;
int bwl = mi_width_log2(sb_type);
int bhl = mi_height_log2(sb_type);
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
int i;
// skip macroblock partition
if (bsl == 0)
@ -481,9 +492,9 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
static INLINE int partition_plane_context(MACROBLOCKD *xd,
BLOCK_SIZE_TYPE sb_type) {
int bsl = mi_width_log2(sb_type) - CONFIG_SB8X8, bs = 1 << bsl;
int bsl = mi_width_log2(sb_type), bs = 1 << bsl;
int above = 0, left = 0, i;
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl - CONFIG_SB8X8;
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
assert(mi_width_log2(sb_type) == mi_height_log2(sb_type));
assert(bsl >= 0);
@ -581,6 +592,7 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
xd->mode_info_context->bmi[ib].as_mode.context :
#endif
xd->mode_info_context->bmi[ib].as_mode.first);
#if !CONFIG_SB8X8
} else if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
xd->q_index < ACTIVE_HT) {
const int ic = (ib & 10);
@ -615,7 +627,8 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
// Use 2D DCT
tx_type = DCT_DCT;
#endif
} else if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
#endif // !CONFIG_SB8X8
} else if (xd->mode_info_context->mbmi.mode <= TM_PRED &&
xd->q_index < ACTIVE_HT) {
#if USE_ADST_FOR_I16X16_4X4
#if USE_ADST_PERIPHERY_ONLY
@ -659,14 +672,17 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
#endif
if (ib >= (1 << (wb + hb))) // no chroma adst
return tx_type;
#if !CONFIG_SB8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
xd->q_index < ACTIVE_HT8) {
// TODO(rbultje): MB_PREDICTION_MODE / B_PREDICTION_MODE should be merged
// or the relationship otherwise modified to address this type conversion.
tx_type = txfm_map(pred_mode_conv(
(MB_PREDICTION_MODE)xd->mode_info_context->bmi[ib].as_mode.first));
} else if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
xd->q_index < ACTIVE_HT8) {
} else
#endif // CONFIG_SB8X8
if (xd->mode_info_context->mbmi.mode <= TM_PRED &&
xd->q_index < ACTIVE_HT8) {
#if USE_ADST_FOR_I16X16_8X8
#if USE_ADST_PERIPHERY_ONLY
const int hmax = 1 << wb;
@ -707,7 +723,7 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
#endif
if (ib >= (1 << (wb + hb)))
return tx_type;
if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
if (xd->mode_info_context->mbmi.mode <= TM_PRED &&
xd->q_index < ACTIVE_HT16) {
tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_PERIPHERY_ONLY
@ -738,7 +754,9 @@ void vp9_setup_block_dptrs(MACROBLOCKD *xd);
static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const TX_SIZE size = mbmi->txfm_size;
#if !CONFIG_SB8X8
const MB_PREDICTION_MODE mode = mbmi->mode;
#endif // !CONFIG_SB8X8
switch (mbmi->sb_type) {
case BLOCK_SIZE_SB64X64:
@ -750,6 +768,17 @@ static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
return TX_16X16;
else
return size;
#if CONFIG_SB8X8
case BLOCK_SIZE_SB32X16:
case BLOCK_SIZE_SB16X32:
case BLOCK_SIZE_MB16X16:
if (size == TX_16X16)
return TX_8X8;
else
return size;
default:
return TX_4X4;
#else // CONFIG_SB8X8
default:
if (size == TX_16X16)
return TX_8X8;
@ -757,6 +786,7 @@ static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
return TX_4X4;
else
return size;
#endif // CONFIG_SB8X8
}
return size;
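Aside (hedged sketch, not the codec's function): the new CONFIG_SB8X8 branch of get_uv_tx_size above extends the existing rule to the sub-16x16 block sizes. With 4:2:0 subsampling the chroma plane of a block is half as wide and half as tall, so the luma transform size is clamped to half the smaller block dimension, bottoming out at 4x4. A hypothetical stand-in for that rule, with sizes expressed as log2 of the width in pixels:

/* Sketch only; uv_tx_size_log2() is a hypothetical helper for illustration. */
static int uv_tx_size_log2(int luma_tx_log2,        /* 2..5 for 4x4..32x32   */
                           int min_block_dim_log2 /* e.g. 4 for a 16x16 MB */) {
  int max_uv_log2 = min_block_dim_log2 - 1;        /* chroma block dimension */
  if (max_uv_log2 < 2)
    max_uv_log2 = 2;                               /* never below 4x4        */
  return luma_tx_log2 < max_uv_log2 ? luma_tx_log2 : max_uv_log2;
}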
@ -812,7 +842,10 @@ typedef void (*foreach_transformed_block_visitor)(int plane, int block,
void *arg);
static INLINE void foreach_transformed_block_in_plane(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
int is_split, foreach_transformed_block_visitor visit, void *arg) {
#if !CONFIG_SB8X8
int is_split,
#endif // !CONFIG_SB8X8
foreach_transformed_block_visitor visit, void *arg) {
const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
@ -830,7 +863,10 @@ static INLINE void foreach_transformed_block_in_plane(
// than the size of the subsampled data, or forced externally by the mb mode.
const int ss_max = MAX(xd->plane[plane].subsampling_x,
xd->plane[plane].subsampling_y);
const int ss_txfrm_size = txfrm_size_b > ss_block_size || is_split
const int ss_txfrm_size = txfrm_size_b > ss_block_size
#if !CONFIG_SB8X8
|| is_split
#endif // !CONFIG_SB8X8
? txfrm_size_b - ss_max * 2
: txfrm_size_b;
const int step = 1 << ss_txfrm_size;
@ -847,17 +883,24 @@ static INLINE void foreach_transformed_block_in_plane(
static INLINE void foreach_transformed_block(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
foreach_transformed_block_visitor visit, void *arg) {
#if !CONFIG_SB8X8
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
const int is_split =
xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
(mode == I8X8_PRED || mode == SPLITMV);
#endif // !CONFIG_SB8X8
int plane;
for (plane = 0; plane < MAX_MB_PLANE; plane++) {
#if !CONFIG_SB8X8
const int is_split_chroma = is_split &&
xd->plane[plane].plane_type == PLANE_TYPE_UV;
#endif // !CONFIG_SB8X8
foreach_transformed_block_in_plane(xd, bsize, plane, is_split_chroma,
foreach_transformed_block_in_plane(xd, bsize, plane,
#if !CONFIG_SB8X8
is_split_chroma,
#endif // !CONFIG_SB8X8
visit, arg);
}
}
@ -865,14 +908,19 @@ static INLINE void foreach_transformed_block(
static INLINE void foreach_transformed_block_uv(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
foreach_transformed_block_visitor visit, void *arg) {
#if !CONFIG_SB8X8
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
const int is_split =
xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
(mode == I8X8_PRED || mode == SPLITMV);
#endif // !CONFIG_SB8X8
int plane;
for (plane = 1; plane < MAX_MB_PLANE; plane++) {
foreach_transformed_block_in_plane(xd, bsize, plane, is_split,
foreach_transformed_block_in_plane(xd, bsize, plane,
#if !CONFIG_SB8X8
is_split,
#endif // !CONFIG_SB8X8
visit, arg);
}
}
@ -900,11 +948,16 @@ static INLINE void foreach_predicted_block_in_plane(
int pred_w, pred_h;
if (mode == SPLITMV) {
#if CONFIG_SB8X8
pred_w = 0;
pred_h = 0;
#else
// 4x4 or 8x8
const int is_4x4 =
(xd->mode_info_context->mbmi.partitioning == PARTITIONING_4X4);
pred_w = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_x;
pred_h = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_y;
#endif
} else {
pred_w = bw;
pred_h = bh;

View file

@ -16,6 +16,17 @@
#include "vpx_mem/vpx_mem.h"
static const unsigned int kf_y_mode_cts[8][VP9_YMODES] = {
#if CONFIG_SB8X8
/* DC V H D45 135 117 153 D27 D63 TM i4X4 */
{12, 6, 5, 5, 5, 5, 5, 5, 5, 2, 200},
{25, 13, 13, 7, 7, 7, 7, 7, 7, 6, 160},
{31, 17, 18, 8, 8, 8, 8, 8, 8, 9, 139},
{40, 22, 23, 8, 8, 8, 8, 8, 8, 12, 116},
{53, 26, 28, 8, 8, 8, 8, 8, 8, 13, 94},
{68, 33, 35, 8, 8, 8, 8, 8, 8, 17, 68},
{78, 38, 38, 8, 8, 8, 8, 8, 8, 19, 52},
{89, 42, 42, 8, 8, 8, 8, 8, 8, 21, 34},
#else
/* DC V H D45 135 117 153 D27 D63 TM i8x8 i4X4 */
{12, 6, 5, 5, 5, 5, 5, 5, 5, 2, 22, 200},
{25, 13, 13, 7, 7, 7, 7, 7, 7, 6, 27, 160},
@ -25,11 +36,17 @@ static const unsigned int kf_y_mode_cts[8][VP9_YMODES] = {
{68, 33, 35, 8, 8, 8, 8, 8, 8, 17, 20, 68},
{78, 38, 38, 8, 8, 8, 8, 8, 8, 19, 16, 52},
{89, 42, 42, 8, 8, 8, 8, 8, 8, 21, 12, 34},
#endif
};
static const unsigned int y_mode_cts [VP9_YMODES] = {
#if CONFIG_SB8X8
/* DC V H D45 135 117 153 D27 D63 TM i4X4 */
98, 19, 15, 14, 14, 14, 14, 12, 12, 13, 70
#else
/* DC V H D45 135 117 153 D27 D63 TM i8x8 i4X4 */
98, 19, 15, 14, 14, 14, 14, 12, 12, 13, 16, 70
#endif
};
static const unsigned int uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
@ -44,14 +61,18 @@ static const unsigned int uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
{ 150, 15, 10, 10, 10, 10, 10, 75, 10, 6}, /* D27 */
{ 150, 15, 10, 10, 10, 10, 10, 10, 75, 6}, /* D63 */
{ 160, 30, 30, 10, 10, 10, 10, 10, 10, 16}, /* TM */
#if !CONFIG_SB8X8
{ 132, 46, 40, 10, 10, 10, 10, 10, 10, 18}, /* i8x8 - never used */
#endif
{ 150, 35, 41, 10, 10, 10, 10, 10, 10, 10}, /* i4X4 */
};
#if !CONFIG_SB8X8
static const unsigned int i8x8_mode_cts [VP9_I8X8_MODES] = {
/* DC V H D45 135 117 153 D27 D63 TM */
73, 49, 61, 30, 30, 30, 30, 30, 30, 13
};
#endif
static const unsigned int kf_uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
// DC V H D45 135 117 153 D27 D63 TM
@ -65,7 +86,9 @@ static const unsigned int kf_uv_mode_cts [VP9_YMODES] [VP9_UV_MODES] = {
{ 102, 33, 20, 20, 20, 20, 20, 64, 20, 14}, /* D27 */
{ 102, 33, 20, 20, 20, 20, 20, 20, 64, 14}, /* D63 */
{ 132, 36, 30, 20, 20, 20, 20, 20, 20, 18}, /* TM */
#if !CONFIG_SB8X8
{ 122, 41, 35, 20, 20, 20, 20, 20, 20, 18}, /* i8x8 - never used */
#endif
{ 122, 41, 35, 20, 20, 20, 20, 20, 20, 18}, /* I4X4 */
};
@ -123,6 +146,7 @@ const vp9_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP9_SUBMVREFS - 1] = {
{ 208, 1, 1 }
};
#if !CONFIG_SB8X8
vp9_mbsplit vp9_mbsplits [VP9_NUMMBSPLITS] = {
{
0, 0, 0, 0,
@ -150,9 +174,17 @@ vp9_mbsplit vp9_mbsplits [VP9_NUMMBSPLITS] = {
const int vp9_mbsplit_count [VP9_NUMMBSPLITS] = { 2, 2, 4, 16};
const vp9_prob vp9_mbsplit_probs [VP9_NUMMBSPLITS - 1] = { 110, 111, 150};
#endif
const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
#if CONFIG_SB8X8
// FIXME(jingning,rbultje) put real probabilities here
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
{104, 90, 134},
#endif
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
@ -228,8 +260,12 @@ const vp9_tree_index vp9_ymode_tree[VP9_YMODES * 2 - 2] = {
-D27_PRED, -D63_PRED,
16, 18,
-V_PRED, -H_PRED,
#if CONFIG_SB8X8
-TM_PRED, -I4X4_PRED
#else
-TM_PRED, 20,
-I4X4_PRED, -I8X8_PRED
#endif
};
const vp9_tree_index vp9_kf_ymode_tree[VP9_YMODES * 2 - 2] = {
@ -242,10 +278,15 @@ const vp9_tree_index vp9_kf_ymode_tree[VP9_YMODES * 2 - 2] = {
-D27_PRED, -D63_PRED,
16, 18,
-V_PRED, -H_PRED,
#if CONFIG_SB8X8
-TM_PRED, -I4X4_PRED
#else
-TM_PRED, 20,
-I4X4_PRED, -I8X8_PRED
#endif
};
#if !CONFIG_SB8X8
const vp9_tree_index vp9_i8x8_mode_tree[VP9_I8X8_MODES * 2 - 2] = {
2, 14,
-DC_PRED, 4,
@ -257,6 +298,7 @@ const vp9_tree_index vp9_i8x8_mode_tree[VP9_I8X8_MODES * 2 - 2] = {
-V_PRED, 16,
-H_PRED, -TM_PRED
};
#endif
const vp9_tree_index vp9_uv_mode_tree[VP9_UV_MODES * 2 - 2] = {
2, 14,
@ -270,11 +312,13 @@ const vp9_tree_index vp9_uv_mode_tree[VP9_UV_MODES * 2 - 2] = {
-H_PRED, -TM_PRED
};
#if !CONFIG_SB8X8
const vp9_tree_index vp9_mbsplit_tree[6] = {
-PARTITIONING_4X4, 2,
-PARTITIONING_8X8, 4,
-PARTITIONING_16X8, -PARTITIONING_8X16,
};
#endif
const vp9_tree_index vp9_mv_ref_tree[8] = {
-ZEROMV, 2,
@ -308,8 +352,10 @@ struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
#if !CONFIG_SB8X8
struct vp9_token vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
struct vp9_token vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
#endif
struct vp9_token vp9_mv_ref_encoding_array[VP9_MVREFS];
struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
@ -340,12 +386,16 @@ void vp9_init_mbmode_probs(VP9_COMMON *x) {
bct, uv_mode_cts[i], 0);
}
#if !CONFIG_SB8X8
vp9_tree_probs_from_distribution(vp9_i8x8_mode_tree, x->fc.i8x8_mode_prob,
bct, i8x8_mode_cts, 0);
#endif
vpx_memcpy(x->fc.sub_mv_ref_prob, vp9_sub_mv_ref_prob2,
sizeof(vp9_sub_mv_ref_prob2));
#if !CONFIG_SB8X8
vpx_memcpy(x->fc.mbsplit_prob, vp9_mbsplit_probs, sizeof(vp9_mbsplit_probs));
#endif
vpx_memcpy(x->fc.switchable_interp_prob, vp9_switchable_interp_prob,
sizeof(vp9_switchable_interp_prob));
@ -449,8 +499,10 @@ void vp9_entropy_mode_init() {
vp9_tokens_from_tree(vp9_sb_ymode_encodings, vp9_sb_ymode_tree);
vp9_tokens_from_tree(vp9_sb_kf_ymode_encodings, vp9_sb_kf_ymode_tree);
vp9_tokens_from_tree(vp9_uv_mode_encodings, vp9_uv_mode_tree);
#if !CONFIG_SB8X8
vp9_tokens_from_tree(vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree);
vp9_tokens_from_tree(vp9_mbsplit_encodings, vp9_mbsplit_tree);
#endif
vp9_tokens_from_tree(vp9_switchable_interp_encodings,
vp9_switchable_interp_tree);
vp9_tokens_from_tree(vp9_partition_encodings, vp9_partition_tree);
@ -629,9 +681,11 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
update_mode_probs(VP9_NKF_BINTRAMODES, vp9_bmode_tree,
fc->bmode_counts, fc->pre_bmode_prob,
fc->bmode_prob, 0);
#if !CONFIG_SB8X8
update_mode_probs(VP9_I8X8_MODES,
vp9_i8x8_mode_tree, fc->i8x8_mode_counts,
fc->pre_i8x8_mode_prob, fc->i8x8_mode_prob, 0);
#endif
for (i = 0; i < SUBMVREF_COUNT; ++i)
update_mode_probs(VP9_SUBMVREFS,
@ -639,9 +693,11 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
fc->pre_sub_mv_ref_prob[i], fc->sub_mv_ref_prob[i],
LEFT4X4);
#if !CONFIG_SB8X8
update_mode_probs(VP9_NUMMBSPLITS, vp9_mbsplit_tree,
fc->mbsplit_counts, fc->pre_mbsplit_prob,
fc->mbsplit_prob, 0);
#endif
#if CONFIG_COMP_INTERINTRA_PRED
if (cm->use_interintra) {
int factor, interintra_prob, count;

View file

@ -15,7 +15,9 @@
#include "vp9/common/vp9_treecoder.h"
#define SUBMVREF_COUNT 5
#if !CONFIG_SB8X8
#define VP9_NUMMBSPLITS 4
#endif
#if CONFIG_COMP_INTERINTRA_PRED
#define VP9_DEF_INTERINTRA_PROB 248
@ -24,6 +26,7 @@
#define SEPARATE_INTERINTRA_UV 0
#endif
#if !CONFIG_SB8X8
typedef const int vp9_mbsplit[16];
extern vp9_mbsplit vp9_mbsplits[VP9_NUMMBSPLITS];
@ -31,6 +34,7 @@ extern vp9_mbsplit vp9_mbsplits[VP9_NUMMBSPLITS];
extern const int vp9_mbsplit_count[VP9_NUMMBSPLITS]; /* # of subsets */
extern const vp9_prob vp9_mbsplit_probs[VP9_NUMMBSPLITS - 1];
#endif
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
@ -48,8 +52,10 @@ extern const vp9_tree_index vp9_kf_ymode_tree[];
extern const vp9_tree_index vp9_uv_mode_tree[];
#define vp9_sb_ymode_tree vp9_uv_mode_tree
#define vp9_sb_kf_ymode_tree vp9_uv_mode_tree
#if !CONFIG_SB8X8
extern const vp9_tree_index vp9_i8x8_mode_tree[];
extern const vp9_tree_index vp9_mbsplit_tree[];
#endif
extern const vp9_tree_index vp9_mv_ref_tree[];
extern const vp9_tree_index vp9_sb_mv_ref_tree[];
extern const vp9_tree_index vp9_sub_mv_ref_tree[];
@ -60,9 +66,11 @@ extern struct vp9_token vp9_ymode_encodings[VP9_YMODES];
extern struct vp9_token vp9_sb_ymode_encodings[VP9_I32X32_MODES];
extern struct vp9_token vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
extern struct vp9_token vp9_kf_ymode_encodings[VP9_YMODES];
extern struct vp9_token vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
extern struct vp9_token vp9_uv_mode_encodings[VP9_UV_MODES];
#if !CONFIG_SB8X8
extern struct vp9_token vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
extern struct vp9_token vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
#endif
/* Inter mode values do not start at zero */

View file

@ -47,6 +47,6 @@ typedef enum PARTITION_TYPE {
} PARTITION_TYPE;
#define PARTITION_PLOFFSET 4 // number of probability models per block size
#define NUM_PARTITION_CONTEXTS (2 * PARTITION_PLOFFSET)
#define NUM_PARTITION_CONTEXTS ((2 + CONFIG_SB8X8) * PARTITION_PLOFFSET)
#endif // VP9_COMMON_VP9_ENUMS_H_
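Aside (worked example, not part of the commit): PARTITION_PLOFFSET is documented just above as the number of probability models per block size, so the new definition simply adds one more partitionable level (16x16 split into 8x8s) on top of the existing 32x32 and 64x64 levels:

#include <stdio.h>
#define PARTITION_PLOFFSET 4  /* probability models per partitionable block size */

int main(void) {
  int config_sb8x8;
  for (config_sb8x8 = 0; config_sb8x8 <= 1; ++config_sb8x8)
    printf("CONFIG_SB8X8=%d -> NUM_PARTITION_CONTEXTS = %d\n",
           config_sb8x8, (2 + config_sb8x8) * PARTITION_PLOFFSET);  /* 8 / 12 */
  return 0;
}

The four extra rows added to vp9_partition_probs in vp9_entropymode.c are the models for that new level.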

View file

@ -74,11 +74,13 @@ vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc,
vp9_prob p[VP9_MVREFS - 1],
const int context);
#if !CONFIG_SB8X8
extern const uint8_t vp9_mbsplit_offset[4][16];
#endif
static int left_block_mv(const MACROBLOCKD *xd,
const MODE_INFO *cur_mb, int b) {
if (!(b & 3)) {
if (!(b & (3 >> CONFIG_SB8X8))) {
if (!xd->left_available)
return 0;
@ -88,7 +90,7 @@ static int left_block_mv(const MACROBLOCKD *xd,
if (cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.mv[0].as_int;
b += 4;
b += 4 >> CONFIG_SB8X8;
}
return (cur_mb->bmi + b - 1)->as_mv[0].as_int;
@ -96,7 +98,7 @@ static int left_block_mv(const MACROBLOCKD *xd,
static int left_block_second_mv(const MACROBLOCKD *xd,
const MODE_INFO *cur_mb, int b) {
if (!(b & 3)) {
if (!(b & (3 >> CONFIG_SB8X8))) {
if (!xd->left_available)
return 0;
@ -106,7 +108,7 @@ static int left_block_second_mv(const MACROBLOCKD *xd,
if (cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.second_ref_frame > 0 ?
cur_mb->mbmi.mv[1].as_int : cur_mb->mbmi.mv[0].as_int;
b += 4;
b += 4 >> CONFIG_SB8X8;
}
return cur_mb->mbmi.second_ref_frame > 0 ?
@ -115,72 +117,85 @@ static int left_block_second_mv(const MACROBLOCKD *xd,
}
static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
if (!(b >> 2)) {
if (!(b >> (2 >> CONFIG_SB8X8))) {
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
if (cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.mv[0].as_int;
b += 16;
b += 16 >> (2 * CONFIG_SB8X8);
}
return (cur_mb->bmi + b - 4)->as_mv[0].as_int;
return (cur_mb->bmi + b - (4 >> CONFIG_SB8X8))->as_mv[0].as_int;
}
static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
if (!(b >> 2)) {
if (!(b >> (2 >> CONFIG_SB8X8))) {
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
if (cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.second_ref_frame > 0 ?
cur_mb->mbmi.mv[1].as_int : cur_mb->mbmi.mv[0].as_int;
b += 16;
b += 16 >> (2 * CONFIG_SB8X8);
}
return cur_mb->mbmi.second_ref_frame > 0 ?
(cur_mb->bmi + b - 4)->as_mv[1].as_int :
(cur_mb->bmi + b - 4)->as_mv[0].as_int;
(cur_mb->bmi + b - (4 >> CONFIG_SB8X8))->as_mv[1].as_int :
(cur_mb->bmi + b - (4 >> CONFIG_SB8X8))->as_mv[0].as_int;
}
static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
if (!(b & 3)) {
#if CONFIG_SB8X8
// FIXME(rbultje, jingning): temporary hack because jenkins doesn't
// understand this condition. This will go away soon.
if (b == 0 || b == 2) {
#else
if (!(b & (3 >> CONFIG_SB8X8))) {
#endif
/* On L edge, get from MB to left of us */
--cur_mb;
if (cur_mb->mbmi.mode < I8X8_PRED) {
if (cur_mb->mbmi.mode <= TM_PRED) {
return pred_mode_conv(cur_mb->mbmi.mode);
#if !CONFIG_SB8X8
} else if (cur_mb->mbmi.mode == I8X8_PRED) {
return pred_mode_conv(
(MB_PREDICTION_MODE)(cur_mb->bmi + 3 + b)->as_mode.first);
#endif // !CONFIG_SB8X8
} else if (cur_mb->mbmi.mode == I4X4_PRED) {
return ((cur_mb->bmi + 3 + b)->as_mode.first);
return ((cur_mb->bmi + (3 >> CONFIG_SB8X8) + b)->as_mode.first);
} else {
return B_DC_PRED;
}
}
#if CONFIG_SB8X8
assert(b == 1 || b == 3);
#endif
return (cur_mb->bmi + b - 1)->as_mode.first;
}
static B_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
int b, int mi_stride) {
if (!(b >> 2)) {
if (!(b >> (2 >> CONFIG_SB8X8))) {
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
if (cur_mb->mbmi.mode < I8X8_PRED) {
if (cur_mb->mbmi.mode <= TM_PRED) {
return pred_mode_conv(cur_mb->mbmi.mode);
#if !CONFIG_SB8X8
} else if (cur_mb->mbmi.mode == I8X8_PRED) {
return pred_mode_conv(
(MB_PREDICTION_MODE)(cur_mb->bmi + 12 + b)->as_mode.first);
#endif
} else if (cur_mb->mbmi.mode == I4X4_PRED) {
return ((cur_mb->bmi + 12 + b)->as_mode.first);
return ((cur_mb->bmi + (CONFIG_SB8X8 ? 2 : 12) + b)->as_mode.first);
} else {
return B_DC_PRED;
}
}
return (cur_mb->bmi + b - 4)->as_mode.first;
return (cur_mb->bmi + b - (4 >> CONFIG_SB8X8))->as_mode.first;
}
#endif // VP9_COMMON_VP9_FINDNEARMV_H_
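Aside (illustrative sketch, not part of the diff): the recurring 3 >> CONFIG_SB8X8, 4 >> CONFIG_SB8X8 and b >> (2 >> CONFIG_SB8X8) expressions in this file all follow from the sub-block grid inside one MODE_INFO shrinking from 4x4 entries to 2x2, so the left-edge test, the top-edge test and the row stride scale with it. The FIXME hack in left_block_mode, if (b == 0 || b == 2), is the same left-edge test written out for the 2x2 case.

#include <stdio.h>
#define CONFIG_SB8X8 1              /* assumption for the example            */
#define GRID (4 >> CONFIG_SB8X8)    /* sub-blocks per row inside a MODE_INFO */

static int on_left_edge(int b) { return (b & (GRID - 1)) == 0; }  /* b & 3 or b & 1   */
static int on_top_edge(int b)  { return (b / GRID) == 0; }        /* b >> 2 or b >> 1 */

int main(void) {
  int b;
  for (b = 0; b < GRID * GRID; ++b)
    printf("sub-block %d: left edge=%d, top edge=%d\n",
           b, on_left_edge(b), on_top_edge(b));
  return 0;
}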

View file

@ -27,7 +27,9 @@ static void lf_init_lut(loop_filter_info_n *lfi) {
lfi->mode_lf_lut[H_PRED] = 1;
lfi->mode_lf_lut[TM_PRED] = 1;
lfi->mode_lf_lut[I4X4_PRED] = 0;
#if !CONFIG_SB8X8
lfi->mode_lf_lut[I8X8_PRED] = 0;
#endif
lfi->mode_lf_lut[ZEROMV] = 1;
lfi->mode_lf_lut[NEARESTMV] = 2;
lfi->mode_lf_lut[NEARMV] = 2;
@ -165,10 +167,14 @@ void vp9_loop_filter_frame_init(VP9_COMMON *cm,
// the MB uses a prediction size of 16x16 and either 16x16 transform
// is used or there is no residue at all.
static int mb_lf_skip(const MB_MODE_INFO *const mbmi) {
const MB_PREDICTION_MODE mode = mbmi->mode;
const int skip_coef = mbmi->mb_skip_coeff;
const int tx_size = mbmi->txfm_size;
#if CONFIG_SB8X8
return mbmi->sb_type >= BLOCK_SIZE_MB16X16 &&
#else
const MB_PREDICTION_MODE mode = mbmi->mode;
return mode != I4X4_PRED && mode != I8X8_PRED && mode != SPLITMV &&
#endif
(tx_size >= TX_16X16 || skip_coef);
}
@ -220,7 +226,13 @@ static void lpf_mb(VP9_COMMON *cm, const MODE_INFO *mi,
if (!skip_lf) {
if (tx_size >= TX_8X8) {
if (tx_size == TX_8X8 && (mode == I8X8_PRED || mode == SPLITMV))
if (tx_size == TX_8X8 &&
#if CONFIG_SB8X8
(mi->mbmi.sb_type < BLOCK_SIZE_MB16X16)
#else
(mode == I8X8_PRED || mode == SPLITMV)
#endif
)
vp9_loop_filter_bh8x8(y_ptr, u_ptr, v_ptr,
y_stride, uv_stride, &lfi);
else
@ -244,7 +256,13 @@ static void lpf_mb(VP9_COMMON *cm, const MODE_INFO *mi,
if (!skip_lf) {
if (tx_size >= TX_8X8) {
if (tx_size == TX_8X8 && (mode == I8X8_PRED || mode == SPLITMV))
if (tx_size == TX_8X8 &&
#if CONFIG_SB8X8
(mi->mbmi.sb_type < BLOCK_SIZE_MB16X16)
#else
(mode == I8X8_PRED || mode == SPLITMV)
#endif
)
vp9_loop_filter_bv8x8(y_ptr, u_ptr, v_ptr,
y_stride, uv_stride, &lfi);
else

View file

@ -65,9 +65,13 @@ typedef struct frame_contexts {
vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
#if !CONFIG_SB8X8
vp9_prob i8x8_mode_prob[VP9_I8X8_MODES - 1];
#endif
vp9_prob sub_mv_ref_prob[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
#if !CONFIG_SB8X8
vp9_prob mbsplit_prob[VP9_NUMMBSPLITS - 1];
#endif
vp9_prob partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
vp9_coeff_probs coef_probs_4x4[BLOCK_TYPES];
@ -87,17 +91,25 @@ typedef struct frame_contexts {
vp9_prob pre_ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
vp9_prob pre_sb_ymode_prob[VP9_I32X32_MODES - 1];
vp9_prob pre_uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
#if !CONFIG_SB8X8
vp9_prob pre_i8x8_mode_prob[VP9_I8X8_MODES - 1];
#endif
vp9_prob pre_sub_mv_ref_prob[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
#if !CONFIG_SB8X8
vp9_prob pre_mbsplit_prob[VP9_NUMMBSPLITS - 1];
#endif
vp9_prob pre_partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
unsigned int bmode_counts[VP9_NKF_BINTRAMODES];
unsigned int ymode_counts[VP9_YMODES]; /* interframe intra mode probs */
unsigned int sb_ymode_counts[VP9_I32X32_MODES];
unsigned int uv_mode_counts[VP9_YMODES][VP9_UV_MODES];
#if !CONFIG_SB8X8
unsigned int i8x8_mode_counts[VP9_I8X8_MODES]; /* interframe intra probs */
#endif
unsigned int sub_mv_ref_counts[SUBMVREF_COUNT][VP9_SUBMVREFS];
#if !CONFIG_SB8X8
unsigned int mbsplit_counts[VP9_NUMMBSPLITS];
#endif
unsigned int partition_counts[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
vp9_coeff_probs pre_coef_probs_4x4[BLOCK_TYPES];

View file

@ -265,19 +265,27 @@ static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
#if CONFIG_SB8X8
#define IDX1 2
#define IDX2 3
#else
#define IDX1 4
#define IDX2 5
#endif
static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int off, int idx) {
const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.row +
mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.row +
mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.row +
mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.row;
mb->mode_info_context->bmi[off + IDX1].as_mv[idx].as_mv.row +
mb->mode_info_context->bmi[off + IDX2].as_mv[idx].as_mv.row;
return round_mv_comp_q4(temp);
}
static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int off, int idx) {
const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.col +
mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.col +
mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.col +
mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.col;
mb->mode_info_context->bmi[off + IDX1].as_mv[idx].as_mv.col +
mb->mode_info_context->bmi[off + IDX2].as_mv[idx].as_mv.col;
return round_mv_comp_q4(temp);
}
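Aside (hedged sketch, not the codec's code): with the IDX1/IDX2 macros the two functions above still average the same four 4x4 motion vectors, the 2x2 quad of sub-blocks starting at off, whether the bmi grid has a row stride of 4 (offsets 0, 1, 4, 5) or of 2 (offsets 0, 1, 2, 3). The rounding helper is the one defined in the hunk above; average4_q4() is a hypothetical wrapper added only to illustrate the computation.

static int round_mv_comp_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;   /* rounded divide by 4 */
}

static int average4_q4(int a, int b, int c, int d) {
  return round_mv_comp_q4(a + b + c + d);           /* mean of four q4 components */
}

Calling it with the row (or column) components of bmi[off + 0], bmi[off + 1], bmi[off + IDX1] and bmi[off + IDX2] reproduces the result of the functions above.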

View file

@ -65,9 +65,11 @@ static MB_PREDICTION_MODE read_kf_mb_ymode(vp9_reader *r, const vp9_prob *p) {
return (MB_PREDICTION_MODE)treed_read(r, vp9_kf_ymode_tree, p);
}
#if !CONFIG_SB8X8
static int read_i8x8_mode(vp9_reader *r, const vp9_prob *p) {
return treed_read(r, vp9_i8x8_mode_tree, p);
}
#endif
static MB_PREDICTION_MODE read_uv_mode(vp9_reader *r, const vp9_prob *p) {
return (MB_PREDICTION_MODE)treed_read(r, vp9_uv_mode_tree, p);
@ -161,6 +163,7 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
}
}
#if !CONFIG_SB8X8
if (m->mbmi.mode == I8X8_PRED) {
int i;
for (i = 0; i < 4; ++i) {
@ -175,14 +178,25 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
}
// chroma mode
if (m->mbmi.mode != I8X8_PRED) {
if (m->mbmi.mode != I8X8_PRED)
#endif
{
m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
}
if (cm->txfm_mode == TX_MODE_SELECT &&
!m->mbmi.mb_skip_coeff &&
m->mbmi.mode <= I8X8_PRED) {
#if CONFIG_SB8X8
m->mbmi.mode != I4X4_PRED
#else
m->mbmi.mode <= I8X8_PRED
#endif
) {
#if CONFIG_SB8X8
const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
#else
const int allow_16x16 = m->mbmi.mode != I8X8_PRED;
#endif
const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
} else if (cm->txfm_mode >= ALLOW_32X32 &&
@ -767,19 +781,29 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->uv_mode = DC_PRED;
switch (mbmi->mode) {
case SPLITMV: {
#if CONFIG_SB8X8
const int num_p = 4;
#else
const int s = treed_read(r, vp9_mbsplit_tree, cm->fc.mbsplit_prob);
const int num_p = vp9_mbsplit_count[s];
#endif
int j = 0;
#if !CONFIG_SB8X8
cm->fc.mbsplit_counts[s]++;
mbmi->need_to_clamp_mvs = 0;
mbmi->partitioning = s;
#endif
mbmi->need_to_clamp_mvs = 0;
do { // for each subset j
int_mv leftmv, abovemv, second_leftmv, second_abovemv;
int_mv blockmv, secondmv;
int mv_contz;
int blockmode;
#if CONFIG_SB8X8
int k = j;
#else
int k = vp9_mbsplit_offset[s][j]; // first block in subset j
#endif
leftmv.as_int = left_block_mv(xd, mi, k);
abovemv.as_int = above_block_mv(mi, k, mis);
@ -851,6 +875,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
}
*/
#if !CONFIG_SB8X8
{
/* Fill (uniform) modes, mvs of jth subset.
Must do it here because ensuing subsets can
@ -866,12 +891,12 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
fill_offset++;
} while (--fill_count);
}
#endif
} while (++j < num_p);
}
mv0->as_int = mi->bmi[15].as_mv[0].as_int;
mv1->as_int = mi->bmi[15].as_mv[1].as_int;
mv0->as_int = mi->bmi[15 >> (2 * CONFIG_SB8X8)].as_mv[0].as_int;
mv1->as_int = mi->bmi[15 >> (2 * CONFIG_SB8X8)].as_mv[1].as_int;
break; /* done with SPLITMV */
@ -957,6 +982,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
} while (++j < 16);
}
#if !CONFIG_SB8X8
if (mbmi->mode == I8X8_PRED) {
int i;
for (i = 0; i < 4; i++) {
@ -969,7 +995,9 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mi->bmi[ib + 5].as_mode.first = mode8x8;
cm->fc.i8x8_mode_counts[mode8x8]++;
}
} else {
} else
#endif
{
mbmi->uv_mode = read_uv_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
}
@ -980,23 +1008,44 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
*/
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= I8X8_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && !(mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4)))) {
((mbmi->ref_frame == INTRA_FRAME &&
#if CONFIG_SB8X8
mbmi->mode != I4X4_PRED
#else
mbmi->mode <= I8X8_PRED
#endif
) ||
(mbmi->ref_frame != INTRA_FRAME &&
#if CONFIG_SB8X8
mbmi->mode != SPLITMV
#else
!(mbmi->mode == SPLITMV && mbmi->partitioning == PARTITIONING_4X4)
#endif
))) {
#if CONFIG_SB8X8
const int allow_16x16 = mbmi->sb_type >= BLOCK_SIZE_MB16X16;
#else
const int allow_16x16 = mbmi->mode != I8X8_PRED && mbmi->mode != SPLITMV;
#endif
const int allow_32x32 = mbmi->sb_type >= BLOCK_SIZE_SB32X32;
mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
} else if (mbmi->sb_type >= BLOCK_SIZE_SB32X32 &&
cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
} else if (cm->txfm_mode >= ALLOW_16X16 &&
#if CONFIG_SB8X8
mbmi->sb_type >= BLOCK_SIZE_MB16X16 &&
#endif
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 &&
(!(mbmi->ref_frame == INTRA_FRAME && mbmi->mode == I4X4_PRED) &&
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4))) {
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV
#if !CONFIG_SB8X8
&& mbmi->partitioning == PARTITIONING_4X4
#endif
))) {
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;

View file

@ -203,6 +203,7 @@ static void mb_init_dequantizer(VP9_COMMON *pc, MACROBLOCKD *xd) {
xd->plane[i].dequant = pc->uv_dequant[xd->q_index];
}
#if !CONFIG_SB8X8
static void decode_16x16(MACROBLOCKD *xd) {
const TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
@ -283,6 +284,7 @@ static void decode_8x8(MACROBLOCKD *xd) {
xd->plane[1].dst.stride, xd->plane[2].eobs[0]);
}
}
#endif
static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) {
struct macroblockd_plane *const y = &xd->plane[0];
@ -298,6 +300,7 @@ static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) {
}
}
#if !CONFIG_SB8X8
static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
TX_TYPE tx_type;
int i = 0;
@ -353,6 +356,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
xd->plane[1].dst.stride, xd->plane[2].eobs);
}
}
#endif
static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
BLOCK_SIZE_TYPE bsize,
@ -520,6 +524,7 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row, int mi_col,
}
}
#if !CONFIG_SB8X8
// TODO(jingning): Need to merge SB and MB decoding. The MB decoding currently
// couples special handles on I8x8, B_PRED, and splitmv modes.
static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
@ -622,6 +627,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
#endif
}
#endif
static int get_delta_q(vp9_reader *r, int *dq) {
const int old_value = *dq;
@ -708,6 +714,9 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_decode_mb_mode_mv(pbi, xd, mi_row, mi_col, r);
set_refs(pbi, mi_row, mi_col);
#if CONFIG_SB8X8
decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
#else
// TODO(jingning): merge decode_sb_ and decode_mb_
if (bsize > BLOCK_SIZE_MB16X16) {
decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
@ -724,6 +733,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
// and SPLITMV of 8x8, 16x8, and 8x16. To be migrated into decode_sb.
decode_mb(pbi, xd, mi_row, mi_col, r);
}
#endif
xd->corrupted |= vp9_reader_has_error(r);
}
@ -1142,9 +1152,13 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
vp9_copy(fc->pre_sb_ymode_prob, fc->sb_ymode_prob);
vp9_copy(fc->pre_uv_mode_prob, fc->uv_mode_prob);
vp9_copy(fc->pre_bmode_prob, fc->bmode_prob);
#if !CONFIG_SB8X8
vp9_copy(fc->pre_i8x8_mode_prob, fc->i8x8_mode_prob);
#endif
vp9_copy(fc->pre_sub_mv_ref_prob, fc->sub_mv_ref_prob);
#if !CONFIG_SB8X8
vp9_copy(fc->pre_mbsplit_prob, fc->mbsplit_prob);
#endif
vp9_copy(fc->pre_partition_prob, fc->partition_prob);
fc->pre_nmvc = fc->nmvc;
@ -1157,9 +1171,13 @@ static void update_frame_context(FRAME_CONTEXT *fc) {
vp9_zero(fc->sb_ymode_counts);
vp9_zero(fc->uv_mode_counts);
vp9_zero(fc->bmode_counts);
#if !CONFIG_SB8X8
vp9_zero(fc->i8x8_mode_counts);
#endif
vp9_zero(fc->sub_mv_ref_counts);
#if !CONFIG_SB8X8
vp9_zero(fc->mbsplit_counts);
#endif
vp9_zero(fc->NMVcount);
vp9_zero(fc->mv_ref_ct);
vp9_zero(fc->partition_counts);

View file

@ -281,9 +281,11 @@ static void sb_kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
}
#if !CONFIG_SB8X8
static void write_i8x8_mode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m);
}
#endif
static void write_uv_mode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_uv_mode_tree, p, vp9_uv_mode_encodings + m);
@ -302,9 +304,11 @@ static void write_kf_bmode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_kf_bmode_tree, p, vp9_kf_bmode_encodings + m);
}
#if !CONFIG_SB8X8
static void write_split(vp9_writer *bc, int x, const vp9_prob *p) {
write_token(bc, vp9_mbsplit_tree, p, vp9_mbsplit_encodings + x);
}
#endif
static int prob_update_savings(const unsigned int *ct,
const vp9_prob oldp, const vp9_prob newp,
@ -728,8 +732,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
do {
write_bmode(bc, m->bmi[j].as_mode.first,
pc->fc.bmode_prob);
} while (++j < 16);
} while (++j < (16 >> (CONFIG_SB8X8 * 2)));
}
#if !CONFIG_SB8X8
if (mode == I8X8_PRED) {
write_i8x8_mode(bc, m->bmi[0].as_mode.first,
pc->fc.i8x8_mode_prob);
@ -739,7 +744,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
pc->fc.i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[10].as_mode.first,
pc->fc.i8x8_mode_prob);
} else {
} else
#endif
{
write_uv_mode(bc, mi->uv_mode,
pc->fc.uv_mode_prob[mode]);
}
@ -824,25 +831,33 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
++count_mb_seg[mi->partitioning];
#endif
#if !CONFIG_SB8X8
write_split(bc, mi->partitioning, cpi->common.fc.mbsplit_prob);
cpi->mbsplit_count[mi->partitioning]++;
#endif
do {
B_PREDICTION_MODE blockmode;
int_mv blockmv;
#if !CONFIG_SB8X8
const int *const L = vp9_mbsplits[mi->partitioning];
#endif
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
blockmode = cpi->mb.partition_info->bmi[j].mode;
blockmv = cpi->mb.partition_info->bmi[j].mv;
#if CONFIG_SB8X8
k = j;
#else
#if CONFIG_DEBUG
while (j != L[++k])
if (k >= 16)
assert(0);
#else
while (j != L[++k]);
#endif
#endif
leftmv.as_int = left_block_mv(xd, m, k);
abovemv.as_int = above_block_mv(m, k, mis);
@ -875,6 +890,22 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
}
#if CONFIG_SB8X8
if (((rf == INTRA_FRAME && mode != I4X4_PRED) ||
(rf != INTRA_FRAME && mode != SPLITMV)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP))) {
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
}
}
#else
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
(rf != INTRA_FRAME && !(mode == SPLITMV &&
mi->partitioning == PARTITIONING_4X4))) &&
@ -890,6 +921,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
}
}
#endif
}
static void write_mb_modes_kf(const VP9_COMP *cpi,
@ -930,8 +962,9 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
#endif
write_kf_bmode(bc, bm, c->kf_bmode_prob[a][l]);
} while (++i < 16);
} while (++i < (16 >> (CONFIG_SB8X8 * 2)));
}
#if !CONFIG_SB8X8
if (ym == I8X8_PRED) {
write_i8x8_mode(bc, m->bmi[0].as_mode.first, c->fc.i8x8_mode_prob);
// printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
@ -942,8 +975,22 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
write_i8x8_mode(bc, m->bmi[10].as_mode.first, c->fc.i8x8_mode_prob);
// printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
} else
#endif
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#if CONFIG_SB8X8
if (ym != I4X4_PRED && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
TX_SIZE sz = m->mbmi.txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
}
}
#else
if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
TX_SIZE sz = m->mbmi.txfm_size;
@ -955,6 +1002,7 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
}
}
#endif
}
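Aside (hedged stand-in, not the bitstream writer itself): both new CONFIG_SB8X8 blocks above signal the transform size as a truncated-unary series of up to three binary flags, each written only when the block is large enough for the next transform size to be legal; the decoder mirrors this gating through the allow_16x16/allow_32x32 arguments of select_txfm_size(). The shape of the pattern:

enum { SK_TX_4X4, SK_TX_8X8, SK_TX_16X16, SK_TX_32X32 };

/* put_bit is a stand-in for vp9_write(bc, bit, prob). */
static void write_tx_size_sketch(int sz, int allow_16x16, int allow_32x32,
                                 void (*put_bit)(int bit, int prob),
                                 const int prob_tx[3]) {
  put_bit(sz != SK_TX_4X4, prob_tx[0]);
  if (allow_16x16 && sz != SK_TX_4X4) {           /* block >= 16x16 */
    put_bit(sz != SK_TX_8X8, prob_tx[1]);
    if (allow_32x32 && sz != SK_TX_8X8)           /* block >= 32x32 */
      put_bit(sz != SK_TX_16X16, prob_tx[2]);
  }
}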
@ -2153,15 +2201,19 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp9_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
vp9_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
#if !CONFIG_SB8X8
vp9_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp9_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
#endif
vp9_copy(cpi->common.fc.pre_partition_prob, cpi->common.fc.partition_prob);
cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
#if CONFIG_COMP_INTERINTRA_PRED
cpi->common.fc.pre_interintra_prob = cpi->common.fc.interintra_prob;
#endif
vp9_zero(cpi->sub_mv_ref_count);
#if !CONFIG_SB8X8
vp9_zero(cpi->mbsplit_count);
#endif
vp9_zero(cpi->common.fc.mv_ref_ct);
update_coef_probs(cpi, &header_bc);

View file

@ -117,7 +117,9 @@ struct macroblock {
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
int bmode_costs[VP9_KF_BINTRAMODES][VP9_KF_BINTRAMODES][VP9_KF_BINTRAMODES];
#if !CONFIG_SB8X8
int i8x8_mode_costs[MB_MODE_COUNT];
#endif
int inter_bmode_costs[B_MODE_COUNT];
int switchable_interp_costs[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS];
@ -141,6 +143,11 @@ struct macroblock {
// Structure to hold context for each of the 4 MBs within a SB:
// when encoded as 4 independent MBs:
#if CONFIG_SB8X8
PICK_MODE_CONTEXT sb8_context[4][4][4];
PICK_MODE_CONTEXT sb8x16_context[4][4][2];
PICK_MODE_CONTEXT sb16x8_context[4][4][2];
#endif
PICK_MODE_CONTEXT mb_context[4][4];
PICK_MODE_CONTEXT sb32x16_context[4][2];
PICK_MODE_CONTEXT sb16x32_context[4][2];
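Aside (worked check, not part of the commit): the new context arrays are dimensioned one entry per block of that size inside a 64x64 superblock, indexed through the sb_index/mb_index/b_index hierarchy added to MACROBLOCKD, with the rectangular splits holding two entries per 16x16.

#include <assert.h>

int main(void) {
  assert(4 * 4 * 4 == (64 / 8) * (64 / 8));    /* sb8_context:   64 8x8 blocks   */
  assert(4 * 4 * 2 == (64 / 8) * (64 / 16));   /* sb8x16/sb16x8: 32 blocks each  */
  assert(4 * 4     == (64 / 16) * (64 / 16));  /* mb_context:    16 16x16 blocks */
  return 0;
}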

View file

@ -47,8 +47,10 @@ int enc_debug = 0;
void vp9_select_interp_filter_type(VP9_COMP *cpi);
#if !CONFIG_SB8X8
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col);
#endif
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
@ -380,6 +382,8 @@ static void update_state(VP9_COMP *cpi,
}
}
if (bsize < BLOCK_SIZE_SB32X32) {
if (bsize < BLOCK_SIZE_MB16X16)
ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
}
@ -387,8 +391,10 @@ static void update_state(VP9_COMP *cpi,
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
mbmi->mv[0].as_int =
x->partition_info->bmi[15 >> (CONFIG_SB8X8 * 2)].mv.as_int;
mbmi->mv[1].as_int =
x->partition_info->bmi[15 >> (CONFIG_SB8X8 * 2)].second_mv.as_int;
#if CONFIG_SB8X8
vpx_memcpy(x->partition_info + mis, &ctx->partition_info,
sizeof(PARTITION_INFO));
@ -453,7 +459,9 @@ static void update_state(VP9_COMP *cpi,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
#if !CONFIG_SB8X8
THR_I8X8_PRED /*I8X8_PRED*/,
#endif
THR_B_PRED /*I4X4_PRED*/,
};
cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
@ -667,6 +675,7 @@ static void set_offsets(VP9_COMP *cpi,
}
}
#if !CONFIG_SB8X8
static int pick_mb_mode(VP9_COMP *cpi,
int mi_row,
int mi_col,
@ -707,6 +716,7 @@ static int pick_mb_mode(VP9_COMP *cpi,
return splitmodes_used;
}
#endif
static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
TOKENEXTRA **tp, int *totalrate, int *totaldist,
@ -790,11 +800,15 @@ static void set_block_index(MACROBLOCKD *xd, int idx,
BLOCK_SIZE_TYPE bsize) {
if (bsize >= BLOCK_SIZE_SB32X32) {
xd->sb_index = idx;
} else {
#if CONFIG_SB8X8
assert(bsize >= BLOCK_SIZE_MB16X16);
#endif
} else if (bsize >= BLOCK_SIZE_MB16X16) {
xd->mb_index = idx;
} else {
xd->b_index = idx;
#else
} else {
xd->mb_index = idx;
#endif
}
}
@ -817,6 +831,14 @@ static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
return &x->sb16x32_context[xd->sb_index][xd->mb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_context[xd->sb_index][xd->mb_index];
#if CONFIG_SB8X8
case BLOCK_SIZE_SB16X8:
return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X16:
return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X8:
return &x->sb8_context[xd->sb_index][xd->mb_index][xd->b_index];
#endif
default:
assert(0);
return NULL;
@ -837,12 +859,15 @@ static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
set_block_index(xd, sub_index, bsize);
set_offsets(cpi, mi_row, mi_col, bsize);
update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
#if !CONFIG_SB8X8
if (bsize == BLOCK_SIZE_MB16X16) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
encode_macroblock(cpi, tp, output_enabled, mi_row, mi_col);
} else {
} else
#endif
{
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
}
@ -857,22 +882,38 @@ static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
int mi_row, int mi_col, int output_enabled,
BLOCK_SIZE_TYPE level,
BLOCK_SIZE_TYPE c1, BLOCK_SIZE_TYPE c2[4]) {
BLOCK_SIZE_TYPE c1, BLOCK_SIZE_TYPE c2[4]
#if CONFIG_SB8X8
, BLOCK_SIZE_TYPE c3[4][4]
#endif
) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int bsl = mi_width_log2(level), bs = 1 << (bsl - 1);
const int bwl = mi_width_log2(c1), bhl = mi_height_log2(c1);
int pl;
int UNINITIALIZED_IS_SAFE(pl);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
set_partition_seg_context(cpi, mi_row, mi_col);
pl = partition_plane_context(xd, level);
#if CONFIG_SB8X8
if (level > BLOCK_SIZE_SB8X8) {
#endif
set_partition_seg_context(cpi, mi_row, mi_col);
pl = partition_plane_context(xd, level);
#if CONFIG_SB8X8
}
#endif
if (bsl == bwl && bsl == bhl) {
if (output_enabled && level > BLOCK_SIZE_MB16X16)
if (output_enabled &&
#if CONFIG_SB8X8
level > BLOCK_SIZE_SB8X8
#else
level > BLOCK_SIZE_MB16X16
#endif
)
cpi->partition_count[pl][PARTITION_NONE]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
} else if (bsl == bhl && bsl > bwl) {
@ -892,9 +933,17 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
assert(bwl < bsl && bhl < bsl);
if (level == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
#if CONFIG_SB8X8
} else if (level == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(level == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
#else
} else {
assert(level == BLOCK_SIZE_SB32X32);
subsize = BLOCK_SIZE_MB16X16;
#endif
}
if (output_enabled)
@ -906,12 +955,22 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
set_block_index(xd, i, subsize);
encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
output_enabled, subsize,
subsize == BLOCK_SIZE_MB16X16 ? c1 : c2[i], c2);
#if CONFIG_SB8X8
c2 ? c2[i] : c1, c3 ? c3[i] : NULL, NULL);
#else
c2 ? c2[i] : c1, NULL);
#endif
}
}
#if CONFIG_SB8X8
if (level > BLOCK_SIZE_SB8X8 &&
(level == BLOCK_SIZE_MB16X16 || bsl == bwl || bsl == bhl))
#else
if (level > BLOCK_SIZE_MB16X16 &&
(level == BLOCK_SIZE_SB32X32 || bsl == bwl || bsl == bhl)) {
(level == BLOCK_SIZE_SB32X32 || bsl == bwl || bsl == bhl))
#endif
{
set_partition_seg_context(cpi, mi_row, mi_col);
update_partition_context(xd, c1, level);
}
@ -934,7 +993,11 @@ static void encode_sb_row(VP9_COMP *cpi,
for (mi_col = cm->cur_tile_mi_col_start;
mi_col < cm->cur_tile_mi_col_end; mi_col += (4 << CONFIG_SB8X8)) {
int i, p;
#if CONFIG_SB8X8
BLOCK_SIZE_TYPE mb_partitioning[4][4];
#endif
BLOCK_SIZE_TYPE sb_partitioning[4];
BLOCK_SIZE_TYPE sb64_partitioning = BLOCK_SIZE_SB32X32;
int sb64_rate = 0, sb64_dist = 0;
int sb64_skip = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
@ -951,6 +1014,9 @@ static void encode_sb_row(VP9_COMP *cpi,
memcpy(&seg_a, cm->above_seg_context + (mi_col >> CONFIG_SB8X8),
sizeof(seg_a));
memcpy(&seg_l, cm->left_seg_context, sizeof(seg_l));
// FIXME(rbultje): this function should probably be rewritten to be
// recursive at some point in the future.
for (i = 0; i < 4; i++) {
const int x_idx = (i & 1) << (1 + CONFIG_SB8X8);
const int y_idx = (i & 2) << CONFIG_SB8X8;
@ -985,6 +1051,10 @@ static void encode_sb_row(VP9_COMP *cpi,
const int x_idx_m = x_idx + ((j & 1) << CONFIG_SB8X8);
const int y_idx_m = y_idx + ((j >> 1) << CONFIG_SB8X8);
int r, d;
#if CONFIG_SB8X8
int r2, d2, mb16_rate = 0, mb16_dist = 0, k;
ENTROPY_CONTEXT l3[4 * MAX_MB_PLANE], a3[4 * MAX_MB_PLANE];
#endif
if (mi_row + y_idx_m >= cm->mi_rows ||
mi_col + x_idx_m >= cm->mi_cols) {
@ -995,18 +1065,175 @@ static void encode_sb_row(VP9_COMP *cpi,
// Index of the MB in the SB 0..3
xd->mb_index = j;
#if CONFIG_SB8X8
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(l3 + 4 * p,
cm->left_context[p] +
(y_idx_m * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_y)),
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_y);
vpx_memcpy(a3 + 4 * p,
cm->above_context[p] +
((mi_col + x_idx_m) * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_x)),
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_x);
}
mb_partitioning[i][j] = BLOCK_SIZE_SB8X8;
for (k = 0; k < 4; k++) {
xd->b_index = k;
// try 8x8 coding
pick_sb_modes(cpi, mi_row + y_idx_m + (k & 1),
mi_col + x_idx_m + (k >> 1),
tp, &r, &d, BLOCK_SIZE_SB8X8,
&x->sb8_context[xd->sb_index][xd->mb_index]
[xd->b_index]);
mb16_rate += r;
mb16_dist += d;
update_state(cpi, &x->sb8_context[xd->sb_index][xd->mb_index]
[xd->b_index],
BLOCK_SIZE_SB8X8, 0);
encode_superblock(cpi, tp,
0, mi_row + y_idx_m, mi_col + x_idx_m,
BLOCK_SIZE_SB8X8);
}
set_partition_seg_context(cpi, mi_row + y_idx_m, mi_col + x_idx_m);
pl = partition_plane_context(xd, BLOCK_SIZE_MB16X16);
mb16_rate += x->partition_cost[pl][PARTITION_SPLIT];
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(cm->left_context[p] +
(y_idx_m * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_y)),
l3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_y);
vpx_memcpy(cm->above_context[p] +
((mi_col + x_idx_m) * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_x)),
a3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_x);
}
// try 8x16 coding
r2 = 0;
d2 = 0;
xd->b_index = 0;
pick_sb_modes(cpi, mi_row + y_idx_m, mi_col + x_idx_m,
tp, &r, &d, BLOCK_SIZE_SB8X16,
&x->sb8x16_context[xd->sb_index][xd->mb_index]
[xd->b_index]);
r2 += r;
d2 += d;
update_state(cpi, &x->sb8x16_context[xd->sb_index][xd->mb_index]
[xd->b_index],
BLOCK_SIZE_SB8X16, 0);
encode_superblock(cpi, tp,
0, mi_row + y_idx_m, mi_col + x_idx_m,
BLOCK_SIZE_SB8X16);
xd->b_index = 1;
pick_sb_modes(cpi, mi_row + y_idx_m, mi_col + x_idx_m + 1,
tp, &r, &d, BLOCK_SIZE_SB8X16,
&x->sb8x16_context[xd->sb_index][xd->mb_index]
[xd->b_index]);
r2 += r;
d2 += d;
set_partition_seg_context(cpi, mi_row + y_idx_m, mi_col + x_idx_m);
pl = partition_plane_context(xd, BLOCK_SIZE_MB16X16);
r2 += x->partition_cost[pl][PARTITION_VERT];
if (RDCOST(x->rdmult, x->rddiv, r2, d2) <
RDCOST(x->rdmult, x->rddiv, mb16_rate, mb16_dist)) {
mb16_rate = r;
mb16_dist = d;
mb_partitioning[i][j] = BLOCK_SIZE_SB8X16;
}
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(cm->left_context[p] +
(y_idx_m * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_y)),
l3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_y);
vpx_memcpy(cm->above_context[p] +
((mi_col + x_idx_m) * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_x)),
a3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_x);
}
// try 16x8 coding
r2 = 0;
d2 = 0;
xd->b_index = 0;
pick_sb_modes(cpi, mi_row + y_idx_m, mi_col + x_idx_m,
tp, &r, &d, BLOCK_SIZE_SB16X8,
&x->sb16x8_context[xd->sb_index][xd->mb_index]
[xd->b_index]);
r2 += r;
d2 += d;
update_state(cpi, &x->sb16x8_context[xd->sb_index][xd->mb_index]
[xd->b_index],
BLOCK_SIZE_SB16X8, 0);
encode_superblock(cpi, tp,
0, mi_row + y_idx_m, mi_col + x_idx_m,
BLOCK_SIZE_SB16X8);
xd->b_index = 1;
pick_sb_modes(cpi, mi_row + y_idx_m + 1, mi_col + x_idx_m,
tp, &r, &d, BLOCK_SIZE_SB16X8,
&x->sb16x8_context[xd->sb_index][xd->mb_index]
[xd->b_index]);
r2 += r;
d2 += d;
set_partition_seg_context(cpi, mi_row + y_idx_m, mi_col + x_idx_m);
pl = partition_plane_context(xd, BLOCK_SIZE_MB16X16);
r2 += x->partition_cost[pl][PARTITION_HORZ];
if (RDCOST(x->rdmult, x->rddiv, r2, d2) <
RDCOST(x->rdmult, x->rddiv, mb16_rate, mb16_dist)) {
mb16_rate = r;
mb16_dist = d;
mb_partitioning[i][j] = BLOCK_SIZE_SB16X8;
}
for (p = 0; p < MAX_MB_PLANE; p++) {
vpx_memcpy(cm->left_context[p] +
(y_idx_m * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_y)),
l3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_y);
vpx_memcpy(cm->above_context[p] +
((mi_col + x_idx_m) * 4 >> (CONFIG_SB8X8 +
xd->plane[p].subsampling_x)),
a3 + 4 * p,
sizeof(ENTROPY_CONTEXT) * 4 >> xd->plane[p].subsampling_x);
}
// try as 16x16
pick_sb_modes(cpi, mi_row + y_idx_m, mi_col + x_idx_m,
tp, &r, &d, BLOCK_SIZE_MB16X16,
&x->mb_context[xd->sb_index][xd->mb_index]);
set_partition_seg_context(cpi, mi_row + y_idx_m, mi_col + x_idx_m);
pl = partition_plane_context(xd, BLOCK_SIZE_MB16X16);
r += x->partition_cost[pl][PARTITION_NONE];
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, mb16_rate, mb16_dist)) {
mb16_rate = r;
mb16_dist = d;
mb_partitioning[i][j] = BLOCK_SIZE_MB16X16;
}
sb32_rate += mb16_rate;
sb32_dist += mb16_dist;
#else
splitmodes_used += pick_mb_mode(cpi, mi_row + y_idx_m,
mi_col + x_idx_m, tp, &r, &d);
sb32_rate += r;
sb32_dist += d;
#endif
// Dummy encode, do not do the tokenization
#if CONFIG_SB8X8
update_state(cpi, &x->mb_context[xd->sb_index][xd->mb_index],
BLOCK_SIZE_MB16X16, 0);
#endif
encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
BLOCK_SIZE_MB16X16, mb_partitioning[i][j], NULL, NULL);
#else
encode_macroblock(cpi, tp, 0, mi_row + y_idx_m,
mi_col + x_idx_m);
#endif
}
/* Restore L & A coding context to those in place on entry */
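Aside (minimal sketch, assuming a generic Lagrangian cost; RDCOST itself is a macro defined elsewhere in the encoder): the large block added above extends the existing search one level down. Each 16x16 is encoded as four 8x8s, as two 8x16s, as two 16x8s and as a single 16x16, the entropy contexts are restored between attempts, and the candidate with the lowest rate-distortion cost wins. Each candidate's rate already includes the partition_cost[] term for signalling that choice, so the comparison can be a plain cost minimum.

#include <limits.h>

typedef struct { long rate, dist; } rd_stats_t;

/* Hypothetical helper mirroring the comparisons above; lambda plays the
 * role of x->rdmult / x->rddiv in the real RDCOST macro. */
static int best_of_four(long lambda, const rd_stats_t cand[4]) {
  long best_cost = LONG_MAX;
  int i, best = 0;   /* 0: 8x8 split, 1: 8x16, 2: 16x8, 3: 16x16 */
  for (i = 0; i < 4; ++i) {
    const long cost = lambda * cand[i].rate + cand[i].dist;
    if (cost < best_cost) {
      best_cost = cost;
      best = i;
    }
  }
  return best;
}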
@ -1170,7 +1397,12 @@ static void encode_sb_row(VP9_COMP *cpi,
// instead of small->big) means we can use as threshold for small, which
// may enable breakouts if RD is not good enough (i.e. faster)
encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
BLOCK_SIZE_SB32X32, sb_partitioning[i], sb_partitioning);
#if CONFIG_SB8X8
BLOCK_SIZE_SB32X32, sb_partitioning[i], mb_partitioning[i],
NULL);
#else
BLOCK_SIZE_SB32X32, sb_partitioning[i], NULL);
#endif
}
for (p = 0; p < MAX_MB_PLANE; p++) {
@ -1221,7 +1453,7 @@ static void encode_sb_row(VP9_COMP *cpi,
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB64X32;
sb64_partitioning = BLOCK_SIZE_SB64X32;
}
for (p = 0; p < MAX_MB_PLANE; p++) {
@ -1266,7 +1498,7 @@ static void encode_sb_row(VP9_COMP *cpi,
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB32X64;
sb64_partitioning = BLOCK_SIZE_SB32X64;
}
for (p = 0; p < MAX_MB_PLANE; p++) {
@ -1295,13 +1527,17 @@ static void encode_sb_row(VP9_COMP *cpi,
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB64X64;
sb64_partitioning = BLOCK_SIZE_SB64X64;
}
}
assert(tp_orig == *tp);
encode_sb(cpi, tp, mi_row, mi_col, 1,
BLOCK_SIZE_SB64X64, sb_partitioning[0], sb_partitioning);
encode_sb(cpi, tp, mi_row, mi_col, 1, BLOCK_SIZE_SB64X64,
#if CONFIG_SB8X8
sb64_partitioning, sb_partitioning, mb_partitioning);
#else
sb64_partitioning, sb_partitioning);
#endif
assert(tp_orig < *tp);
}
}
@ -1346,10 +1582,14 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
vp9_zero(cpi->count_mb_ref_frame_usage)
vp9_zero(cpi->bmode_count)
vp9_zero(cpi->ymode_count)
#if !CONFIG_SB8X8
vp9_zero(cpi->i8x8_mode_count)
#endif
vp9_zero(cpi->y_uv_mode_count)
vp9_zero(cpi->sub_mv_ref_count)
#if !CONFIG_SB8X8
vp9_zero(cpi->mbsplit_count)
#endif
vp9_zero(cpi->common.fc.mv_ref_ct)
vp9_zero(cpi->sb_ymode_count)
vp9_zero(cpi->partition_count);
@ -1616,9 +1856,17 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
#if CONFIG_SB8X8
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
#else
} else {
assert(bsize == BLOCK_SIZE_SB32X32);
subsize = BLOCK_SIZE_MB16X16;
#endif
}
for (n = 0; n < 4; n++) {
@ -1823,15 +2071,17 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
do {
++ bct[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
} while (++b < (16 >> (CONFIG_SB8X8 * 2)));
}
#if !CONFIG_SB8X8
if (m == I8X8_PRED) {
i8x8_modes[xd->block[0].bmi.as_mode.first]++;
i8x8_modes[xd->block[2].bmi.as_mode.first]++;
i8x8_modes[xd->block[8].bmi.as_mode.first]++;
i8x8_modes[xd->block[10].bmi.as_mode.first]++;
}
#endif
#endif
if (xd->mode_info_context->mbmi.sb_type > BLOCK_SIZE_MB16X16) {
@ -1839,14 +2089,18 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
} else {
++cpi->ymode_count[m];
}
#if !CONFIG_SB8X8
if (m != I8X8_PRED)
#endif
++cpi->y_uv_mode_count[m][uvm];
#if !CONFIG_SB8X8
else {
cpi->i8x8_mode_count[xd->mode_info_context->bmi[0].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[2].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[8].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[10].as_mode.first]++;
}
#endif
if (m == I4X4_PRED) {
int b = 0;
do {
@ -1855,7 +2109,7 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
if (m == B_CONTEXT_PRED) m -= CONTEXT_PRED_REPLACEMENTS;
#endif
++cpi->bmode_count[m];
} while (++b < 16);
} while (++b < (16 >> (CONFIG_SB8X8 * 2)));
}
}
@ -1880,6 +2134,7 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#endif
}
#if !CONFIG_SB8X8
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled,
int mi_row, int mi_col) {
@ -2127,6 +2382,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
}
#endif
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
@ -2177,6 +2433,24 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_update_zbin_extra(cpi, x);
}
#if CONFIG_SB8X8
if (xd->mode_info_context->mbmi.mode == I4X4_PRED) {
assert(bsize == BLOCK_SIZE_SB8X8 &&
xd->mode_info_context->mbmi.txfm_size == TX_4X4);
vp9_encode_intra4x4mby(x, bsize);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
vp9_subtract_sbuv(x, bsize);
vp9_transform_sbuv_4x4(x, bsize);
vp9_quantize_sbuv_4x4(x, bsize);
vp9_optimize_sbuv_4x4(cm, x, bsize);
vp9_inverse_transform_sbuv_4x4(xd, bsize);
vp9_recon_sbuv(xd, bsize);
if (output_enabled)
sum_intra_stats(cpi, x);
} else
#endif
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sby_s(&x->e_mbd, bsize);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
@ -2212,6 +2486,12 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
}
#if CONFIG_SB8X8
if (xd->mode_info_context->mbmi.mode == I4X4_PRED) {
assert(bsize == BLOCK_SIZE_SB8X8);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
} else
#endif
if (!x->skip) {
vp9_subtract_sb(x, bsize);
@ -2264,15 +2544,23 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
break;
case TX_8X8:
vp9_transform_sby_8x8(x, bsize);
vp9_transform_sbuv_8x8(x, bsize);
vp9_quantize_sby_8x8(x, bsize);
vp9_quantize_sbuv_8x8(x, bsize);
if (x->optimize) {
if (x->optimize)
vp9_optimize_sby_8x8(cm, x, bsize);
vp9_optimize_sbuv_8x8(cm, x, bsize);
}
vp9_inverse_transform_sby_8x8(xd, bsize);
vp9_inverse_transform_sbuv_8x8(xd, bsize);
if (bsize >= BLOCK_SIZE_MB16X16) {
vp9_transform_sbuv_8x8(x, bsize);
vp9_quantize_sbuv_8x8(x, bsize);
if (x->optimize)
vp9_optimize_sbuv_8x8(cm, x, bsize);
vp9_inverse_transform_sbuv_8x8(xd, bsize);
} else {
vp9_transform_sbuv_4x4(x, bsize);
vp9_quantize_sbuv_4x4(x, bsize);
if (x->optimize)
vp9_optimize_sbuv_4x4(cm, x, bsize);
vp9_inverse_transform_sbuv_4x4(xd, bsize);
}
break;
case TX_4X4:
vp9_transform_sby_4x4(x, bsize);
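
In the TX_8X8 branch above, the chroma pass now depends on the block size: 16x16 and larger blocks keep the 8x8 UV transform chain, while smaller blocks drop to the 4x4 chain, presumably because an 8x8 luma block has only 4x4 chroma planes under 4:2:0 subsampling. A minimal sketch of that selection (the helper below is an illustration, not a function from the tree):

#include <stdio.h>

typedef enum { TX_4X4, TX_8X8 } TxSize;

/* Illustrative helper: chroma transform used alongside an 8x8 luma
 * transform, given whether the block reaches 16x16. */
static TxSize uv_tx_for_8x8_luma(int block_is_16x16_or_larger) {
  return block_is_16x16_or_larger ? TX_8X8 : TX_4X4;
}

int main(void) {
  printf("8x8 block   -> UV tx %s\n",
         uv_tx_for_8x8_luma(0) == TX_4X4 ? "4x4" : "8x8");
  printf("16x16 block -> UV tx %s\n",
         uv_tx_for_8x8_luma(1) == TX_4X4 ? "4x4" : "8x8");
  return 0;
}
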
@ -2315,8 +2603,10 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
if (bsize >= BLOCK_SIZE_SB32X32) {
cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
} else if (bsize >= BLOCK_SIZE_MB16X16) {
cpi->txfm_count_16x16p[mi->mbmi.txfm_size]++;
} else {
cpi->txfm_count_8x8p[mi->mbmi.txfm_size]++;
}
} else {
int x, y;
@ -2324,6 +2614,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (sz == TX_32X32 && bsize < BLOCK_SIZE_SB32X32)
sz = TX_16X16;
if (sz == TX_16X16 && bsize < BLOCK_SIZE_MB16X16)
sz = TX_8X8;
for (y = 0; y < bh; y++) {
for (x = 0; x < bw; x++) {

View file

@ -22,12 +22,15 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
(void) cpi;
#if !CONFIG_SB8X8
if (use_16x16_pred) {
#endif
mbmi->mode = DC_PRED;
mbmi->uv_mode = DC_PRED;
mbmi->ref_frame = INTRA_FRAME;
vp9_encode_intra16x16mby(&cpi->common, x);
#if !CONFIG_SB8X8
} else {
int i;
@ -36,6 +39,7 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
encode_intra4x4block(x, i, BLOCK_SIZE_MB16X16);
}
}
#endif
return vp9_get_mb_ss(x->plane[0].src_diff);
}
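
For reference, vp9_encode_intra ends by returning a sum of squares over the luma residual. A minimal sketch of that kind of helper, shown only as reference arithmetic (the real vp9_get_mb_ss is an optimized routine operating on a fixed-size buffer):

#include <stdint.h>
#include <stdio.h>

/* Reference arithmetic only: sum of squared residual samples. */
static unsigned int sum_of_squares(const int16_t *diff, int n) {
  unsigned int ss = 0;
  int i;
  for (i = 0; i < n; i++)
    ss += (unsigned int)(diff[i] * diff[i]);
  return ss;
}

int main(void) {
  const int16_t residual[4] = { 3, -1, 0, 2 };
  printf("%u\n", sum_of_squares(residual, 4));  /* 9 + 1 + 0 + 4 = 14 */
  return 0;
}
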
@ -58,7 +62,7 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib,
xd->plane[0].diff);
int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, ib, 16);
assert(ib < 16);
assert(ib < (16 >> (2 * CONFIG_SB8X8)));
#if CONFIG_NEWBINTRAMODES
xd->mode_info_context->bmi[ib].as_mode.context =
@ -68,22 +72,22 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib,
vp9_intra4x4_predict(&x->e_mbd, ib,
xd->mode_info_context->bmi[ib].as_mode.first,
dst, xd->plane[0].dst.stride);
vp9_subtract_block(4, 4, src_diff, 16,
vp9_subtract_block(4, 4, src_diff, 16 >> CONFIG_SB8X8,
src, x->plane[0].src.stride,
dst, xd->plane[0].dst.stride);
tx_type = get_tx_type_4x4(&x->e_mbd, ib);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 16, tx_type);
vp9_short_fht4x4(src_diff, coeff, 16 >> CONFIG_SB8X8, tx_type);
x->quantize_b_4x4(x, ib, tx_type, 16);
vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16),
diff, 16, tx_type);
diff, 16 >> CONFIG_SB8X8, tx_type);
} else {
x->fwd_txm4x4(src_diff, coeff, 32);
x->fwd_txm4x4(src_diff, coeff, 32 >> CONFIG_SB8X8);
x->quantize_b_4x4(x, ib, tx_type, 16);
vp9_inverse_transform_b_4x4(&x->e_mbd, xd->plane[0].eobs[ib],
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16),
diff, 32);
diff, 32 >> CONFIG_SB8X8);
}
vp9_recon_b(dst, diff, dst, xd->plane[0].dst.stride);
@ -159,6 +163,7 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
vp9_recon_sbuv(xd, BLOCK_SIZE_MB16X16);
}
#if !CONFIG_SB8X8
void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
MACROBLOCKD *xd = &x->e_mbd;
uint8_t* const src =
@ -304,3 +309,4 @@ void vp9_encode_intra8x8mbuv(MACROBLOCK *x) {
encode_intra_uv4x4(x, i + 20, mode); // v
}
}
#endif
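
The stride edits in encode_intra4x4block above all follow the same rule: with CONFIG_SB8X8 the per-MI residual buffer is 8 samples wide instead of 16, so element strides halve (16 to 8) and the pitch passed to the forward/inverse 4x4 transforms halves as well (32 to 16). Reading that pitch as a byte count (twice the int16_t stride) is an assumption made for the sketch below:

#include <stdio.h>

int main(void) {
  int config_sb8x8;
  for (config_sb8x8 = 0; config_sb8x8 <= 1; config_sb8x8++) {
    const int stride = 16 >> config_sb8x8;  /* residual row, int16_t units  */
    const int pitch = 32 >> config_sb8x8;   /* assumed byte pitch, 2*stride */
    printf("CONFIG_SB8X8=%d: stride=%d pitch=%d\n",
           config_sb8x8, stride, pitch);
  }
  return 0;
}
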

View file

@ -17,8 +17,10 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred);
void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_encode_intra4x4mby(MACROBLOCK *mb, BLOCK_SIZE_TYPE bs);
#if !CONFIG_SB8X8
void vp9_encode_intra8x8mby(MACROBLOCK *x);
void vp9_encode_intra8x8mbuv(MACROBLOCK *x);
void vp9_encode_intra8x8(MACROBLOCK *x, int ib);
#endif
#endif // VP9_ENCODER_VP9_ENCODEINTRA_H_

View file

@ -677,6 +677,7 @@ void vp9_optimize_sbuv_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
}
}
#if !CONFIG_SB8X8
void vp9_fidct_mb(VP9_COMMON *const cm, MACROBLOCK *x) {
MACROBLOCKD *const xd = &x->e_mbd;
const TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
@ -735,6 +736,7 @@ void vp9_encode_inter16x16(VP9_COMMON *const cm, MACROBLOCK *x,
vp9_fidct_mb(cm, x);
vp9_recon_sb(xd, BLOCK_SIZE_MB16X16);
}
#endif
/* this function is used by first pass only */
void vp9_encode_inter16x16y(MACROBLOCK *x, int mi_row, int mi_col) {

View file

@ -24,8 +24,10 @@ typedef struct {
struct VP9_ENCODER_RTCD;
#if !CONFIG_SB8X8
void vp9_encode_inter16x16(VP9_COMMON *const cm, MACROBLOCK *x,
int mb_row, int mb_col);
#endif
void vp9_encode_inter16x16y(MACROBLOCK *x, int mb_row, int mb_col);
@ -54,7 +56,9 @@ void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sbuv_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
#if !CONFIG_SB8X8
void vp9_fidct_mb(VP9_COMMON *const cm, MACROBLOCK *x);
#endif
void vp9_subtract_block(int rows, int cols,
int16_t *diff_ptr, int diff_stride,

View file

@ -41,8 +41,10 @@ void vp9_init_mode_costs(VP9_COMP *c) {
x->fc.uv_mode_prob[VP9_YMODES - 1], vp9_uv_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
x->kf_uv_mode_prob[VP9_YMODES - 1], vp9_uv_mode_tree);
#if !CONFIG_SB8X8
vp9_cost_tokens(c->mb.i8x8_mode_costs,
x->fc.i8x8_mode_prob, vp9_i8x8_mode_tree);
#endif
for (i = 0; i <= VP9_SWITCHABLE_FILTERS; ++i)
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],

View file

@ -628,7 +628,9 @@ static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode, int speed) {
sf->thresh_mult[THR_D63_PRED ] += speed_multiplier * 1500;
sf->thresh_mult[THR_B_PRED ] += speed_multiplier * 2500;
#if !CONFIG_SB8X8
sf->thresh_mult[THR_I8X8_PRED] += speed_multiplier * 2500;
#endif
sf->thresh_mult[THR_NEWMV ] += speed_multiplier * 1000;
sf->thresh_mult[THR_NEWG ] += speed_multiplier * 1000;
@ -3326,9 +3328,13 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
vp9_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
vp9_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
#if !CONFIG_SB8X8
vp9_copy(cpi->common.fc.i8x8_mode_counts, cpi->i8x8_mode_count);
#endif
vp9_copy(cpi->common.fc.sub_mv_ref_counts, cpi->sub_mv_ref_count);
#if !CONFIG_SB8X8
vp9_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
#endif
vp9_copy(cpi->common.fc.partition_counts, cpi->partition_count);
#if CONFIG_COMP_INTERINTRA_PRED
vp9_copy(cpi->common.fc.interintra_counts, cpi->interintra_count);

View file

@ -48,9 +48,9 @@
#define KEY_FRAME_CONTEXT 5
#if CONFIG_COMP_INTERINTRA_PRED
#define MAX_MODES 54
#define MAX_MODES 54 - CONFIG_SB8X8
#else
#define MAX_MODES 42
#define MAX_MODES 42 - CONFIG_SB8X8
#endif
#define MIN_THRESHMULT 32
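
Dropping THR_I8X8_PRED from the mode list is what shrinks MAX_MODES by exactly one when CONFIG_SB8X8 is set, hence the 54 - CONFIG_SB8X8 and 42 - CONFIG_SB8X8 forms above. A tiny sketch of that bookkeeping (the config flags are fixed to 1 purely for illustration; the sketch also parenthesizes the expression, which the committed define does not):

#include <stdio.h>

/* Flags fixed to 1 purely for illustration. */
#define CONFIG_SB8X8 1
#define CONFIG_COMP_INTERINTRA_PRED 1

#if CONFIG_COMP_INTERINTRA_PRED
#define MAX_MODES (54 - CONFIG_SB8X8)   /* parenthesized here for safety */
#else
#define MAX_MODES (42 - CONFIG_SB8X8)
#endif

int main(void) {
  printf("MAX_MODES = %d\n", MAX_MODES);  /* 53: THR_I8X8_PRED removed */
  return 0;
}
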
@ -72,7 +72,9 @@ typedef struct {
// Stats
int y_modes[VP9_YMODES];
int uv_modes[VP9_UV_MODES];
#if !CONFIG_SB8X8
int i8x8_modes[VP9_I8X8_MODES];
#endif
int b_modes[B_MODE_COUNT];
int inter_y_modes[MB_MODE_COUNT];
int inter_uv_modes[VP9_UV_MODES];
@ -100,9 +102,13 @@ typedef struct {
vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
vp9_prob bmode_prob[VP9_NKF_BINTRAMODES - 1];
#if !CONFIG_SB8X8
vp9_prob i8x8_mode_prob[VP9_I8X8_MODES - 1];
#endif
vp9_prob sub_mv_ref_prob[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
#if !CONFIG_SB8X8
vp9_prob mbsplit_prob[VP9_NUMMBSPLITS - 1];
#endif
vp9_prob partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
@ -207,7 +213,9 @@ typedef enum {
THR_SPLITA,
THR_B_PRED,
#if !CONFIG_SB8X8
THR_I8X8_PRED,
#endif
THR_COMP_ZEROLG,
THR_COMP_NEARESTLG,
@ -273,10 +281,17 @@ typedef struct {
} SPEED_FEATURES;
enum BlockSize {
#if CONFIG_SB8X8
BLOCK_4X4,
BLOCK_8X8,
BLOCK_8X16,
BLOCK_16X8,
#else
BLOCK_16X8 = PARTITIONING_16X8,
BLOCK_8X16 = PARTITIONING_8X16,
BLOCK_8X8 = PARTITIONING_8X8,
BLOCK_4X4 = PARTITIONING_4X4,
#endif
BLOCK_16X16,
BLOCK_MAX_SEGMENTS,
BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
@ -451,9 +466,13 @@ typedef struct VP9_COMP {
int sb_ymode_count [VP9_I32X32_MODES];
int ymode_count[VP9_YMODES]; /* intra MB type cts this frame */
int bmode_count[VP9_NKF_BINTRAMODES];
#if !CONFIG_SB8X8
int i8x8_mode_count[VP9_I8X8_MODES];
#endif
int sub_mv_ref_count[SUBMVREF_COUNT][VP9_SUBMVREFS];
#if !CONFIG_SB8X8
int mbsplit_count[VP9_NUMMBSPLITS];
#endif
int y_uv_mode_count[VP9_YMODES][VP9_UV_MODES];
unsigned int partition_count[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
#if CONFIG_COMP_INTERINTRA_PRED

View file

@ -223,9 +223,9 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
}
void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2;
const int bhl = b_height_log2(bsize) - 2;
const int uoff = 16 << (bhl + bwl);
const int bwl = b_width_log2(bsize) - 1;
const int bhl = b_height_log2(bsize) - 1;
const int uoff = 4 << (bhl + bwl);
int i;
for (i = uoff; i < ((uoff * 3) >> 1); i += 4)
@ -233,9 +233,9 @@ void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
}
void vp9_quantize_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2;
const int bhl = b_height_log2(bsize) - 2;
const int uoff = 16 << (bhl + bwl);
const int bwl = b_width_log2(bsize);
const int bhl = b_height_log2(bsize);
const int uoff = 1 << (bhl + bwl);
int i;
for (i = uoff; i < ((uoff * 3) >> 1); i++)
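
Both rewrites above keep uoff equal to the number of 4x4 luma coefficient blocks, i.e. the index where the chroma blocks start, while making the expression well defined for 8x8 blocks, where the previous "- 2" form would have shifted by a negative amount. A self-contained sketch of that arithmetic; modelling b_width_log2()/b_height_log2() as log2 of the block dimension in 4-sample units is an assumption:

#include <stdio.h>

/* Assumed model of b_width_log2()/b_height_log2(): log2 of the block
 * dimension in 4-sample units (4 -> 0, 8 -> 1, 16 -> 2, 32 -> 3, ...). */
static int dim_log2(int pixels) {
  int l = 0, d;
  for (d = pixels / 4; d > 1; d >>= 1)
    l++;
  return l;
}

/* New vp9_quantize_sbuv_8x8 form of the chroma start index. */
static int uoff_8x8(int w, int h) {
  return 4 << ((dim_log2(w) - 1) + (dim_log2(h) - 1));
}

/* New vp9_quantize_sbuv_4x4 form. */
static int uoff_4x4(int w, int h) {
  return 1 << (dim_log2(w) + dim_log2(h));
}

int main(void) {
  printf("16x16: uoff_8x8=%d uoff_4x4=%d\n",
         uoff_8x8(16, 16), uoff_4x4(16, 16));       /* 16 16 */
  printf(" 8x8 : uoff_4x4=%d\n", uoff_4x4(8, 8));   /* 4, no negative shift */
  return 0;
}
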

View file

@ -138,9 +138,13 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->sb_ymode_prob, cm->fc.sb_ymode_prob);
vp9_copy(cc->bmode_prob, cm->fc.bmode_prob);
vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
#if !CONFIG_SB8X8
vp9_copy(cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob);
#endif
vp9_copy(cc->sub_mv_ref_prob, cm->fc.sub_mv_ref_prob);
#if !CONFIG_SB8X8
vp9_copy(cc->mbsplit_prob, cm->fc.mbsplit_prob);
#endif
vp9_copy(cc->partition_prob, cm->fc.partition_prob);
// Stats
@ -198,10 +202,14 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.ymode_prob, cc->ymode_prob);
vp9_copy(cm->fc.sb_ymode_prob, cc->sb_ymode_prob);
vp9_copy(cm->fc.bmode_prob, cc->bmode_prob);
#if !CONFIG_SB8X8
vp9_copy(cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
#endif
vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
vp9_copy(cm->fc.sub_mv_ref_prob, cc->sub_mv_ref_prob);
#if !CONFIG_SB8X8
vp9_copy(cm->fc.mbsplit_prob, cc->mbsplit_prob);
#endif
vp9_copy(cm->fc.partition_prob, cc->partition_prob);
// Stats
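
The two hunks above have to stay symmetric: every table copied out in vp9_save_coding_context must be copied back in vp9_restore_coding_context, so a field dropped under CONFIG_SB8X8 needs the #if fence in both places. A minimal sketch of that save/restore pattern, with vp9_copy modelled as a whole-array memcpy (an approximation of the real macro):

#include <stdio.h>
#include <string.h>

/* Approximation of the real macro: whole-array copy, sizes must match. */
#define vp9_copy(dest, src) memcpy(dest, src, sizeof(dest))

int main(void) {
  unsigned char fc_prob[4] = { 10, 20, 30, 40 };  /* live frame context   */
  unsigned char cc_prob[4];                       /* saved coding context */

  vp9_copy(cc_prob, fc_prob);  /* save                                */
  fc_prob[0] = 99;             /* frame coding updates the live copy  */
  vp9_copy(fc_prob, cc_prob);  /* restore                             */

  printf("%d\n", fc_prob[0]);  /* prints 10 again */
  return 0;
}
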

File diff not shown because of its large size. Load diff

View file

@ -19,16 +19,20 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex);
void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex);
#if !CONFIG_SB8X8
void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
int *r, int *d);
#endif
void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int *r, int *d, BLOCK_SIZE_TYPE bsize,
PICK_MODE_CONTEXT *ctx);
#if !CONFIG_SB8X8
void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
int mi_row, int mi_col,
int *r, int *d);
#endif
int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int mi_row, int mi_col,

View file

@ -199,9 +199,17 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
#if CONFIG_SB8X8
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
#else
} else {
assert(bsize == BLOCK_SIZE_SB32X32);
subsize = BLOCK_SIZE_MB16X16;
#endif
}
for (n = 0; n < 4; n++) {

View file

@ -376,7 +376,11 @@ int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
int vp9_sby_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
int result = 1;
struct is_skippable_args args = {xd, &result};
foreach_transformed_block_in_plane(xd, bsize, 0, 0, is_skippable, &args);
foreach_transformed_block_in_plane(xd, bsize, 0,
#if !CONFIG_SB8X8
0,
#endif
is_skippable, &args);
return result;
}
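
The skippability helpers above follow a visitor pattern: walk every transformed block in a plane and clear a flag as soon as one block has a nonzero EOB. A simplified, self-contained sketch of that pattern; the iterator and argument struct below are stand-ins, not the library's foreach_transformed_block_in_plane() or its is_skippable_args (which carries the MACROBLOCKD and a result pointer):

#include <stdio.h>

/* Simplified stand-ins for the real iterator and its argument struct. */
struct skippable_args {
  const int *eobs;
  int *skippable;
};

static void is_skippable(int block, void *argv) {
  struct skippable_args *args = (struct skippable_args *)argv;
  if (args->eobs[block])
    *args->skippable = 0;  /* any nonzero EOB means tokens must be coded */
}

static void foreach_block(int nblocks, void (*visit)(int, void *), void *arg) {
  int b;
  for (b = 0; b < nblocks; b++)
    visit(b, arg);
}

int main(void) {
  const int eobs[4] = { 0, 0, 3, 0 };
  int result = 1;
  struct skippable_args args = { eobs, &result };
  foreach_block(4, is_skippable, &args);
  printf("skippable = %d\n", result);  /* 0 */
  return 0;
}
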