Removed unnecessary MB_MODE_INFO copies

These copies occurred for each macroblock in the encoder and decoder.
The temp MB_MODE_INFO mbmi was removed from MACROBLOCKD.  As a result,
a large number of compile errors had to be fixed.

Change-Id: I4cf0ffae3ce244f6db04a4c217d52dd256382cf3
This commit is contained in:
Scott LaVarnway 2010-08-12 16:25:43 -04:00
Parent f5615b6149
Commit 9c7a0090e0
16 changed files with 216 additions and 246 deletions
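Every file in this change applies the same substitution: reads of the removed temp copy xd->mbmi become reads through xd->mode_info_context->mbmi, and the per-macroblock memcpy that refreshed the copy disappears. Below is a minimal sketch of the before/after access pattern, not code from the commit; it assumes the MACROBLOCKD and MB_MODE_INFO definitions from the codec's blockd.h, and the helper function name is hypothetical.

/*
 * Sketch only -- illustrates the access-pattern change applied throughout
 * the diffs below. MACROBLOCKD, MB_MODE_INFO, B_PRED and SPLITMV are the
 * types/enums from blockd.h; mb_has_2nd_order() is a hypothetical helper.
 */

/* Before (removed): copy the mode info, then read the copy.
 *   vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
 *              sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
 *   if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV) ...
 */

/* After: dereference the MODE_INFO entry the macroblock already points at. */
static int mb_has_2nd_order(const MACROBLOCKD *xd)
{
    const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;

    return mbmi->mode != B_PRED && mbmi->mode != SPLITMV;
}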

View file

@ -215,7 +215,7 @@ typedef struct
{
DECLARE_ALIGNED(16, short, diff[400]); // from idct diff
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
DECLARE_ALIGNED(16, short, reference[384]);
//not used DECLARE_ALIGNED(16, short, reference[384]);
DECLARE_ALIGNED(16, short, qcoeff[400]);
DECLARE_ALIGNED(16, short, dqcoeff[400]);
@ -232,8 +232,6 @@ typedef struct
FRAME_TYPE frame_type;
MB_MODE_INFO mbmi;
int up_available;
int left_available;

View file

@ -65,7 +65,8 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x
{
int i;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
// do 2nd order transform on the dc block

View file

@ -210,7 +210,8 @@ void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
{
int i;
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char *uptr, *vptr;
unsigned char *upred_ptr = &x->predictor[256];
@ -254,16 +255,18 @@ void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
}
}
//encoder only
void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = x->predictor;
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@ -282,7 +285,7 @@ void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
@ -313,7 +316,9 @@ void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
int offset;
unsigned char *ptr_base;
@ -323,8 +328,8 @@ void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
unsigned char *upred_ptr = &x->predictor[256];
unsigned char *vpred_ptr = &x->predictor[320];
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@ -361,7 +366,7 @@ void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
@ -410,7 +415,7 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
{
int i, j;
if (x->mbmi.mode == SPLITMV)
if (x->mode_info_context->mbmi.mode == SPLITMV)
{
for (i = 0; i < 2; i++)
{
@ -455,8 +460,8 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
}
else
{
int mvrow = x->mbmi.mv.as_mv.row;
int mvcol = x->mbmi.mv.as_mv.col;
int mvrow = x->mode_info_context->mbmi.mv.as_mv.row;
int mvcol = x->mode_info_context->mbmi.mv.as_mv.col;
if (mvrow < 0)
mvrow -= 1;
@ -535,7 +540,7 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
unsigned char *pred_ptr = x->predictor;
unsigned char *dst_ptr = x->dst.y_buffer;
if (x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != SPLITMV)
{
int offset;
unsigned char *ptr_base;
@ -547,8 +552,8 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
unsigned char *udst_ptr = x->dst.u_buffer;
unsigned char *vdst_ptr = x->dst.v_buffer;
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->dst.y_stride; //x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@ -587,7 +592,7 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
//if sth is wrong, go back to what it is in build_inter_predictors_mb.
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{

View file

@ -43,7 +43,7 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
}
// for Y
switch (x->mbmi.mode)
switch (x->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@ -164,7 +164,7 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
}
// for Y
switch (x->mbmi.mode)
switch (x->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@ -290,7 +290,7 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
}
switch (x->mbmi.uv_mode)
switch (x->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{
@ -430,7 +430,7 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
}
switch (x->mbmi.uv_mode)
switch (x->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{

View file

@ -113,7 +113,7 @@ static void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
// to dst buffer, we can write the result directly to dst buffer. This eliminates unnecessary copy.
static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd);
@ -164,7 +164,7 @@ static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
static void clamp_mvs(MACROBLOCKD *xd)
{
if (xd->mbmi.mode == SPLITMV)
if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int i;
@ -175,7 +175,7 @@ static void clamp_mvs(MACROBLOCKD *xd)
}
else
{
clamp_mv_to_umv_border(&xd->mbmi.mv.as_mv, xd);
clamp_mv_to_umv_border(&xd->mode_info_context->mbmi.mv.as_mv, xd);
clamp_uvmv_to_umv_border(&xd->block[16].bmi.mv.as_mv, xd);
}
@ -184,10 +184,9 @@ static void clamp_mvs(MACROBLOCKD *xd)
void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
int eobtotal = 0;
MV orig_mvs[24];
int i, do_clamp = xd->mbmi.need_to_clamp_mvs;
int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs;
if (xd->mbmi.mb_skip_coeff)
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
@ -199,20 +198,12 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
/* Perform temporary clamping of the MV to be used for prediction */
if (do_clamp)
{
if (xd->mbmi.mode == SPLITMV)
for (i=0; i<24; i++)
orig_mvs[i] = xd->block[i].bmi.mv.as_mv;
else
{
orig_mvs[0] = xd->mbmi.mv.as_mv;
orig_mvs[1] = xd->block[16].bmi.mv.as_mv;
}
clamp_mvs(xd);
}
xd->mode_info_context->mbmi.dc_diff = 1;
if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV && eobtotal == 0)
{
xd->mode_info_context->mbmi.dc_diff = 0;
skip_recon_mb(pbi, xd);
@ -223,11 +214,11 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
mb_init_dequantizer(pbi, xd);
// do prediction
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv(xd);
if (xd->mbmi.mode != B_PRED)
if (xd->mode_info_context->mbmi.mode != B_PRED)
{
vp8_build_intra_predictors_mby_ptr(xd);
} else {
@ -240,7 +231,7 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
}
// dequantization and idct
if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV)
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
{
BLOCKD *b = &xd->block[24];
DEQUANT_INVOKE(&pbi->dequant, block)(b);
@ -283,7 +274,7 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
}
}
}
else if ((xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME) && xd->mbmi.mode == B_PRED)
else if ((xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) && xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@ -394,12 +385,8 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@ -420,9 +407,9 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
@ -608,7 +595,7 @@ static void init_frame(VP8D_COMP *pbi)
xd->left_context = pc->left_context;
xd->mode_info_context = pc->mi;
xd->frame_type = pc->frame_type;
xd->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_stride = pc->mode_info_stride;
}

View file

@ -95,7 +95,7 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *x)
*(l+1) = 0;
/* Clear entropy contexts for Y2 blocks */
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
{
a = A[Y2CONTEXT];
l = L[Y2CONTEXT];
@ -240,7 +240,7 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x)
i = 0;
stop = 16;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
{
i = 24;
stop = 24;

View file

@ -69,9 +69,6 @@ void vp8_setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC
mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
vpx_memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
mbd->mbmi.mode = DC_PRED;
mbd->mbmi.uv_mode = DC_PRED;
mbd->current_bc = &pbi->bc2;
for (j = 0; j < 25; j++)
@ -222,12 +219,7 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
}
}
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@ -248,9 +240,9 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
@ -639,12 +631,7 @@ void vp8_mtdecode_mb_rows(VP8D_COMP *pbi,
}
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@ -665,9 +652,9 @@ void vp8_mtdecode_mb_rows(VP8D_COMP *pbi,
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;

View file

@ -254,7 +254,6 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
int i;
int QIndex;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mbmi;
int zbin_extra;
// Select the baseline MB Q index.
@ -262,12 +261,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
// Abs Value
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
// Delta Value
else
{
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
}
}
@ -388,14 +387,14 @@ void encode_mb_row(VP8_COMP *cpi,
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
xd->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
else
xd->mbmi.segment_id = 0;
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
}
else
xd->mbmi.segment_id = 0; // Set to Segment 0 by default
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
x->active_ptr = cpi->active_map + seg_map_index + mb_col;
@ -426,7 +425,7 @@ void encode_mb_row(VP8_COMP *cpi,
#endif
// Count of last ref frame 0,0 useage
if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
@ -434,14 +433,14 @@ void encode_mb_row(VP8_COMP *cpi,
// during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
cpi->segmentation_map[seg_map_index+mb_col] = xd->mbmi.segment_id;
cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
// Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
// else mark it as dirty (1).
if (xd->mbmi.segment_id)
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
else if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
{
if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
@ -456,9 +455,6 @@ void encode_mb_row(VP8_COMP *cpi,
x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
// store macroblock mode info into context array
vpx_memcpy(&xd->mode_info_context->mbmi, &xd->mbmi, sizeof(xd->mbmi));
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
@ -471,7 +467,7 @@ void encode_mb_row(VP8_COMP *cpi,
recon_uvoffset += 8;
// Keep track of segment useage
segment_counts[xd->mbmi.segment_id] ++;
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
// skip to next mb
xd->mode_info_context++;
@ -627,8 +623,8 @@ void vp8_encode_frame(VP8_COMP *cpi)
//x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
#endif
xd->mbmi.mode = DC_PRED;
xd->mbmi.uv_mode = DC_PRED;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = cm->left_context;
@ -982,8 +978,8 @@ void vp8_build_block_offsets(MACROBLOCK *x)
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mbmi.uv_mode;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
@ -1021,7 +1017,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
int rateuv_tokenonly = 0;
int i;
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
#if !(CONFIG_REALTIME_ONLY)
@ -1037,7 +1033,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
rate += rateuv;
@ -1045,7 +1041,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
if (Error4x4 < Error16x16)
{
rate += rate4x4;
x->e_mbd.mbmi.mode = B_PRED;
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
// get back the intra block modes
for (i = 0; i < 16; i++)
@ -1085,7 +1081,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
for (mode = DC_PRED; mode <= TM_PRED; mode ++)
{
x->e_mbd.mbmi.mode = mode;
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
@ -1105,17 +1101,17 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
else
Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
if (Error4x4 < Error16x16)
{
x->e_mbd.mbmi.mode = B_PRED;
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error4x4;
}
else
{
x->e_mbd.mbmi.mode = best_mode;
x->e_mbd.mode_info_context->mbmi.mode = best_mode;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error16x16;
}
@ -1149,7 +1145,7 @@ int vp8cx_encode_inter_macroblock
x->skip = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[xd->mbmi.segment_id];
x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
@ -1180,17 +1176,17 @@ int vp8cx_encode_inter_macroblock
if (cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
if ((xd->mbmi.segment_id == 1) &&
((xd->mbmi.ref_frame != LAST_FRAME) || (xd->mbmi.mode != ZEROMV)))
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
xd->mbmi.segment_id = 0;
xd->mode_info_context->mbmi.segment_id = 0;
}
}
// Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
if (cpi->zbin_mode_boost_enabled)
{
if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame != LAST_FRAME))
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME))
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = 0;
@ -1199,15 +1195,15 @@ int vp8cx_encode_inter_macroblock
vp8cx_mb_init_quantizer(cpi, x);
}
cpi->count_mb_ref_frame_usage[xd->mbmi.ref_frame] ++;
cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
if (xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == B_PRED)
{
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
@ -1226,13 +1222,13 @@ int vp8cx_encode_inter_macroblock
int ref_fb_idx;
vp8_find_near_mvs(xd, xd->mode_info_context,
&nearest, &nearby, &best_ref_mv, mdcounts, xd->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
&nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
vp8_build_uvmvs(xd, cpi->common.full_pixel);
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
@ -1241,7 +1237,7 @@ int vp8cx_encode_inter_macroblock
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mbmi.mode == SPLITMV)
if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int i;
@ -1254,19 +1250,19 @@ int vp8cx_encode_inter_macroblock
}
}
}
else if (xd->mbmi.mode == NEWMV)
else if (xd->mode_info_context->mbmi.mode == NEWMV)
{
cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
}
if (!x->skip && !x->e_mbd.mbmi.force_no_skip)
if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
{
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
xd->mbmi.mb_skip_coeff = 0;
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
}
else
@ -1279,19 +1275,19 @@ int vp8cx_encode_inter_macroblock
{
if (cpi->common.mb_no_coeff_skip)
{
if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV)
xd->mbmi.dc_diff = 0;
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
xd->mode_info_context->mbmi.dc_diff = 0;
else
xd->mbmi.dc_diff = 1;
xd->mode_info_context->mbmi.dc_diff = 1;
xd->mbmi.mb_skip_coeff = 1;
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
vp8_fix_contexts(cpi, xd);
}
else
{
vp8_stuff_mb(cpi, xd, t);
xd->mbmi.mb_skip_coeff = 0;
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count ++;
}
}

View file

@ -53,7 +53,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK
x->quantize_b(be, b);
x->e_mbd.mbmi.mb_skip_coeff &= (!b->eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
@ -70,7 +70,7 @@ void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BL
x->quantize_b(be, b);
x->e_mbd.mbmi.mb_skip_coeff &= (!b->eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);
@ -124,7 +124,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
BLOCKD *d = &x->e_mbd.block[b];
switch (x->e_mbd.mbmi.mode)
switch (x->e_mbd.mode_info_context->mbmi.mode)
{
case DC_PRED:
@ -157,7 +157,7 @@ void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
vp8_transform_intra_mby(x);
x->e_mbd.mbmi.mb_skip_coeff = 1;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
vp8_quantize_mby(x);
@ -170,7 +170,7 @@ void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
BLOCKD *d = &x->e_mbd.block[b];
switch (x->e_mbd.mbmi.mode)
switch (x->e_mbd.mode_info_context->mbmi.mode)
{
case DC_PRED:

View file

@ -121,7 +121,7 @@ void vp8_transform_mbuv(MACROBLOCK *x)
for (i = 16; i < 24; i += 2)
{
x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 16);
}
}
@ -158,7 +158,7 @@ void vp8_transform_mb(MACROBLOCK *x)
}
// build dc block from 16 y dc values
if (x->e_mbd.mbmi.mode != SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
vp8_build_dcblock(x);
for (i = 16; i < 24; i += 2)
@ -168,7 +168,7 @@ void vp8_transform_mb(MACROBLOCK *x)
}
// do 2nd order transform on the dc block
if (x->e_mbd.mbmi.mode != SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
@ -185,7 +185,7 @@ void vp8_transform_mby(MACROBLOCK *x)
}
// build dc block from 16 y dc values
if (x->e_mbd.mbmi.mode != SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
vp8_build_dcblock(x);
x->short_walsh4x4(&x->block[24].src_diff[0],
@ -494,8 +494,8 @@ void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
vp8_setup_temp_context(&t, x->e_mbd.above_context[Y1CONTEXT],
x->e_mbd.left_context[Y1CONTEXT], 4);
has_2nd_order = (x->e_mbd.mbmi.mode != B_PRED
&& x->e_mbd.mbmi.mode != SPLITMV);
has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
type = has_2nd_order ? 0 : 3;
for (b = 0; b < 16; b++)
@ -538,25 +538,25 @@ static void vp8_find_mb_skip_coef(MACROBLOCK *x)
{
int i;
x->e_mbd.mbmi.mb_skip_coeff = 1;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
if (x->e_mbd.mbmi.mode != B_PRED && x->e_mbd.mbmi.mode != SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
for (i = 0; i < 16; i++)
{
x->e_mbd.mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
}
for (i = 16; i < 25; i++)
{
x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
}
}
else
{
for (i = 0; i < 24; i++)
{
x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
}
}
}
@ -576,8 +576,8 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
return;
vp8_setup_temp_context(&t, x->e_mbd.above_context[Y1CONTEXT],
x->e_mbd.left_context[Y1CONTEXT], 4);
has_2nd_order = (x->e_mbd.mbmi.mode != B_PRED
&& x->e_mbd.mbmi.mode != SPLITMV);
has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
type = has_2nd_order ? 0 : 3;
for (b = 0; b < 16; b++)

View file

@ -120,14 +120,14 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
xd->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
else
xd->mbmi.segment_id = 0;
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
}
else
xd->mbmi.segment_id = 0; // Set to Segment 0 by default
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
if (cm->frame_type == KEY_FRAME)
@ -157,7 +157,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
#endif
// Count of last ref frame 0,0 useage
if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
}
@ -166,9 +166,6 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
// store macroblock mode info into context array
vpx_memcpy(&xd->mode_info_context->mbmi, &xd->mbmi, sizeof(xd->mbmi));
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
@ -181,7 +178,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
recon_uvoffset += 8;
// Keep track of segment useage
segment_counts[xd->mbmi.segment_id] ++;
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
// skip to next mb
xd->mode_info_context++;
@ -405,9 +402,6 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
mb->rddiv = cpi->RDDIV;
mb->rdmult = cpi->RDMULT;
mbd->mbmi.mode = DC_PRED;
mbd->mbmi.uv_mode = DC_PRED;
mbd->left_context = cm->left_context;
mb->mvc = cm->fc.mvc;

View file

@ -78,9 +78,9 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
if (use_dc_pred)
{
x->e_mbd.mbmi.mode = DC_PRED;
x->e_mbd.mbmi.uv_mode = DC_PRED;
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
@ -565,6 +565,8 @@ void vp8_first_pass(VP8_COMP *cpi)
xd->pre = *lst_yv12;
xd->dst = *new_yv12;
xd->mode_info_context = cm->mi;
vp8_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);

View file

@ -410,7 +410,7 @@ int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
}
mb->e_mbd.mbmi.uv_mode = best_mode;
mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
return best_error;
}
@ -535,7 +535,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
best_rd = INT_MAX;
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
// if we encode a new mv this is important
// find the best new motion vector
@ -547,9 +547,9 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
if (best_rd <= cpi->rd_threshes[mode_index])
continue;
x->e_mbd.mbmi.ref_frame = vp8_ref_frame_order[mode_index];
x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];
if (skip_mode[x->e_mbd.mbmi.ref_frame])
if (skip_mode[x->e_mbd.mode_info_context->mbmi.ref_frame])
continue;
// Check to see if the testing frequency for this mode is at its max
@ -582,29 +582,29 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
// Experimental debug code.
//all_rds[mode_index] = -1;
x->e_mbd.mbmi.mode = this_mode;
x->e_mbd.mbmi.uv_mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
// Work out the cost assosciated with selecting the reference frame
frame_cost = ref_frame_cost[x->e_mbd.mbmi.ref_frame];
frame_cost = ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
rate2 += frame_cost;
// everything but intra
if (x->e_mbd.mbmi.ref_frame)
if (x->e_mbd.mode_info_context->mbmi.ref_frame)
{
x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mbmi.ref_frame];
x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mbmi.ref_frame];
x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mbmi.ref_frame];
mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mbmi.ref_frame];
mode_mv[NEARMV] = near_mv[x->e_mbd.mbmi.ref_frame];
best_ref_mv1 = best_ref_mv[x->e_mbd.mbmi.ref_frame];
memcpy(mdcounts, MDCounts[x->e_mbd.mbmi.ref_frame], sizeof(mdcounts));
x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
best_ref_mv1 = best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
}
//Only consider ZEROMV/ALTREF_FRAME for alt ref frame.
if (cpi->is_src_frame_alt_ref)
{
if (this_mode != ZEROMV || x->e_mbd.mbmi.ref_frame != ALTREF_FRAME)
if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
continue;
}
@ -644,7 +644,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
case TM_PRED:
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mbmi.mode];
rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
if (this_rd < best_intra_rd)
@ -782,10 +782,10 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
continue;
rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
x->e_mbd.mbmi.mode = this_mode;
x->e_mbd.mbmi.mv.as_mv = mode_mv[this_mode];
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode];
x->e_mbd.block[0].bmi.mode = this_mode;
x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mbmi.mv.as_int;
x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;
distortion2 = get_inter_mbpred_error(x, cpi->fn_ptr.svf, cpi->fn_ptr.vf, (unsigned int *)(&sse));
@ -824,7 +824,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
*returnrate = rate2;
*returndistortion = distortion2;
best_rd = this_rd;
vpx_memcpy(&best_mbmode, &x->e_mbd.mbmi, sizeof(MB_MODE_INFO));
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
if (this_mode == B_PRED || this_mode == SPLITMV)
for (i = 0; i < 16; i++)
@ -870,9 +870,9 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
if (best_mbmode.mode <= B_PRED)
{
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
vp8_pick_intra_mbuv_mode(x);
best_mbmode.uv_mode = x->e_mbd.mbmi.uv_mode;
best_mbmode.uv_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
}
@ -898,23 +898,23 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
best_mbmode.partitioning = 0;
best_mbmode.dc_diff = 0;
vpx_memcpy(&x->e_mbd.mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
for (i = 0; i < 16; i++)
{
vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
}
x->e_mbd.mbmi.mv.as_int = 0;
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
return best_rd;
}
// macroblock modes
vpx_memcpy(&x->e_mbd.mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
if (x->e_mbd.mbmi.mode == B_PRED || x->e_mbd.mbmi.mode == SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
@ -922,10 +922,10 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
}
else
{
vp8_set_mbmode_and_mvs(x, x->e_mbd.mbmi.mode, &best_bmodes[0].mv.as_mv);
vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv.as_mv);
}
x->e_mbd.mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
return best_rd;
}

View file

@ -277,34 +277,34 @@ void vp8_strict_quantize_b(BLOCK *b, BLOCKD *d)
void vp8_quantize_mby(MACROBLOCK *x)
{
int i;
int has_2nd_order = (x->e_mbd.mbmi.mode != B_PRED
&& x->e_mbd.mbmi.mode != SPLITMV);
int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
for (i = 0; i < 16; i++)
{
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
x->e_mbd.mbmi.mb_skip_coeff &=
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
(x->e_mbd.block[i].eob <= has_2nd_order);
}
if(has_2nd_order)
{
x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[24].eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[24].eob);
}
}
void vp8_quantize_mb(MACROBLOCK *x)
{
int i;
int has_2nd_order=(x->e_mbd.mbmi.mode != B_PRED
&& x->e_mbd.mbmi.mode != SPLITMV);
int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
x->e_mbd.mbmi.mb_skip_coeff = 1;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
for (i = 0; i < 24+has_2nd_order; i++)
{
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
x->e_mbd.mbmi.mb_skip_coeff &=
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
(x->e_mbd.block[i].eob <= (has_2nd_order && i<16));
}
}
@ -317,6 +317,6 @@ void vp8_quantize_mbuv(MACROBLOCK *x)
for (i = 16; i < 24; i++)
{
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
}
}

View file

@ -486,7 +486,7 @@ static int macro_block_max_error(MACROBLOCK *mb)
int i, j;
int berror;
dc = !(mb->e_mbd.mbmi.mode == B_PRED || mb->e_mbd.mbmi.mode == SPLITMV);
dc = !(mb->e_mbd.mode_info_context->mbmi.mode == B_PRED || mb->e_mbd.mode_info_context->mbmi.mode == SPLITMV);
for (i = 0; i < 16; i++)
{
@ -622,14 +622,14 @@ int vp8_rdcost_mby(MACROBLOCK *mb)
vp8_setup_temp_context(&t, x->above_context[Y1CONTEXT], x->left_context[Y1CONTEXT], 4);
vp8_setup_temp_context(&t2, x->above_context[Y2CONTEXT], x->left_context[Y2CONTEXT], 1);
if (x->mbmi.mode == SPLITMV)
if (x->mode_info_context->mbmi.mode == SPLITMV)
type = 3;
for (b = 0; b < 16; b++)
cost += cost_coeffs(mb, x->block + b, type,
t.a + vp8_block2above[b], t.l + vp8_block2left[b]);
if (x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != SPLITMV)
cost += cost_coeffs(mb, x->block + 24, 1,
t2.a + vp8_block2above[24], t2.l + vp8_block2left[24]);
@ -761,9 +761,9 @@ int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi, MACROBLOCK *x, int *Rate, int
int dummy;
rate = 0;
x->e_mbd.mbmi.mode = mode;
x->e_mbd.mode_info_context->mbmi.mode = mode;
rate += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mbmi.mode];
rate += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
vp8_encode_intra16x16mbyrd(IF_RTCD(&cpi->rtcd), x);
@ -785,7 +785,7 @@ int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi, MACROBLOCK *x, int *Rate, int
}
}
x->e_mbd.mbmi.mode = mode_selected;
x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
return best_rd;
}
@ -847,11 +847,11 @@ int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *ra
int distortion;
int this_rd;
x->e_mbd.mbmi.uv_mode = mode;
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
vp8_encode_intra16x16mbuvrd(IF_RTCD(&cpi->rtcd), x);
rate_to = rd_cost_mbuv(x);
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][x->e_mbd.mbmi.uv_mode];
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.uv_mode];
distortion = vp8_get_mbuvrecon_error(IF_RTCD(&cpi->rtcd.variance), x);
@ -870,7 +870,7 @@ int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *ra
*rate = r;
*distortion = d;
x->e_mbd.mbmi.uv_mode = mode_selected;
x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
return best_rd;
}
#endif
@ -888,9 +888,9 @@ void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv)
{
int i;
x->e_mbd.mbmi.mode = mb;
x->e_mbd.mbmi.mv.as_mv.row = mv->row;
x->e_mbd.mbmi.mv.as_mv.col = mv->col;
x->e_mbd.mode_info_context->mbmi.mode = mb;
x->e_mbd.mode_info_context->mbmi.mv.as_mv.row = mv->row;
x->e_mbd.mode_info_context->mbmi.mv.as_mv.col = mv->col;
for (i = 0; i < 16; i++)
{
@ -1060,7 +1060,7 @@ static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion, const vp
}
// 2nd order fdct
if (x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != SPLITMV)
{
mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
}
@ -1072,7 +1072,7 @@ static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion, const vp
}
// DC predication and Quantization of 2nd Order block
if (x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != SPLITMV)
{
{
@ -1081,7 +1081,7 @@ static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion, const vp
}
// Distortion
if (x->mbmi.mode == SPLITMV)
if (x->mode_info_context->mbmi.mode == SPLITMV)
d = ENCODEMB_INVOKE(rtcd, mberr)(mb, 0) << 2;
else
{
@ -1401,10 +1401,10 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
// save partitions
labels = vp8_mbsplits[best_seg];
x->e_mbd.mbmi.partitioning = best_seg;
x->e_mbd.mbmi.partition_count = vp8_count_labels(labels);
x->e_mbd.mode_info_context->mbmi.partitioning = best_seg;
x->e_mbd.mode_info_context->mbmi.partition_count = vp8_count_labels(labels);
for (i = 0; i < x->e_mbd.mbmi.partition_count; i++)
for (i = 0; i < x->e_mbd.mode_info_context->mbmi.partition_count; i++)
{
int j;
@ -1414,8 +1414,8 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
break;
}
x->e_mbd.mbmi.partition_bmi[i].mode = x->e_mbd.block[j].bmi.mode;
x->e_mbd.mbmi.partition_bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
x->e_mbd.mode_info_context->mbmi.partition_bmi[i].mode = x->e_mbd.block[j].bmi.mode;
x->e_mbd.mode_info_context->mbmi.partition_bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
}
return best_segment_rd;
@ -1515,9 +1515,9 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
vpx_memset(mode_mv, 0, sizeof(mode_mv));
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
vp8_rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate, &uv_intra_rate_tokenonly, &uv_intra_distortion);
uv_intra_mode = x->e_mbd.mbmi.uv_mode;
uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
{
uvintra_eob = 0;
@ -1561,18 +1561,18 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
this_mode = vp8_mode_order[mode_index];
x->e_mbd.mbmi.mode = this_mode;
x->e_mbd.mbmi.uv_mode = DC_PRED;
x->e_mbd.mbmi.ref_frame = vp8_ref_frame_order[mode_index];
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];
//Only consider ZEROMV/ALTREF_FRAME for alt ref frame.
if (cpi->is_src_frame_alt_ref)
{
if (this_mode != ZEROMV || x->e_mbd.mbmi.ref_frame != ALTREF_FRAME)
if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
continue;
}
if (x->e_mbd.mbmi.ref_frame == LAST_FRAME)
if (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME)
{
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
@ -1586,7 +1586,7 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
x->e_mbd.pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
x->e_mbd.pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
}
else if (x->e_mbd.mbmi.ref_frame == GOLDEN_FRAME)
else if (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
{
YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];
@ -1601,7 +1601,7 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
x->e_mbd.pre.u_buffer = gld_yv12->u_buffer + recon_uvoffset;
x->e_mbd.pre.v_buffer = gld_yv12->v_buffer + recon_uvoffset;
}
else if (x->e_mbd.mbmi.ref_frame == ALTREF_FRAME)
else if (x->e_mbd.mode_info_context->mbmi.ref_frame == ALTREF_FRAME)
{
YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
@ -1623,11 +1623,11 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
vp8_find_near_mvs(&x->e_mbd,
x->e_mbd.mode_info_context,
&mode_mv[NEARESTMV], &mode_mv[NEARMV], &best_ref_mv,
mdcounts, x->e_mbd.mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
mdcounts, x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
// Estimate the reference frame signaling cost and add it to the rolling cost variable.
frame_cost = ref_frame_cost[x->e_mbd.mbmi.ref_frame];
frame_cost = ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
rate2 += frame_cost;
if (this_mode <= B_PRED)
@ -1694,9 +1694,9 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int breakout_rd = best_rd - frame_cost_rd;
int tmp_rd;
if (x->e_mbd.mbmi.ref_frame == LAST_FRAME)
if (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME)
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, breakout_rd, mdcounts, &rate, &rate_y, &distortion, cpi->compressor_speed, x->mvcost, cpi->rd_threshes[THR_NEWMV], cpi->common.full_pixel) ;
else if (x->e_mbd.mbmi.ref_frame == GOLDEN_FRAME)
else if (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, breakout_rd, mdcounts, &rate, &rate_y, &distortion, cpi->compressor_speed, x->mvcost, cpi->rd_threshes[THR_NEWG], cpi->common.full_pixel) ;
else
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, breakout_rd, mdcounts, &rate, &rate_y, &distortion, cpi->compressor_speed, x->mvcost, cpi->rd_threshes[THR_NEWA], cpi->common.full_pixel) ;
@ -1753,16 +1753,16 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (0)//x->e_mbd.mbmi.partition_count == 4)
{
if (x->e_mbd.mbmi.partition_bmi[0].mv.as_int == x->e_mbd.mbmi.partition_bmi[1].mv.as_int
&& x->e_mbd.mbmi.partition_bmi[2].mv.as_int == x->e_mbd.mbmi.partition_bmi[3].mv.as_int)
if (x->e_mbd.mode_info_context->mbmi.partition_bmi[0].mv.as_int == x->e_mbd.mode_info_context->mbmi.partition_bmi[1].mv.as_int
&& x->e_mbd.mode_info_context->mbmi.partition_bmi[2].mv.as_int == x->e_mbd.mode_info_context->mbmi.partition_bmi[3].mv.as_int)
{
const int *labels = vp8_mbsplits[2];
x->e_mbd.mbmi.partitioning = 0;
x->e_mbd.mode_info_context->mbmi.partitioning = 0;
rate -= vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + 2);
rate += vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings);
//rate -= x->inter_bmode_costs[ x->e_mbd.mbmi.partition_bmi[1]];
//rate -= x->inter_bmode_costs[ x->e_mbd.mbmi.partition_bmi[3]];
x->e_mbd.mbmi.partition_bmi[1] = x->e_mbd.mbmi.partition_bmi[2];
x->e_mbd.mode_info_context->mbmi.partition_bmi[1] = x->e_mbd.mode_info_context->mbmi.partition_bmi[2];
}
}
@ -1772,14 +1772,14 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
case V_PRED:
case H_PRED:
case TM_PRED:
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
{
macro_block_yrd(x, &rate, &distortion, IF_RTCD(&cpi->rtcd.encodemb)) ;
rate2 += rate;
rate_y = rate;
distortion2 += distortion;
rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mbmi.mode];
rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
rate2 += uv_intra_rate;
rate_uv = uv_intra_rate_tokenonly;
distortion2 += uv_intra_distortion;
@ -2085,7 +2085,7 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
//all_rates[mode_index] = rate2;
//all_dist[mode_index] = distortion2;
if ((x->e_mbd.mbmi.ref_frame == INTRA_FRAME) && (this_rd < *returnintra))
if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) && (this_rd < *returnintra))
{
*returnintra = this_rd ;
}
@ -2095,17 +2095,17 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
{
// Note index of best mode so far
best_mode_index = mode_index;
x->e_mbd.mbmi.force_no_skip = force_no_skip;
x->e_mbd.mode_info_context->mbmi.force_no_skip = force_no_skip;
if (this_mode <= B_PRED)
{
x->e_mbd.mbmi.uv_mode = uv_intra_mode;
x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
}
*returnrate = rate2;
*returndistortion = distortion2;
best_rd = this_rd;
vpx_memcpy(&best_mbmode, &x->e_mbd.mbmi, sizeof(MB_MODE_INFO));
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
for (i = 0; i < 16; i++)
{
@ -2186,28 +2186,28 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_mbmode.partitioning = 0;
best_mbmode.dc_diff = 0;
vpx_memcpy(&x->e_mbd.mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
for (i = 0; i < 16; i++)
{
vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
}
x->e_mbd.mbmi.mv.as_int = 0;
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
return best_rd;
}
// macroblock modes
vpx_memcpy(&x->e_mbd.mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
}
x->e_mbd.mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
return best_rd;
}

View file

@ -276,7 +276,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
TOKENEXTRA *start = *t;
TOKENEXTRA *tp = *t;
x->mbmi.dc_diff = 1;
x->mode_info_context->mbmi.dc_diff = 1;
#if 0
@ -291,7 +291,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
#if 1
if (x->mbmi.mb_skip_coeff)
if (x->mode_info_context->mbmi.mb_skip_coeff)
{
cpi->skip_true_count++;
@ -303,10 +303,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
vp8_fix_contexts(cpi, x);
}
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
x->mbmi.dc_diff = 0;
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
x->mode_info_context->mbmi.dc_diff = 0;
else
x->mbmi.dc_diff = 1;
x->mode_info_context->mbmi.dc_diff = 1;
return;
@ -347,7 +347,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
vpx_memcpy(cpi->coef_counts_backup, cpi->coef_counts, sizeof(cpi->coef_counts));
#endif
if (x->mbmi.mode == B_PRED || x->mbmi.mode == SPLITMV)
if (x->mode_info_context->mbmi.mode == B_PRED || x->mode_info_context->mbmi.mode == SPLITMV)
{
plane_type = 3;
}
@ -592,10 +592,10 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
plane_type = 0;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
x->mbmi.dc_diff = 0;
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
x->mode_info_context->mbmi.dc_diff = 0;
else
x->mbmi.dc_diff = 1;
x->mode_info_context->mbmi.dc_diff = 1;
for (b = 0; b < 16; b++)
@ -629,7 +629,7 @@ void vp8_fix_contexts(VP8_COMP *cpi, MACROBLOCKD *x)
x->above_context[UCONTEXT][1] = 0;
x->above_context[VCONTEXT][1] = 0;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
{
x->left_context[Y2CONTEXT][0] = 0;
x->above_context[Y2CONTEXT][0] = 0;