Merge remote branch 'origin/master' into experimental

Change-Id: I81ac427cbaf3d0865df4acef3e0bfc2e95556c4b
This commit is contained in:
John Koleszar 2011-06-04 00:05:13 -04:00
Parents 480f025754 8c5b73de2a
Commit 2c308f36fc
12 changed files: 87 additions and 119 deletions

View file

@ -137,12 +137,6 @@ typedef enum
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
typedef struct
{
B_PREDICTION_MODE mode;
int_mv mv;
} B_MODE_INFO;
union b_mode_info
{
B_PREDICTION_MODE as_mode;
@ -182,8 +176,6 @@ typedef struct
short *dqcoeff;
unsigned char *predictor;
short *diff;
short *reference;
short *dequant;
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
@ -197,14 +189,13 @@ typedef struct
int eob;
B_MODE_INFO bmi;
union b_mode_info bmi;
} BLOCKD;
typedef struct
{
DECLARE_ALIGNED(16, short, diff[400]); /* from idct diff */
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
/* not used DECLARE_ALIGNED(16, short, reference[384]); */
DECLARE_ALIGNED(16, short, qcoeff[400]);
DECLARE_ALIGNED(16, short, dqcoeff[400]);
DECLARE_ALIGNED(16, char, eobs[25]);
@ -284,19 +275,15 @@ extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
static void update_blockd_bmi(MACROBLOCKD *xd)
{
int i;
if (xd->mode_info_context->mbmi.mode == SPLITMV)
int is_4x4;
is_4x4 = (xd->mode_info_context->mbmi.mode == SPLITMV) ||
(xd->mode_info_context->mbmi.mode == B_PRED);
if (is_4x4)
{
for (i = 0; i < 16; i++)
{
BLOCKD *d = &xd->block[i];
d->bmi.mv.as_int = xd->mode_info_context->bmi[i].mv.as_int;
}
}else if (xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
BLOCKD *d = &xd->block[i];
d->bmi.mode = xd->mode_info_context->bmi[i].as_mode;
xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
}
}

View file

@ -355,7 +355,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
do /* for each subset j */
{
int_mv leftmv, abovemv;
B_MODE_INFO bmi;
int_mv blockmv;
int k; /* first block in subset j */
int mv_contz;
k = vp8_mbsplit_offset[s][j];
@ -364,30 +364,30 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
abovemv.as_int = above_block_mv(mi, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
switch ((B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
{
case NEW4X4:
read_mv(bc, &bmi.mv.as_mv, (const MV_CONTEXT *) mvc);
bmi.mv.as_mv.row += best_mv.as_mv.row;
bmi.mv.as_mv.col += best_mv.as_mv.col;
read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
blockmv.as_mv.row += best_mv.as_mv.row;
blockmv.as_mv.col += best_mv.as_mv.col;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][3]++;
#endif
break;
case LEFT4X4:
bmi.mv.as_int = leftmv.as_int;
blockmv.as_int = leftmv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][0]++;
#endif
break;
case ABOVE4X4:
bmi.mv.as_int = abovemv.as_int;
blockmv.as_int = abovemv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][1]++;
#endif
break;
case ZERO4X4:
bmi.mv.as_int = 0;
blockmv.as_int = 0;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][2]++;
#endif
@ -396,7 +396,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
}
mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&bmi.mv,
mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&blockmv,
mb_to_left_edge,
mb_to_right_edge,
mb_to_top_edge,
@ -412,7 +412,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
do {
mi->bmi[ *fill_offset].mv.as_int = bmi.mv.as_int;
mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
fill_offset++;
}while (--fill_count);
}

View file

@ -288,7 +288,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
BLOCKD *b = &xd->block[i];
RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
(b, b->bmi.mode, b->predictor);
(b, b->bmi.as_mode, b->predictor);
if (xd->eobs[i] > 1)
{
@ -974,8 +974,6 @@ int vp8_decode_frame(VP8D_COMP *pbi)
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
vpx_memcpy(&xd->block[0].bmi, &xd->mode_info_context->bmi[0], sizeof(B_MODE_INFO));
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
{

View file

@ -186,7 +186,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
for (i = 0; i < 16; i++)
{
BLOCKD *b = &xd->block[i];
vp8mt_predict_intra4x4(pbi, xd, b->bmi.mode, b->predictor, mb_row, mb_col, i);
vp8mt_predict_intra4x4(pbi, xd, b->bmi.as_mode, b->predictor, mb_row, mb_col, i);
if (xd->eobs[i] > 1)
{
DEQUANT_INVOKE(&pbi->dequant, idct_add)

View file

@ -1008,28 +1008,32 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
do
{
const B_MODE_INFO *const b = cpi->mb.partition_info->bmi + j;
B_PREDICTION_MODE blockmode;
int_mv blockmv;
const int *const L = vp8_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
blockmode = cpi->mb.partition_info->bmi[j].mode;
blockmv = cpi->mb.partition_info->bmi[j].mv;
while (j != L[++k])
if (k >= 16)
assert(0);
leftmv.as_int = left_block_mv(m, k);
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
write_sub_mv_ref(w, b->mode, vp8_sub_mv_ref_prob2 [mv_contz]); //pc->fc.sub_mv_ref_prob);
write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]);
if (b->mode == NEW4X4)
if (blockmode == NEW4X4)
{
#ifdef ENTROPY_STATS
active_section = 11;
#endif
write_mv(w, &b->mv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
}
}
while (++j < cpi->mb.partition_info->count);

View file

@ -54,7 +54,11 @@ typedef struct
typedef struct
{
int count;
B_MODE_INFO bmi[16];
struct
{
B_PREDICTION_MODE mode;
int_mv mv;
} bmi[16];
} PARTITION_INFO;
typedef struct

View file

@ -272,6 +272,7 @@ static void build_activity_map( VP8_COMP *cpi )
// Activity masking based on Tim T's original code
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
unsigned int a;
unsigned int b;
unsigned int act = *(x->mb_activity_ptr);
@ -477,24 +478,9 @@ void encode_mb_row(VP8_COMP *cpi,
x->mb_activity_ptr++;
x->mb_norm_activity_ptr++;
if(cm->frame_type != INTRA_FRAME)
{
if (xd->mode_info_context->mbmi.mode != B_PRED)
{
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
}else
{
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
}
}
else
{
if(xd->mode_info_context->mbmi.mode != B_PRED)
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
}
/* save the block info */
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i] = xd->block[i].bmi;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;

View file

@ -36,7 +36,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
BLOCK *be = &x->block[ib];
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
(b, b->bmi.mode, b->predictor);
(b, b->bmi.as_mode, b->predictor);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
@ -89,19 +89,19 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
switch (x->e_mbd.mode_info_context->mbmi.mode)
{
case DC_PRED:
d->bmi.mode = B_DC_PRED;
d->bmi.as_mode = B_DC_PRED;
break;
case V_PRED:
d->bmi.mode = B_VE_PRED;
d->bmi.as_mode = B_VE_PRED;
break;
case H_PRED:
d->bmi.mode = B_HE_PRED;
d->bmi.as_mode = B_HE_PRED;
break;
case TM_PRED:
d->bmi.mode = B_TM_PRED;
d->bmi.as_mode = B_TM_PRED;
break;
default:
d->bmi.mode = B_DC_PRED;
d->bmi.as_mode = B_DC_PRED;
break;
}
}

View file

@ -232,23 +232,9 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
x->mb_activity_ptr++;
x->mb_norm_activity_ptr++;
if(cm->frame_type != INTRA_FRAME)
{
if (xd->mode_info_context->mbmi.mode != B_PRED)
{
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
}else
{
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
}
}
else {
if(xd->mode_info_context->mbmi.mode != B_PRED)
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
}
/* save the block info */
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i] = xd->block[i].bmi;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;

View file

@ -100,7 +100,7 @@ static int encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
{
for (i = 0; i < 16; i++)
{
x->e_mbd.block[i].bmi.mode = B_DC_PRED;
x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
}

View file

@ -47,7 +47,6 @@ extern unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_st
extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
@ -215,7 +214,8 @@ static int pick_intra4x4block(
*best_mode = mode;
}
}
b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
vp8_encode_intra4x4block(rtcd, x, ib);
return best_rd;
}
@ -251,7 +251,7 @@ int vp8_pick_intra4x4mby_modes
cost += r;
distortion += d;
mic->bmi[i].as_mode = xd->block[i].bmi.mode = best_mode;
mic->bmi[i].as_mode = best_mode;
// Break out case where we have already exceeded best so far value
// that was passed in
@ -443,7 +443,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
B_MODE_INFO best_bmodes[16];
union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
int_mv best_ref_mv;
@ -485,6 +485,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
vpx_memset(near_mv, 0, sizeof(near_mv));
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
// set up all the refframe dependent pointers.
@ -736,26 +737,26 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
//adjust search range according to sr from mv prediction
if(sr > step_param)
step_param = sr;
col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
if (x->mv_col_max > col_max )
x->mv_col_max = col_max;
if (x->mv_row_min < row_min )
x->mv_row_min = row_min;
if (x->mv_row_max > row_max )
x->mv_row_max = row_max;
}else
{
mvp.as_int = best_ref_mv.as_int;
}
col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
if (x->mv_col_max > col_max )
x->mv_col_max = col_max;
if (x->mv_row_min < row_min )
x->mv_row_min = row_min;
if (x->mv_row_max > row_max )
x->mv_row_max = row_max;
further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param);
if (cpi->sf.search_method == HEX)
@ -808,13 +809,10 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
}
if(cpi->sf.improved_mv_pred)
{
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
x->mv_row_min = tmp_row_min;
x->mv_row_max = tmp_row_max;
}
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
x->mv_row_min = tmp_row_min;
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX)
cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
@ -894,7 +892,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (this_mode == B_PRED)
for (i = 0; i < 16; i++)
{
vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
best_bmodes[i].as_mode = x->e_mbd.block[i].bmi.as_mode;
}
// Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
@ -962,10 +960,11 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
}
}
update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}

View file

@ -711,7 +711,7 @@ static int rd_pick_intra4x4block(
vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
}
}
b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@ -1464,8 +1464,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
{
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
bd->bmi.mode = bsi.modes[i];
bd->bmi.mv.as_int = bsi.mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
@ -1780,7 +1779,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
B_MODE_INFO best_bmodes[16];
union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
int_mv best_ref_mv;
@ -1824,6 +1823,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
unsigned char *v_buffer[4];
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
if (cpi->ref_frame_flags & VP8_LAST_FLAG)
{
@ -2396,10 +2396,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
}
if ((this_mode == B_PRED) || (this_mode == SPLITMV))
for (i = 0; i < 16; i++)
{
best_bmodes[i] = x->e_mbd.block[i].bmi;
}
// Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
@ -2473,7 +2475,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (best_mbmode.mode == B_PRED)
{
for (i = 0; i < 16; i++)
x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
}
if (best_mbmode.mode == SPLITMV)