Removing unused "ishp" arguments.

Using different variable names "allow_hp" and "use_hp" instead of "usehp".

Change-Id: I0cd5996ddeb46bd754473b680a993c0aaf8eb879
Dmitry Kovalev, 2013-07-31 11:27:53 -07:00
Parent: ac7bab7575
Commit: 500ade243a
5 changed files with 79 additions and 103 deletions
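For reference, the net effect on the decoder-side helper is sketched below (assembled from the hunks that follow; only names that appear in this diff are used, and the tail of the function is elided since this commit does not touch it). The caller now passes the frame-level allow_hp flag, and whether high precision is actually used for a given motion vector is decided locally:

    static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
                               const nmv_context *ctx,
                               nmv_context_counts *counts, int allow_hp) {
      // Joint class of the MV difference (which components are nonzero).
      const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
      // allow_hp is the frame-level permission; use_hp is the per-MV decision.
      const int use_hp = allow_hp && vp9_use_mv_hp(ref);
      MV diff = {0, 0};

      if (mv_joint_vertical(j))
        diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
      if (mv_joint_horizontal(j))
        diff.col = read_mv_component(r, &ctx->comps[1], use_hp);

      vp9_inc_mv(&diff, counts);
      // ... rest of the function (applying diff to *mv) is unchanged by this commit.
    }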

View file

@@ -228,16 +228,16 @@ static int read_mv_component(vp9_reader *r,
static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx,
nmv_context_counts *counts, int usehp) {
nmv_context_counts *counts, int allow_hp) {
const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
const int use_hp = allow_hp && vp9_use_mv_hp(ref);
MV diff = {0, 0};
usehp = usehp && vp9_use_mv_hp(ref);
if (mv_joint_vertical(j))
diff.row = read_mv_component(r, &ctx->comps[0], usehp);
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
if (mv_joint_horizontal(j))
diff.col = read_mv_component(r, &ctx->comps[1], usehp);
diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
vp9_inc_mv(&diff, counts);
@@ -250,7 +250,7 @@ static void update_mv(vp9_reader *r, vp9_prob *p, vp9_prob upd_p) {
*p = (vp9_read_literal(r, 7) << 1) | 1;
}
static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int usehp) {
static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) {
int i, j, k;
for (j = 0; j < MV_JOINTS - 1; ++j)
@@ -281,7 +281,7 @@ static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int usehp) {
update_mv(r, &comp->fp[j], VP9_NMV_UPDATE_PROB);
}
if (usehp) {
if (allow_hp) {
for (i = 0; i < 2; ++i) {
update_mv(r, &mvc->comps[i].class0_hp, VP9_NMV_UPDATE_PROB);
update_mv(r, &mvc->comps[i].hp, VP9_NMV_UPDATE_PROB);
@@ -443,6 +443,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
int_mv *const mv0 = &mbmi->mv[0];
int_mv *const mv1 = &mbmi->mv[1];
const BLOCK_SIZE_TYPE bsize = mbmi->sb_type;
const int allow_hp = xd->allow_high_precision_mv;
int_mv nearest, nearby, best_mv;
int_mv nearest_second, nearby_second, best_mv_second;
@@ -508,11 +509,11 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
switch (blockmode) {
case NEWMV:
read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
&cm->counts.mv, xd->allow_high_precision_mv);
&cm->counts.mv, allow_hp);
if (ref1 > 0)
read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
&cm->counts.mv, xd->allow_high_precision_mv);
&cm->counts.mv, allow_hp);
break;
case NEARESTMV:
blockmv.as_int = nearest.as_int;
@@ -586,11 +587,10 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
break;
case NEWMV:
read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv,
xd->allow_high_precision_mv);
read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, allow_hp);
if (ref1 > 0)
read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
&cm->counts.mv, xd->allow_high_precision_mv);
read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, &cm->counts.mv,
allow_hp);
break;
default:
assert(!"Invalid inter mode value");

View file

@@ -406,6 +406,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
const int segment_id = mi->segment_id;
int skip_coeff;
const BLOCK_SIZE_TYPE bsize = mi->sb_type;
const int allow_hp = xd->allow_high_precision_mv;
x->partition_info = x->pi + (m - pc->mi);
@@ -500,13 +501,13 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
active_section = 11;
#endif
vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
nmvc, allow_hp);
if (mi->ref_frame[1] > INTRA_FRAME)
vp9_encode_mv(cpi, bc,
&m->bmi[j].as_mv[1].as_mv,
&mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
nmvc, allow_hp);
}
}
}
@@ -514,14 +515,12 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
#ifdef ENTROPY_STATS
active_section = 5;
#endif
vp9_encode_mv(cpi, bc,
&mi->mv[0].as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, &mi->best_mv.as_mv,
nmvc, allow_hp);
if (mi->ref_frame[1] > INTRA_FRAME)
vp9_encode_mv(cpi, bc,
&mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, allow_hp);
}
}
}

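The encoder-side packer gets the same treatment as the decoder: xd->allow_high_precision_mv is read once into a const local at the top of pack_inter_mode_mvs and that local is reused at every vp9_encode_mv call site, e.g. (taken from the hunks above):

    const int allow_hp = xd->allow_high_precision_mv;
    ...
    vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, &mi->best_mv.as_mv,
                  nmvc, allow_hp);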
View file

@@ -58,7 +58,7 @@ int vp9_init_search_range(VP9_COMP *cpi, int size) {
}
int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
int weight, int ishp) {
int weight) {
MV v;
v.row = mv->as_mv.row - ref->as_mv.row;
v.col = mv->as_mv.col - ref->as_mv.col;
@@ -68,7 +68,7 @@ int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
}
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
int error_per_bit, int ishp) {
int error_per_bit) {
if (mvcost) {
MV v;
v.row = mv->as_mv.row - ref->as_mv.row;
@@ -300,8 +300,7 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x,
// calculate central point error
besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
// TODO: Each subsequent iteration checks at least one point in
// common with the last iteration could be 2 ( if diag selected)
@@ -490,8 +489,7 @@ int vp9_find_best_sub_pixel_comp(MACROBLOCK *x,
comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
// Each subsequent iteration checks at least one point in
// common with the last iteration could be 2 ( if diag selected)
@@ -654,15 +652,14 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (left < bestmse) {
*bestmv = this_mv;
@@ -674,7 +671,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
error_per_bit);
if (right < bestmse) {
*bestmv = this_mv;
@@ -687,8 +684,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
if (up < bestmse) {
*bestmv = this_mv;
@@ -699,8 +695,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (down < bestmse) {
*bestmv = this_mv;
@@ -742,8 +738,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
break;
}
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -784,8 +780,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (left < bestmse) {
*bestmv = this_mv;
@@ -799,7 +795,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
error_per_bit);
if (right < bestmse) {
*bestmv = this_mv;
@@ -822,8 +818,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
z, src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
if (up < bestmse) {
*bestmv = this_mv;
@@ -835,8 +830,9 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
this_mv.as_mv.row += 4;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (down < bestmse) {
*bestmv = this_mv;
@@ -923,8 +919,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
break;
}
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -968,8 +964,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
z, src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (left < bestmse) {
*bestmv = this_mv;
@@ -982,7 +978,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
error_per_bit);
if (right < bestmse) {
*bestmv = this_mv;
@@ -1005,8 +1001,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
if (up < bestmse) {
*bestmv = this_mv;
@@ -1019,8 +1014,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (down < bestmse) {
*bestmv = this_mv;
@@ -1107,8 +1102,8 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
break;
}
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -1153,15 +1148,14 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x,
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (left < bestmse) {
*bestmv = this_mv;
@@ -1173,7 +1167,7 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x,
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
error_per_bit);
if (right < bestmse) {
*bestmv = this_mv;
@@ -1186,8 +1180,7 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x,
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
if (up < bestmse) {
*bestmv = this_mv;
@@ -1198,8 +1191,8 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x,
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (down < bestmse) {
*bestmv = this_mv;
@@ -1238,8 +1231,8 @@ int vp9_find_best_half_pixel_step(MACROBLOCK *x,
break;
}
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -1580,11 +1573,9 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x,
if (bestsad == INT_MAX)
return INT_MAX;
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) + mv_err_cost(&this_mv, center_mv, mvjcost,
mvcost, x->errorperbit);
}
int vp9_diamond_search_sadx4(MACROBLOCK *x,
@@ -1754,11 +1745,9 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
if (bestsad == INT_MAX)
return INT_MAX;
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) + mv_err_cost(&this_mv,
center_mv, mvjcost, mvcost, x->errorperbit);
}
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
@@ -1914,8 +1903,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
else
return INT_MAX;
}
@@ -2042,8 +2030,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
else
return INT_MAX;
}
@@ -2197,8 +2184,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
else
return INT_MAX;
}
@@ -2274,8 +2260,7 @@ int vp9_refining_search_sad_c(MACROBLOCK *x,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
else
return INT_MAX;
}
@@ -2381,8 +2366,7 @@ int vp9_refining_search_sadx4(MACROBLOCK *x,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
else
return INT_MAX;
}
@@ -2472,12 +2456,10 @@ int vp9_refining_search_8p_c(MACROBLOCK *x,
if (bestsad < INT_MAX) {
// FIXME(rbultje, yunqing): add full-pixel averaging variance functions
// so we don't have to use the subpixel with xoff=0,yoff=0 here.
int besterr = fn_ptr->svaf(best_address, in_what_stride, 0, 0,
return fn_ptr->svaf(best_address, in_what_stride, 0, 0,
what, what_stride, (unsigned int *)(&thissad),
second_pred) +
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
return besterr;
mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
} else {
return INT_MAX;
}

View file

@@ -25,7 +25,7 @@
void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv);
int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost,
int *mvcost[2], int weight, int ishp);
int *mvcost[2], int weight);
void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride);
void vp9_init3smotion_compensation(MACROBLOCK *x, int stride);

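In short, the motion-search cost helpers simply drop their trailing high-precision argument, which they never used. The public prototype and a representative call site after this change (copied from the hunks above; mv_err_cost is file-static, so only its callers change visibly):

    int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost,
                        int *mvcost[2], int weight);

    /* before */
    besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
                           error_per_bit, xd->allow_high_precision_mv);
    /* after */
    besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);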
View file

@@ -1613,12 +1613,11 @@ static int labels2mode(MACROBLOCK *x, int i,
case NEWMV:
this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
102, xd->allow_high_precision_mv);
102);
if (mbmi->ref_frame[1] > 0) {
this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
mvjcost, mvcost, 102,
xd->allow_high_precision_mv);
mvjcost, mvcost, 102);
}
break;
case NEARESTMV:
@@ -2565,7 +2564,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
}
*rate_mv = vp9_mv_bit_cost(tmp_mv, &ref_mv,
x->nmvjointcost, x->mvcost,
96, xd->allow_high_precision_mv);
96);
if (scaled_ref_frame) {
int i;
for (i = 0; i < MAX_MB_PLANE; i++)
@@ -2720,12 +2719,10 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
}
*rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
&mbmi->ref_mvs[refs[0]][0],
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
x->nmvjointcost, x->mvcost, 96);
*rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
&mbmi->ref_mvs[refs[1]][0],
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
x->nmvjointcost, x->mvcost, 96);
vpx_free(second_pred);
}
@@ -2778,12 +2775,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} else {
rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
&mbmi->ref_mvs[refs[0]][0],
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
x->nmvjointcost, x->mvcost, 96);
rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
&mbmi->ref_mvs[refs[1]][0],
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
x->nmvjointcost, x->mvcost, 96);
}
if (frame_mv[refs[0]].as_int == INVALID_MV ||
frame_mv[refs[1]].as_int == INVALID_MV)
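
The rate-distortion callers simplify accordingly: with the unused flag gone from vp9_mv_bit_cost, they no longer need to reach into x->e_mbd at all. A representative call after this change (from the hunks above):

    rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
                              &mbmi->ref_mvs[refs[0]][0],
                              x->nmvjointcost, x->mvcost, 96);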