Merge "Restore previous motion search bit-error scale."
This commit is contained in:
Commit
0091fef7a2
|
@ -65,8 +65,14 @@ struct macroblock {
|
||||||
int skip_optimize;
|
int skip_optimize;
|
||||||
int q_index;
|
int q_index;
|
||||||
|
|
||||||
|
// The equivalent error at the current rdmult of one whole bit (not one
|
||||||
|
// bitcost unit).
|
||||||
int errorperbit;
|
int errorperbit;
|
||||||
|
// The equivalend SAD error of one (whole) bit at the current quantizer
|
||||||
|
// for large blocks.
|
||||||
int sadperbit16;
|
int sadperbit16;
|
||||||
|
// The equivalend SAD error of one (whole) bit at the current quantizer
|
||||||
|
// for sub-8x8 blocks.
|
||||||
int sadperbit4;
|
int sadperbit4;
|
||||||
int rddiv;
|
int rddiv;
|
||||||
int rdmult;
|
int rdmult;
|
||||||
|
|
|
@ -80,27 +80,29 @@ int vp9_mv_bit_cost(const MV *mv, const MV *ref,
|
||||||
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
|
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mv_err_cost(const MV *mv, const MV *ref,
|
#define PIXEL_TRANSFORM_ERROR_SCALE 4
|
||||||
const int *mvjcost, int *mvcost[2],
|
static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
|
||||||
int error_per_bit) {
|
int *mvcost[2], int error_per_bit) {
|
||||||
if (mvcost) {
|
if (mvcost) {
|
||||||
const MV diff = { mv->row - ref->row,
|
const MV diff = {mv->row - ref->row, mv->col - ref->col};
|
||||||
mv->col - ref->col };
|
// This product sits at a 32-bit ceiling right now and any additional
|
||||||
// TODO(aconverse): See if this shift needs to be tied to
|
// accuracy in either bit cost or error cost will cause it to overflow.
|
||||||
// VP9_PROB_COST_SHIFT.
|
return ROUND_POWER_OF_TWO(
|
||||||
return ROUND_POWER_OF_TWO((unsigned)mv_cost(&diff, mvjcost, mvcost) *
|
(unsigned)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
|
||||||
error_per_bit, 13);
|
RDDIV_BITS + VP9_PROB_COST_SHIFT - RD_EPB_SHIFT +
|
||||||
|
PIXEL_TRANSFORM_ERROR_SCALE);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
|
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
|
||||||
int error_per_bit) {
|
int sad_per_bit) {
|
||||||
const MV diff = { mv->row - ref->row,
|
const MV diff = { mv->row - ref->row,
|
||||||
mv->col - ref->col };
|
mv->col - ref->col };
|
||||||
// TODO(aconverse): See if this shift needs to be tied to VP9_PROB_COST_SHIFT.
|
return ROUND_POWER_OF_TWO(
|
||||||
return ROUND_POWER_OF_TWO((unsigned)mv_cost(&diff, x->nmvjointsadcost,
|
(unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) *
|
||||||
x->nmvsadcost) * error_per_bit, 8);
|
sad_per_bit,
|
||||||
|
VP9_PROB_COST_SHIFT);
|
||||||
}
|
}
|
||||||
|
|
||||||
void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride) {
|
void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride) {
|
||||||
|
@ -152,12 +154,13 @@ void vp9_init3smotion_compensation(search_site_config *cfg, int stride) {
|
||||||
* could reduce the area.
|
* could reduce the area.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* estimated cost of a motion vector (r,c) */
|
/* Estimated (square) error cost of a motion vector (r,c). The 14 scale comes
|
||||||
|
* from the same math as in mv_err_cost(). */
|
||||||
#define MVC(r, c) \
|
#define MVC(r, c) \
|
||||||
(mvcost ? \
|
(mvcost ? \
|
||||||
((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
|
((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
|
||||||
mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
|
mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
|
||||||
error_per_bit + 4096) >> 13 : 0)
|
error_per_bit + 8192) >> 14 : 0)
|
||||||
|
|
||||||
|
|
||||||
// convert motion vector component to offset for sv[a]f calc
|
// convert motion vector component to offset for sv[a]f calc
|
||||||
|
|
|
@ -342,8 +342,7 @@ void vp9_init_plane_quantizers(VP9_COMP *cpi, MACROBLOCK *x) {
|
||||||
x->skip_block = segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
|
x->skip_block = segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
|
||||||
x->q_index = qindex;
|
x->q_index = qindex;
|
||||||
|
|
||||||
x->errorperbit = rdmult >> 6;
|
set_error_per_bit(x, rdmult);
|
||||||
x->errorperbit += (x->errorperbit == 0);
|
|
||||||
|
|
||||||
vp9_initialize_me_consts(cpi, x, x->q_index);
|
vp9_initialize_me_consts(cpi, x, x->q_index);
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,7 +41,6 @@
|
||||||
#include "vp9/encoder/vp9_tokenize.h"
|
#include "vp9/encoder/vp9_tokenize.h"
|
||||||
|
|
||||||
#define RD_THRESH_POW 1.25
|
#define RD_THRESH_POW 1.25
|
||||||
#define RD_MULT_EPB_RATIO 64
|
|
||||||
|
|
||||||
// Factor to weigh the rate for switchable interp filters.
|
// Factor to weigh the rate for switchable interp filters.
|
||||||
#define SWITCHABLE_INTERP_RATE_FACTOR 1
|
#define SWITCHABLE_INTERP_RATE_FACTOR 1
|
||||||
|
@ -279,8 +278,7 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi) {
|
||||||
rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
|
rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
|
||||||
rd->RDMULT = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
|
rd->RDMULT = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
|
||||||
|
|
||||||
x->errorperbit = rd->RDMULT / RD_MULT_EPB_RATIO;
|
set_error_per_bit(x, rd->RDMULT);
|
||||||
x->errorperbit += (x->errorperbit == 0);
|
|
||||||
|
|
||||||
x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
|
x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
|
||||||
cm->frame_type != KEY_FRAME) ? 0 : 1;
|
cm->frame_type != KEY_FRAME) ? 0 : 1;
|
||||||
|
|
|
@ -24,6 +24,7 @@ extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define RDDIV_BITS 7
|
#define RDDIV_BITS 7
|
||||||
|
#define RD_EPB_SHIFT 6
|
||||||
|
|
||||||
#define RDCOST(RM, DM, R, D) \
|
#define RDCOST(RM, DM, R, D) \
|
||||||
(ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
|
(ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
|
||||||
|
@ -168,6 +169,11 @@ static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
|
||||||
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
|
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
|
||||||
|
x->errorperbit = rdmult >> RD_EPB_SHIFT;
|
||||||
|
x->errorperbit += (x->errorperbit == 0);
|
||||||
|
}
|
||||||
|
|
||||||
void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x,
|
void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x,
|
||||||
uint8_t *ref_y_buffer, int ref_y_stride,
|
uint8_t *ref_y_buffer, int ref_y_stride,
|
||||||
int ref_frame, BLOCK_SIZE block_size);
|
int ref_frame, BLOCK_SIZE block_size);
|
||||||
|
|
|
@ -47,12 +47,12 @@ static INLINE int mv_cost(const int_mv mv,
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref,
|
static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref,
|
||||||
int error_per_bit) {
|
int sad_per_bit) {
|
||||||
const int_mv diff = pack_int_mv(mv.as_mv.row - ref->row,
|
const int_mv diff = pack_int_mv(mv.as_mv.row - ref->row,
|
||||||
mv.as_mv.col - ref->col);
|
mv.as_mv.col - ref->col);
|
||||||
return ROUND_POWER_OF_TWO((unsigned)mv_cost(diff, x->nmvjointsadcost,
|
return ROUND_POWER_OF_TWO((unsigned)mv_cost(diff, x->nmvjointsadcost,
|
||||||
x->nmvsadcost) *
|
x->nmvsadcost) *
|
||||||
error_per_bit, 8);
|
sad_per_bit, VP9_PROB_COST_SHIFT);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*****************************************************************************
|
/*****************************************************************************
|
||||||
|
|
Loading…
Reference in new issue