Merge "Switch active map implementation to segment based."

Alex Converse 2014-06-23 18:25:51 -07:00, committed by Gerrit Code Review
Parents 20adfc5350 aeacaac574
Commit 2518e33bec
6 changed files with 19 additions and 126 deletions
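In broad strokes, the diff below drops the encoder-private active_map / active_map_enabled state (one byte per 16x16 macroblock, consulted through check_active_map() during partition and mode search) and expresses the same information through the regular VP9 segmentation machinery: inactive macroblocks are assigned to segment 1, which gets the SEG_LVL_SKIP feature, so the existing segmentation path codes them as skipped. The only nontrivial arithmetic is projecting the 8x8 "mi" grid onto the 16x16 macroblock grid. A minimal standalone sketch of that mapping follows; the helper and variable names (active_map_to_seg_map, seg_map) are hypothetical and not part of the patch itself:

/* Illustrative sketch only (hypothetical helper, not part of the patch):
 * project a per-16x16-macroblock activity map (nonzero = active) onto a
 * per-8x8 mi-resolution segmentation map. Segment 1 carries SEG_LVL_SKIP;
 * segment 0 is coded normally. */
static void active_map_to_seg_map(const unsigned char *active_map,
                                  int mb_cols,  /* activity-map width in MBs */
                                  unsigned char *seg_map,
                                  int mi_rows, int mi_cols) {
  int r, c;
  for (r = 0; r < mi_rows; r++) {
    for (c = 0; c < mi_cols; c++) {
      /* The 8x8 block at mi position (r, c) lies inside the 16x16 macroblock
       * at (r >> 1, c >> 1). Active -> segment 0, inactive -> segment 1. */
      seg_map[r * mi_cols + c] = !active_map[(r >> 1) * mb_cols + (c >> 1)];
    }
  }
}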

View file

@@ -93,8 +93,6 @@ struct macroblock {
   int encode_breakout;
-  int in_active_map;
-
   // note that token_costs is the cost when eob node is skipped
   vp9_coeff_cost token_costs[TX_SIZES];

View file

@@ -139,42 +139,6 @@ static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
   xd->mi[0] = cm->mi + idx_str;
 }
 
-static int is_block_in_mb_map(const VP9_COMP *cpi, int mi_row, int mi_col,
-                              BLOCK_SIZE bsize) {
-  const VP9_COMMON *const cm = &cpi->common;
-  const int mb_rows = cm->mb_rows;
-  const int mb_cols = cm->mb_cols;
-  const int mb_row = mi_row >> 1;
-  const int mb_col = mi_col >> 1;
-  const int mb_width = num_8x8_blocks_wide_lookup[bsize] >> 1;
-  const int mb_height = num_8x8_blocks_high_lookup[bsize] >> 1;
-  int r, c;
-  if (bsize <= BLOCK_16X16) {
-    return cpi->active_map[mb_row * mb_cols + mb_col];
-  }
-  for (r = 0; r < mb_height; ++r) {
-    for (c = 0; c < mb_width; ++c) {
-      int row = mb_row + r;
-      int col = mb_col + c;
-      if (row >= mb_rows || col >= mb_cols)
-        continue;
-      if (cpi->active_map[row * mb_cols + col])
-        return 1;
-    }
-  }
-  return 0;
-}
-
-static int check_active_map(const VP9_COMP *cpi, const MACROBLOCK *x,
-                            int mi_row, int mi_col,
-                            BLOCK_SIZE bsize) {
-  if (cpi->active_map_enabled && !x->e_mbd.lossless) {
-    return is_block_in_mb_map(cpi, mi_row, mi_col, bsize);
-  } else {
-    return 1;
-  }
-}
-
 static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                         int mi_row, int mi_col, BLOCK_SIZE bsize) {
   MACROBLOCK *const x = &cpi->mb;
@@ -187,9 +151,6 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
   set_skip_context(xd, mi_row, mi_col);
 
-  // Activity map pointer
-  x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
-
   set_modeinfo_offsets(cm, xd, mi_row, mi_col);
 
   mbmi = &xd->mi[0]->mbmi;
@@ -1513,20 +1474,8 @@ static void rd_use_partition(VP9_COMP *cpi,
   if (bsize == BLOCK_16X16) {
     set_offsets(cpi, tile, mi_row, mi_col, bsize);
     x->mb_energy = vp9_block_energy(cpi, x, bsize);
-  } else {
-    x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
   }
-  if (!x->in_active_map) {
-    do_partition_search = 0;
-    if (mi_row + (mi_step >> 1) < cm->mi_rows &&
-        mi_col + (mi_step >> 1) < cm->mi_cols) {
-      pc_tree->partitioning = PARTITION_NONE;
-      bs_type = mi_8x8[0]->mbmi.sb_type = bsize;
-      subsize = bsize;
-      partition = PARTITION_NONE;
-    }
-  }
 
   if (do_partition_search &&
       cpi->sf.partition_search_type == SEARCH_PARTITION &&
       cpi->sf.adjust_partitioning_from_last_frame) {
@@ -1989,8 +1938,6 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
   if (bsize == BLOCK_16X16) {
     set_offsets(cpi, tile, mi_row, mi_col, bsize);
     x->mb_energy = vp9_block_energy(cpi, x, bsize);
-  } else {
-    x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
   }
   // Determine partition types in search according to the speed features.
   // The threshold set here has to be of square block size.
@@ -2023,8 +1970,6 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
     }
   }
-  if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed))
-    do_split = 0;
 
   // PARTITION_NONE
   if (partition_none_allowed) {
     rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize,
@@ -2058,10 +2003,6 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
         }
       }
     }
-    if (!x->in_active_map) {
-      do_split = 0;
-      do_rect = 0;
-    }
 
     restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
   }
@@ -2591,8 +2532,6 @@ static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
   assert(num_8x8_blocks_wide_lookup[bsize] ==
          num_8x8_blocks_high_lookup[bsize]);
 
-  x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
-
   // Determine partition types in search according to the speed features.
   // The threshold set here has to be of square block size.
   if (cpi->sf.auto_min_max_partition_size) {
@@ -2611,9 +2550,6 @@ static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
     partition_vert_allowed &= force_vert_split;
   }
 
-  if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed))
-    do_split = 0;
-
   // PARTITION_NONE
   if (partition_none_allowed) {
     nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
@@ -2649,10 +2585,6 @@ static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
         }
      }
    }
-    if (!x->in_active_map) {
-      do_split = 0;
-      do_rect = 0;
-    }
  }
 
  // store estimated motion vector

View file

@@ -174,9 +174,6 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
   vp9_cyclic_refresh_free(cpi->cyclic_refresh);
   cpi->cyclic_refresh = NULL;
 
-  vpx_free(cpi->active_map);
-  cpi->active_map = NULL;
-
   vp9_free_frame_buffers(cm);
   vp9_free_context_buffers(cm);
@@ -760,10 +757,6 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
   CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
 
-  CHECK_MEM_ERROR(cm, cpi->active_map, vpx_calloc(cm->MBs, 1));
-  vpx_memset(cpi->active_map, 1, cm->MBs);
-  cpi->active_map_enabled = 0;
-
   for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
                    sizeof(cpi->mbgraph_stats[0])); i++) {
     CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats,
@@ -2784,16 +2777,23 @@ int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest,
 int vp9_set_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
+    const int mi_rows = cpi->common.mi_rows;
+    const int mi_cols = cpi->common.mi_cols;
     if (map) {
-      vpx_memcpy(cpi->active_map, map, rows * cols);
-      cpi->active_map_enabled = 1;
+      int r, c;
+      for (r = 0; r < mi_rows; r++) {
+        for (c = 0; c < mi_cols; c++) {
+          cpi->segmentation_map[r * mi_cols + c] =
+              !map[(r >> 1) * cols + (c >> 1)];
+        }
+      }
+      vp9_enable_segfeature(&cpi->common.seg, 1, SEG_LVL_SKIP);
+      vp9_enable_segmentation(&cpi->common.seg);
     } else {
-      cpi->active_map_enabled = 0;
+      vp9_disable_segmentation(&cpi->common.seg);
     }
     return 0;
   } else {
     // cpi->active_map_enabled = 0;
     return -1;
   }
 }
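From the application side the interface is unchanged: the map is still one byte per 16x16 macroblock, passed through the existing vpx_codec_control() path (VP8E_SET_ACTIVEMAP with a vpx_active_map_t), which ends up in vp9_set_active_map() above. A hedged usage sketch, assuming that standard control path; the set_active_quarter name, the encoder handle, and the frame-size variables are placeholders:

#include <stdlib.h>
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"

/* Sketch: keep only the top-left quarter of the frame "active". With this
 * commit, VP9 realizes the request by giving every inactive block segment 1
 * with SEG_LVL_SKIP enabled. rows/cols are in 16x16-macroblock units. */
static void set_active_quarter(vpx_codec_ctx_t *encoder,
                               int frame_w, int frame_h) {
  vpx_active_map_t map;
  unsigned int r, c;
  map.rows = (frame_h + 15) / 16;
  map.cols = (frame_w + 15) / 16;
  map.active_map = malloc(map.rows * map.cols);
  if (!map.active_map) return;  /* allocation failure: leave the map unset */
  for (r = 0; r < map.rows; ++r)
    for (c = 0; c < map.cols; ++c)
      map.active_map[r * map.cols + c] =
          (r < map.rows / 2 && c < map.cols / 2) ? 1 : 0;
  vpx_codec_control(encoder, VP8E_SET_ACTIVEMAP, &map);
  free(map.active_map);
}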

View file

@@ -347,9 +347,6 @@ typedef struct VP9_COMP {
   unsigned char *complexity_map;
 
-  unsigned char *active_map;
-  unsigned int active_map_enabled;
-
   CYCLIC_REFRESH *cyclic_refresh;
 
   fractional_mv_step_fp *find_fractional_mv_step;

View file

@@ -370,9 +370,7 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
       // Skipping checking: test to see if this block can be reconstructed by
       // prediction only.
-      if (!x->in_active_map) {
-        x->skip = 1;
-      } else if (cpi->allow_encode_breakout && x->encode_breakout) {
+      if (cpi->allow_encode_breakout && x->encode_breakout) {
         const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
         unsigned int var = var_y, sse = sse_y;
 
         // Skipping threshold for ac.

View file

@@ -2791,12 +2791,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
     *rate2 += vp9_get_switchable_rate(cpi);
 
   if (!is_comp_pred) {
-    if (!x->in_active_map) {
-      if (psse)
-        *psse = 0;
-      *distortion = 0;
-      x->skip = 1;
-    } else if (cpi->allow_encode_breakout && x->encode_breakout) {
+    if (cpi->allow_encode_breakout && x->encode_breakout) {
       const BLOCK_SIZE y_size = get_plane_block_size(bsize, &xd->plane[0]);
       const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
       unsigned int var, sse;
@@ -3143,21 +3138,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     mode_skip_mask |= all_intra_modes;
   }
 
-  if (!x->in_active_map) {
-    int mode_index;
-    assert(cpi->ref_frame_flags & VP9_LAST_FLAG);
-    if (frame_mv[NEARESTMV][LAST_FRAME].as_int == 0)
-      mode_index = THR_NEARESTMV;
-    else if (frame_mv[NEARMV][LAST_FRAME].as_int == 0)
-      mode_index = THR_NEARMV;
-    else
-      mode_index = THR_ZEROMV;
-    mode_skip_mask = ~(1 << mode_index);
-    mode_skip_start = MAX_MODES;
-    inter_mode_mask = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) |
-                      (1 << NEWMV);
-  }
-
   for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
     int mode_excluded = 0;
     int64_t this_rd = INT64_MAX;
@@ -3247,16 +3227,14 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
         }
       }
     } else {
-      if (x->in_active_map) {
-        const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
-        if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
-                                inter_mode_mask, this_mode, ref_frames))
-          continue;
-      }
+      const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
+      if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
+                              inter_mode_mask, this_mode, ref_frames))
+        continue;
     }
 
     mbmi->mode = this_mode;
-    mbmi->uv_mode = x->in_active_map ? DC_PRED : this_mode;
+    mbmi->uv_mode = DC_PRED;
     mbmi->ref_frame[0] = ref_frame;
     mbmi->ref_frame[1] = second_ref_frame;
     // Evaluate all sub-pel filters irrespective of whether we can use
@@ -3565,16 +3543,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     vp9_zero(best_tx_diff);
   }
 
-  if (!x->in_active_map) {
-    assert(mbmi->ref_frame[0] == LAST_FRAME);
-    assert(mbmi->ref_frame[1] == NONE);
-    assert(mbmi->mode == NEARESTMV ||
-           mbmi->mode == NEARMV ||
-           mbmi->mode == ZEROMV);
-    assert(frame_mv[mbmi->mode][LAST_FRAME].as_int == 0);
-    assert(mbmi->mode == mbmi->uv_mode);
-  }
-
   set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
   store_coding_context(x, ctx, best_mode_index,
                        best_pred_diff, best_tx_diff, best_filter_diff);