Merge "Code cleanup." into experimental

John Koleszar 2013-04-01 21:12:56 -07:00 committed by Gerrit Code Review
Parents a417a6e32c 50e54c112d
Commit 49bc402a94
10 changed files with 281 additions and 322 deletions

View file

@ -19,17 +19,16 @@
#include "vp9/common/vp9_systemdependent.h"
void vp9_update_mode_info_border(VP9_COMMON *cpi, MODE_INFO *mi_base) {
int stride = cpi->mode_info_stride;
void vp9_update_mode_info_border(VP9_COMMON *cpi, MODE_INFO *mi) {
const int stride = cpi->mode_info_stride;
int i;
// Clear down top border row
vpx_memset(mi_base, 0, sizeof(MODE_INFO) * cpi->mode_info_stride);
vpx_memset(mi, 0, sizeof(MODE_INFO) * stride);
// Clear left border column
for (i = 1; i < cpi->mb_rows + 1; i++) {
vpx_memset(&mi_base[i * stride], 0, sizeof(MODE_INFO));
}
for (i = 1; i < cpi->mb_rows + 1; i++)
vpx_memset(&mi[i * stride], 0, sizeof(MODE_INFO));
}
void vp9_update_mode_info_in_image(VP9_COMMON *cpi, MODE_INFO *mi) {
@ -39,14 +38,14 @@ void vp9_update_mode_info_in_image(VP9_COMMON *cpi, MODE_INFO *mi) {
for (i = 0; i < cpi->mb_rows; i++) {
for (j = 0; j < cpi->mb_cols; j++) {
mi->mbmi.mb_in_image = 1;
mi++; // Next element in the row
}
mi++; // Step over border element at start of next row
}
}
void vp9_de_alloc_frame_buffers(VP9_COMMON *oci) {
void vp9_free_frame_buffers(VP9_COMMON *oci) {
int i;
for (i = 0; i < NUM_YV12_BUFFERS; i++)
@ -67,19 +66,18 @@ void vp9_de_alloc_frame_buffers(VP9_COMMON *oci) {
int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
int i;
int aligned_width, aligned_height;
vp9_de_alloc_frame_buffers(oci);
// Our internal buffers are always multiples of 16
const int aligned_width = multiple16(width);
const int aligned_height = multiple16(height);
/* our internal buffers are always multiples of 16 */
aligned_width = (width + 15) & ~15;
aligned_height = (height + 15) & ~15;
vp9_free_frame_buffers(oci);
for (i = 0; i < NUM_YV12_BUFFERS; i++) {
oci->fb_idx_ref_cnt[i] = 0;
if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height,
VP9BORDERINPIXELS) < 0) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
}
@ -97,13 +95,13 @@ int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16,
VP9BORDERINPIXELS) < 0) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height,
VP9BORDERINPIXELS) < 0) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
@ -114,7 +112,7 @@ int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
oci->mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
if (!oci->mip) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
@ -125,7 +123,7 @@ int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
oci->prev_mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
if (!oci->prev_mip) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
@ -135,7 +133,7 @@ int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * (3 + oci->mb_cols), 1);
if (!oci->above_context) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
return 1;
}
@ -200,20 +198,18 @@ void vp9_create_common(VP9_COMMON *oci) {
oci->clr_type = REG_YUV;
oci->clamp_type = RECON_CLAMP_REQUIRED;
/* Initialise reference frame sign bias structure to defaults */
// Initialize reference frame sign bias structure to defaults
vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
oci->kf_ymode_probs_update = 0;
}
void vp9_remove_common(VP9_COMMON *oci) {
vp9_de_alloc_frame_buffers(oci);
vp9_free_frame_buffers(oci);
}
void vp9_initialize_common() {
vp9_coef_tree_initialize();
vp9_entropy_mode_init();
vp9_entropy_mv_init();
}

View file

@ -14,13 +14,15 @@
#include "vp9/common/vp9_onyxc_int.h"
void vp9_create_common(VP9_COMMON *oci);
void vp9_remove_common(VP9_COMMON *oci);
void vp9_de_alloc_frame_buffers(VP9_COMMON *oci);
int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height);
void vp9_setup_version(VP9_COMMON *oci);
void vp9_update_mode_info_border(VP9_COMMON *cpi, MODE_INFO *mi_base);
void vp9_update_mode_info_border(VP9_COMMON *cpi, MODE_INFO *mi);
void vp9_update_mode_info_in_image(VP9_COMMON *cpi, MODE_INFO *mi);
void vp9_create_common(VP9_COMMON *oci);
void vp9_remove_common(VP9_COMMON *oci);
int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height);
void vp9_free_frame_buffers(VP9_COMMON *oci);
void vp9_setup_version(VP9_COMMON *oci);
#endif // VP9_COMMON_VP9_ALLOCCOMMON_H_

View file

@ -652,25 +652,28 @@ static void update_blockd_bmi(MACROBLOCKD *xd) {
}
static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
TX_SIZE tx_size_uv;
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
tx_size_uv = xd->mode_info_context->mbmi.txfm_size;
} else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32)
tx_size_uv = TX_16X16;
else
tx_size_uv = xd->mode_info_context->mbmi.txfm_size;
} else {
if (xd->mode_info_context->mbmi.txfm_size == TX_16X16)
tx_size_uv = TX_8X8;
else if (xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
(xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV))
tx_size_uv = TX_4X4;
else
tx_size_uv = xd->mode_info_context->mbmi.txfm_size;
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const TX_SIZE size = mbmi->txfm_size;
const MB_PREDICTION_MODE mode = mbmi->mode;
switch (mbmi->sb_type) {
case BLOCK_SIZE_SB64X64:
return size;
case BLOCK_SIZE_SB32X32:
if (size == TX_32X32)
return TX_16X16;
else
return size;
default:
if (size == TX_16X16)
return TX_8X8;
else if (size == TX_8X8 && (mode == I8X8_PRED || mode == SPLITMV))
return TX_4X4;
else
return size;
}
return tx_size_uv;
return size;
}
#if CONFIG_CODE_NONZEROCOUNT

View file

@ -55,4 +55,8 @@ static INLINE int clamp(int value, int low, int high) {
return value < low ? low : (value > high ? high : value);
}
static INLINE int multiple16(int value) {
return (value + 15) & ~15;
}
#endif // VP9_COMMON_VP9_COMMON_H_
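
Aside (not part of the patch): a minimal standalone sketch of the rounding idiom that the new multiple16() helper factors out of its callers; the test values are purely illustrative.

#include <assert.h>

// Adding 15 and clearing the low four bits rounds any non-negative value up
// to the next multiple of 16, which is what the former open-coded
// "(width + 15) & ~15" expressions did.
static int multiple16(int value) {
  return (value + 15) & ~15;
}

int main(void) {
  assert(multiple16(1)   == 16);   // rounds up
  assert(multiple16(16)  == 16);   // already aligned, unchanged
  assert(multiple16(17)  == 32);
  assert(multiple16(720) == 720);
  return 0;
}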

View file

@ -88,10 +88,14 @@ const nmv_context vp9_default_nmv_context = {
};
MV_JOINT_TYPE vp9_get_mv_joint(MV mv) {
if (mv.row == 0 && mv.col == 0) return MV_JOINT_ZERO;
else if (mv.row == 0 && mv.col != 0) return MV_JOINT_HNZVZ;
else if (mv.row != 0 && mv.col == 0) return MV_JOINT_HZVNZ;
else return MV_JOINT_HNZVNZ;
if (mv.row == 0 && mv.col == 0)
return MV_JOINT_ZERO;
else if (mv.row == 0 && mv.col != 0)
return MV_JOINT_HNZVZ;
else if (mv.row != 0 && mv.col == 0)
return MV_JOINT_HZVNZ;
else
return MV_JOINT_HNZVNZ;
}
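
Aside: a tiny self-contained illustration of how the four MV_JOINT_* classes partition a motion vector by which component is non-zero; the MV and MV_JOINT_TYPE definitions here are simplified stand-ins for the real ones in vp9_entropymv.h.

#include <assert.h>

typedef struct { int row, col; } MV;   /* simplified stand-in */
typedef enum {
  MV_JOINT_ZERO,    /* row == 0, col == 0 */
  MV_JOINT_HNZVZ,   /* col != 0, row == 0 */
  MV_JOINT_HZVNZ,   /* col == 0, row != 0 */
  MV_JOINT_HNZVNZ   /* col != 0, row != 0 */
} MV_JOINT_TYPE;

static MV_JOINT_TYPE get_mv_joint(MV mv) {
  if (mv.row == 0 && mv.col == 0)
    return MV_JOINT_ZERO;
  else if (mv.row == 0)
    return MV_JOINT_HNZVZ;
  else if (mv.col == 0)
    return MV_JOINT_HZVNZ;
  else
    return MV_JOINT_HNZVNZ;
}

int main(void) {
  const MV zero = {0, 0}, horiz = {0, 3}, vert = {-2, 0};
  assert(get_mv_joint(zero)  == MV_JOINT_ZERO);
  assert(get_mv_joint(horiz) == MV_JOINT_HNZVZ);
  assert(get_mv_joint(vert)  == MV_JOINT_HZVNZ);
  return 0;
}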
#define mv_class_base(c) ((c) ? (CLASS0_SIZE << (c + 2)) : 0)
@ -137,7 +141,8 @@ static void increment_nmv_component(int v,
int incr,
int usehp) {
int s, z, c, o, d, e, f;
if (!incr) return;
if (!incr)
return;
assert (v != 0); /* should not be zero */
s = v < 0;
mvcomp->sign[s] += incr;
@ -152,8 +157,8 @@ static void increment_nmv_component(int v,
if (c == MV_CLASS_0) {
mvcomp->class0[d] += incr;
} else {
int i, b;
b = c + CLASS0_BITS - 1; /* number of bits */
int i;
int b = c + CLASS0_BITS - 1; // number of bits
for (i = 0; i < b; ++i)
mvcomp->bits[i][((d >> i) & 1)] += incr;
}
@ -204,25 +209,22 @@ static void counts_to_context(nmv_component_counts *mvcomp, int usehp) {
void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
int usehp) {
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
mvctx->joints[j]++;
const MV_JOINT_TYPE type = vp9_get_mv_joint(*mv);
mvctx->joints[type]++;
usehp = usehp && vp9_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
if (type == MV_JOINT_HZVNZ || type == MV_JOINT_HNZVNZ)
increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
}
if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
if (type == MV_JOINT_HNZVZ || type == MV_JOINT_HNZVNZ)
increment_nmv_component_count(mv->col, &mvctx->comps[1], 1, usehp);
}
}
static void adapt_prob(vp9_prob *dest, vp9_prob prep,
unsigned int ct[2]) {
int count = ct[0] + ct[1];
static void adapt_prob(vp9_prob *dest, vp9_prob prep, unsigned int ct[2]) {
const int count = MIN(ct[0] + ct[1], MV_COUNT_SAT);
if (count) {
vp9_prob newp = get_binary_prob(ct[0], ct[1]);
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
*dest = weighted_prob(prep, newp,
MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
const vp9_prob newp = get_binary_prob(ct[0], ct[1]);
const int factor = MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT;
*dest = weighted_prob(prep, newp, factor);
} else {
*dest = prep;
}
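
Aside: a sketch of the count-saturated adaptation the tidied adapt_prob() performs. MV_COUNT_SAT, MV_MAX_UPDATE_FACTOR, get_binary_prob() and weighted_prob() are placeholder definitions with assumed values and behaviour, not the library's exact ones.

#include <stdio.h>

typedef unsigned char vp9_prob;

#define MV_COUNT_SAT 16            /* assumed saturation count */
#define MV_MAX_UPDATE_FACTOR 160   /* assumed maximum blend factor (of 256) */

/* rough placeholder: probability of branch 0 given counts n0, n1 */
static vp9_prob get_binary_prob(unsigned int n0, unsigned int n1) {
  const unsigned int n = n0 + n1;
  return n ? (vp9_prob)((255 * n0 + n / 2) / n) : 128;
}

/* blend the previous and the newly measured probability; factor is of 256 */
static vp9_prob weighted_prob(vp9_prob prep, vp9_prob newp, int factor) {
  return (vp9_prob)((prep * (256 - factor) + newp * factor + 128) >> 8);
}

static void adapt_prob(vp9_prob *dest, vp9_prob prep, unsigned int ct[2]) {
  const unsigned int total = ct[0] + ct[1];
  const int count = total < MV_COUNT_SAT ? (int)total : MV_COUNT_SAT;
  if (count) {
    const vp9_prob newp = get_binary_prob(ct[0], ct[1]);
    const int factor = MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT;
    *dest = weighted_prob(prep, newp, factor);
  } else {
    *dest = prep;                  /* nothing observed: keep the old value */
  }
}

int main(void) {
  vp9_prob p = 128;
  unsigned int ct[2] = {30, 10};   /* hypothetical branch counts */
  adapt_prob(&p, 128, ct);
  printf("adapted prob = %d\n", p);
  return 0;
}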
@ -253,10 +255,12 @@ void vp9_counts_to_nmv_context(
branch_ct_joint,
nmv_count->joints, 0);
for (i = 0; i < 2; ++i) {
prob->comps[i].sign = get_binary_prob(nmv_count->comps[i].sign[0],
nmv_count->comps[i].sign[1]);
branch_ct_sign[i][0] = nmv_count->comps[i].sign[0];
branch_ct_sign[i][1] = nmv_count->comps[i].sign[1];
const uint32_t s0 = nmv_count->comps[i].sign[0];
const uint32_t s1 = nmv_count->comps[i].sign[1];
prob->comps[i].sign = get_binary_prob(s0, s1);
branch_ct_sign[i][0] = s0;
branch_ct_sign[i][1] = s1;
vp9_tree_probs_from_distribution(vp9_mv_class_tree,
prob->comps[i].classes,
branch_ct_classes[i],
@ -266,10 +270,12 @@ void vp9_counts_to_nmv_context(
branch_ct_class0[i],
nmv_count->comps[i].class0, 0);
for (j = 0; j < MV_OFFSET_BITS; ++j) {
prob->comps[i].bits[j] = get_binary_prob(nmv_count->comps[i].bits[j][0],
nmv_count->comps[i].bits[j][1]);
branch_ct_bits[i][j][0] = nmv_count->comps[i].bits[j][0];
branch_ct_bits[i][j][1] = nmv_count->comps[i].bits[j][1];
const uint32_t b0 = nmv_count->comps[i].bits[j][0];
const uint32_t b1 = nmv_count->comps[i].bits[j][1];
prob->comps[i].bits[j] = get_binary_prob(b0, b1);
branch_ct_bits[i][j][0] = b0;
branch_ct_bits[i][j][1] = b1;
}
}
for (i = 0; i < 2; ++i) {
@ -286,16 +292,18 @@ void vp9_counts_to_nmv_context(
}
if (usehp) {
for (i = 0; i < 2; ++i) {
prob->comps[i].class0_hp =
get_binary_prob(nmv_count->comps[i].class0_hp[0],
nmv_count->comps[i].class0_hp[1]);
branch_ct_class0_hp[i][0] = nmv_count->comps[i].class0_hp[0];
branch_ct_class0_hp[i][1] = nmv_count->comps[i].class0_hp[1];
const uint32_t c0_hp0 = nmv_count->comps[i].class0_hp[0];
const uint32_t c0_hp1 = nmv_count->comps[i].class0_hp[1];
const uint32_t hp0 = nmv_count->comps[i].hp[0];
const uint32_t hp1 = nmv_count->comps[i].hp[1];
prob->comps[i].hp = get_binary_prob(nmv_count->comps[i].hp[0],
nmv_count->comps[i].hp[1]);
branch_ct_hp[i][0] = nmv_count->comps[i].hp[0];
branch_ct_hp[i][1] = nmv_count->comps[i].hp[1];
prob->comps[i].class0_hp = get_binary_prob(c0_hp0, c0_hp1);
branch_ct_class0_hp[i][0] = c0_hp0;
branch_ct_class0_hp[i][1] = c0_hp1;
prob->comps[i].hp = get_binary_prob(hp0, hp1);
branch_ct_hp[i][0] = hp0;
branch_ct_hp[i][1] = hp1;
}
}
}

View file

@ -191,11 +191,12 @@ void segment_via_mode_info(VP9_COMMON *oci, int how) {
// give new labels to regions
for (i = 1; i < label; i++)
if (labels[i].next->count > min_mbs_in_region && labels[labels[i].next->label].label == 0) {
if (labels[i].next->count > min_mbs_in_region &&
labels[labels[i].next->label].label == 0) {
segment_info *cs = &segments[label_count];
cs->label = label_count;
labels[labels[i].next->label].label = label_count++;
labels[labels[i].next->label].seg_value = labels[i].next->seg_value;
cs->seg_value = labels[labels[i].next->label].seg_value;
cs->min_x = oci->mb_cols;
cs->min_y = oci->mb_rows;
@ -204,24 +205,21 @@ void segment_via_mode_info(VP9_COMMON *oci, int how) {
cs->sum_x = 0;
cs->sum_y = 0;
cs->pixels = 0;
}
lp = labeling;
// this is just to gather stats...
for (i = 0; i < oci->mb_rows; i++, lp += pitch) {
for (j = 0; j < oci->mb_cols; j++) {
segment_info *cs;
int oldlab = labels[lp[j]].next->label;
int lab = labels[oldlab].label;
lp[j] = lab;
const int old_lab = labels[lp[j]].next->label;
const int lab = labels[old_lab].label;
segment_info *cs = &segments[lab];
cs = &segments[lab];
cs->min_x = (j < cs->min_x ? j : cs->min_x);
cs->max_x = (j > cs->max_x ? j : cs->max_x);
cs->min_y = (i < cs->min_y ? i : cs->min_y);
cs->max_y = (i > cs->max_y ? i : cs->max_y);
cs->min_x = MIN(cs->min_x, j);
cs->max_x = MAX(cs->max_x, j);
cs->min_y = MIN(cs->min_y, i);
cs->max_y = MAX(cs->max_y, i);
cs->sum_x += j;
cs->sum_y += i;
cs->pixels++;

View file

@ -19,8 +19,7 @@ static INLINE int8_t signed_char_clamp(int t) {
return (int8_t) t;
}
/* should we apply any filter at all ( 11111111 yes, 00000000 no) */
// should we apply any filter at all: 11111111 yes, 00000000 no
static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
uint8_t p3, uint8_t p2,
uint8_t p1, uint8_t p0,
@ -34,11 +33,10 @@ static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
mask |= (abs(q2 - q1) > limit) * -1;
mask |= (abs(q3 - q2) > limit) * -1;
mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = ~mask;
return mask;
return ~mask;
}
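
Aside: the "(condition) * -1" pattern above relies on a true comparison (1) turning into an all-ones byte, so the whole int8_t acts as a 0x00/0xFF boolean that SIMD code can AND against pixel data. A minimal illustration with made-up pixel values:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void) {
  const uint8_t p0 = 50, q0 = 90, blimit = 30;
  int8_t mask = 0;

  /* |p0 - q0| exceeds blimit, so the term contributes -1 (0xFF) */
  mask |= (abs(p0 - q0) > blimit) * -1;
  assert((uint8_t)mask == 0xff);

  /* filter_mask() returns the complement: 0x00 here means "do not filter" */
  mask = ~mask;
  assert((uint8_t)mask == 0x00);
  return 0;
}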
/* is there high variance internal edge ( 11111111 yes, 00000000 no) */
// is there high variance internal edge: 11111111 yes, 00000000 no
static INLINE int8_t hevmask(uint8_t thresh, uint8_t p1, uint8_t p0,
uint8_t q0, uint8_t q1) {
int8_t hev = 0;
@ -81,25 +79,23 @@ static INLINE void filter(int8_t mask, uint8_t hev, uint8_t *op1,
*op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
}
void vp9_loop_filter_horizontal_edge_c(uint8_t *s,
int p, /* pitch */
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int p /* pitch */,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int hev = 0; /* high edge variance */
int8_t mask = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
do {
mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[0 * p], s[1 * p], s[2 * p], s[3 * p]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[0 * p], s[1 * p], s[2 * p], s[3 * p]);
hev = hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
// high edge variance
const int8_t hev = hevmask(thresh[0],
s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p);
@ -107,36 +103,31 @@ void vp9_loop_filter_horizontal_edge_c(uint8_t *s,
} while (++i < count * 8);
}
void vp9_loop_filter_vertical_edge_c(uint8_t *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int hev = 0; /* high edge variance */
int8_t mask = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
do {
mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
// high edge variance
const int8_t hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
filter(mask, hev, s - 2, s - 1, s, s + 1);
s += p;
s += pitch;
} while (++i < count * 8);
}
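
Aside: the only real difference between the horizontal- and vertical-edge variants is how the eight samples around the edge are addressed; a horizontal edge is crossed by stepping whole rows (multiples of the pitch), a vertical edge by stepping single pixels. A toy 8x8 buffer makes the offsets concrete (illustration only):

#include <stdint.h>

int main(void) {
  uint8_t frame[8][8] = {{0}};
  const int p = 8;                   /* pitch (row stride) in bytes */
  uint8_t *s;

  /* horizontal edge between rows 3 and 4, filtered at column 2 */
  s = &frame[4][2];
  uint8_t above = s[-1 * p];         /* frame[3][2] */
  uint8_t below = s[0 * p];          /* frame[4][2] */

  /* vertical edge between columns 3 and 4, filtered at row 2 */
  s = &frame[2][4];
  uint8_t left  = s[-1];             /* frame[2][3] */
  uint8_t right = s[0];              /* frame[2][4] */

  (void)above; (void)below; (void)left; (void)right;
  return 0;
}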
static INLINE signed char flatmask4(uint8_t thresh,
uint8_t p3, uint8_t p2,
uint8_t p1, uint8_t p0,
uint8_t q0, uint8_t q1,
uint8_t q2, uint8_t q3) {
static INLINE int8_t flatmask4(uint8_t thresh,
uint8_t p3, uint8_t p2,
uint8_t p1, uint8_t p0,
uint8_t q0, uint8_t q1,
uint8_t q2, uint8_t q3) {
int8_t flat = 0;
flat |= (abs(p1 - p0) > thresh) * -1;
flat |= (abs(q1 - q0) > thresh) * -1;
@ -144,8 +135,7 @@ static INLINE signed char flatmask4(uint8_t thresh,
flat |= (abs(q0 - q2) > thresh) * -1;
flat |= (abs(p3 - p0) > thresh) * -1;
flat |= (abs(q3 - q0) > thresh) * -1;
flat = ~flat;
return flat;
return ~flat;
}
static INLINE signed char flatmask5(uint8_t thresh,
uint8_t p4, uint8_t p3, uint8_t p2,
@ -213,29 +203,26 @@ static INLINE void mbfilter(int8_t mask, uint8_t hev, uint8_t flat,
}
}
void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int8_t hev = 0; /* high edge variance */
int8_t mask = 0;
int8_t flat = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
do {
mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
hev = hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
const int8_t hev = hevmask(thresh[0],
s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
flat = flatmask4(1, s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
const int8_t flat = flatmask4(1,
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
mbfilter(mask, hev, flat,
s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
s, s + 1 * p, s + 2 * p, s + 3 * p);
@ -245,35 +232,29 @@ void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s,
}
void vp9_mbloop_filter_vertical_edge_c(uint8_t *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int8_t hev = 0; /* high edge variance */
int8_t mask = 0;
int8_t flat = 0;
int i = 0;
do {
mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
flat = flatmask4(1,
s[-4], s[-3], s[-2], s[-1],
s[ 0], s[ 1], s[ 2], s[ 3]);
mbfilter(mask, hev, flat,
s - 4, s - 3, s - 2, s - 1,
s, s + 1, s + 2, s + 3);
s += p;
const int8_t hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
const int8_t flat = flatmask4(1, s[-4], s[-3], s[-2], s[-1],
s[ 0], s[ 1], s[ 2], s[ 3]);
mbfilter(mask, hev, flat, s - 4, s - 3, s - 2, s - 1,
s, s + 1, s + 2, s + 3);
s += pitch;
} while (++i < count * 8);
}
/* should we apply any filter at all ( 11111111 yes, 00000000 no) */
// should we apply any filter at all: 11111111 yes, 00000000 no
static INLINE int8_t simple_filter_mask(uint8_t blimit,
uint8_t p1, uint8_t p0,
uint8_t q0, uint8_t q1) {
@ -301,31 +282,24 @@ static INLINE void simple_filter(int8_t mask,
*op0 = signed_char_clamp(p0 + filter2) ^ 0x80;
}
void vp9_loop_filter_simple_horizontal_edge_c(uint8_t *s,
int p,
const unsigned char *blimit) {
int8_t mask = 0;
void vp9_loop_filter_simple_horizontal_edge_c(uint8_t *s, int p,
const uint8_t *blimit) {
int i = 0;
do {
mask = simple_filter_mask(blimit[0],
s[-2 * p], s[-1 * p],
s[0 * p], s[1 * p]);
simple_filter(mask,
s - 2 * p, s - 1 * p,
s, s + 1 * p);
const int8_t mask = simple_filter_mask(blimit[0], s[-2 * p], s[-1 * p],
s[0 * p], s[1 * p]);
simple_filter(mask, s - 2 * p, s - 1 * p, s, s + 1 * p);
++s;
} while (++i < 16);
}
void vp9_loop_filter_simple_vertical_edge_c(uint8_t *s,
int p,
const unsigned char *blimit) {
int8_t mask = 0;
void vp9_loop_filter_simple_vertical_edge_c(uint8_t *s, int p,
const uint8_t *blimit) {
int i = 0;
do {
mask = simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]);
const int8_t mask = simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]);
simple_filter(mask, s - 2, s - 1, s, s + 1);
s += p;
} while (++i < 16);
@ -367,87 +341,82 @@ void vp9_loop_filter_bv_c(uint8_t*y_ptr, uint8_t *u_ptr,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
/* Horizontal MB filtering */
void vp9_loop_filter_mbh_c(uint8_t *y_ptr, uint8_t *u_ptr,
uint8_t *v_ptr, int y_stride, int uv_stride,
// Horizontal MB filtering
void vp9_loop_filter_mbh_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mbloop_filter_horizontal_edge_c(y_ptr, y_stride,
vp9_mbloop_filter_horizontal_edge_c(y, y_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride,
if (u)
vp9_mbloop_filter_horizontal_edge_c(u, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride,
if (v)
vp9_mbloop_filter_horizontal_edge_c(v, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
/* Horizontal B Filtering */
void vp9_loop_filter_bh_c(uint8_t *y_ptr, uint8_t *u_ptr,
uint8_t *v_ptr, int y_stride, int uv_stride,
// Horizontal B Filtering
void vp9_loop_filter_bh_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride,
vp9_loop_filter_horizontal_edge_c(y + 4 * y_stride, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride,
vp9_loop_filter_horizontal_edge_c(y + 8 * y_stride, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride,
vp9_loop_filter_horizontal_edge_c(y + 12 * y_stride, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride,
if (u)
vp9_loop_filter_horizontal_edge_c(u + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride,
if (v)
vp9_loop_filter_horizontal_edge_c(v + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp9_loop_filter_bh8x8_c(uint8_t *y_ptr, uint8_t *u_ptr,
uint8_t *v_ptr, int y_stride, int uv_stride,
void vp9_loop_filter_bh8x8_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mbloop_filter_horizontal_edge_c(
y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_mbloop_filter_horizontal_edge_c(y + 8 * y_stride, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride,
if (u)
vp9_loop_filter_horizontal_edge_c(u + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride,
if (v)
vp9_loop_filter_horizontal_edge_c(v + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp9_loop_filter_bhs_c(uint8_t *y_ptr, int y_stride,
const unsigned char *blimit) {
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride,
y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride,
y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride,
y_stride, blimit);
void vp9_loop_filter_bhs_c(uint8_t *y, int y_stride, const uint8_t *blimit) {
vp9_loop_filter_simple_horizontal_edge_c(y + 4 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y + 8 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y + 12 * y_stride, y_stride, blimit);
}
void vp9_loop_filter_bv8x8_c(uint8_t *y_ptr, uint8_t *u_ptr,
uint8_t *v_ptr, int y_stride, int uv_stride,
void vp9_loop_filter_bv8x8_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mbloop_filter_vertical_edge_c(
y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_mbloop_filter_vertical_edge_c(y + 8, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride,
if (u)
vp9_loop_filter_vertical_edge_c(u + 4, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride,
if (v)
vp9_loop_filter_vertical_edge_c(v + 4, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp9_loop_filter_bvs_c(uint8_t *y_ptr, int y_stride,
const unsigned char *blimit) {
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
void vp9_loop_filter_bvs_c(uint8_t *y, int y_stride, const uint8_t *blimit) {
vp9_loop_filter_simple_vertical_edge_c(y + 4, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y + 8, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y + 12, y_stride, blimit);
}
static INLINE void wide_mbfilter(int8_t mask, uint8_t hev,
@ -551,38 +520,30 @@ static INLINE void wide_mbfilter(int8_t mask, uint8_t hev,
}
}
void vp9_mb_lpf_horizontal_edge_w
(
unsigned char *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
) {
signed char hev = 0; /* high edge variance */
signed char mask = 0;
signed char flat = 0;
signed char flat2 = 0;
void vp9_mb_lpf_horizontal_edge_w(uint8_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
do {
mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
hev = hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
const int8_t hev = hevmask(thresh[0],
s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
flat = flatmask4(1,
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
const int8_t flat = flatmask4(1,
s[-4 * p], s[-3 * p], s[-2 * p], s[-1 * p],
s[ 0 * p], s[ 1 * p], s[ 2 * p], s[ 3 * p]);
flat2 = flatmask5(1,
s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], s[-1 * p],
s[ 0 * p], s[ 4 * p], s[ 5 * p], s[ 6 * p], s[ 7 * p]);
const int8_t flat2 = flatmask5(1,
s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], s[-1 * p],
s[ 0 * p], s[ 4 * p], s[ 5 * p], s[ 6 * p], s[ 7 * p]);
wide_mbfilter(mask, hev, flat, flat2,
s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
@ -593,33 +554,23 @@ void vp9_mb_lpf_horizontal_edge_w
++s;
} while (++i < count * 8);
}
void vp9_mb_lpf_vertical_edge_w
(
unsigned char *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
) {
signed char hev = 0; /* high edge variance */
signed char mask = 0;
signed char flat = 0;
signed char flat2 = 0;
void vp9_mb_lpf_vertical_edge_w(uint8_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh,
int count) {
int i = 0;
do {
mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
const int8_t mask = filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1],
s[0], s[1], s[2], s[3]);
hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
flat = flatmask4(1,
s[-4], s[-3], s[-2], s[-1],
s[ 0], s[ 1], s[ 2], s[ 3]);
flat2 = flatmask5(1,
s[-8], s[-7], s[-6], s[-5], s[-1],
s[ 0], s[ 4], s[ 5], s[ 6], s[ 7]);
const int8_t hev = hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
const int8_t flat = flatmask4(1, s[-4], s[-3], s[-2], s[-1],
s[ 0], s[ 1], s[ 2], s[ 3]);
const int8_t flat2 = flatmask5(1, s[-8], s[-7], s[-6], s[-5], s[-1],
s[ 0], s[ 4], s[ 5], s[ 6], s[ 7]);
wide_mbfilter(mask, hev, flat, flat2,
s - 8, s - 7, s - 6, s - 5,
@ -630,32 +581,33 @@ void vp9_mb_lpf_vertical_edge_w
} while (++i < count * 8);
}
void vp9_lpf_mbv_w_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mb_lpf_vertical_edge_w(y_ptr, y_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 2);
void vp9_lpf_mbv_w_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mb_lpf_vertical_edge_w(y, y_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_mbloop_filter_vertical_edge_c(u_ptr, uv_stride,
if (u)
vp9_mbloop_filter_vertical_edge_c(u, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_mbloop_filter_vertical_edge_c(v_ptr, uv_stride,
if (v)
vp9_mbloop_filter_vertical_edge_c(v, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
void vp9_lpf_mbh_w_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mb_lpf_horizontal_edge_w(y_ptr, y_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp9_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride,
void vp9_lpf_mbh_w_c(uint8_t *y, uint8_t *u, uint8_t *v,
int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp9_mb_lpf_horizontal_edge_w(y, y_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u)
vp9_mbloop_filter_horizontal_edge_c(u, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp9_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride,
if (v)
vp9_mbloop_filter_horizontal_edge_c(v, uv_stride,
lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}

View file

@ -1290,9 +1290,8 @@ static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
static void update_frame_size(VP9D_COMP *pbi) {
VP9_COMMON *cm = &pbi->common;
/* our internal buffers are always multiples of 16 */
const int width = (cm->width + 15) & ~15;
const int height = (cm->height + 15) & ~15;
const int width = multiple16(cm->width);
const int height = multiple16(cm->height);
cm->mb_rows = height >> 4;
cm->mb_cols = width >> 4;
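
Aside: a small check of the frame-size bookkeeping above, showing why the dimensions are aligned before the >> 4: a 1920x1080 frame is not a multiple of 16 vertically, so it is stored as 1920x1088 and covered by a 120x68 macroblock grid. Values are illustrative only.

#include <assert.h>

static int multiple16(int value) { return (value + 15) & ~15; }

int main(void) {
  const int width  = multiple16(1920);
  const int height = multiple16(1080);
  assert(width == 1920 && height == 1088);
  assert((width >> 4)  == 120);   /* mb_cols */
  assert((height >> 4) == 68);    /* mb_rows */
  return 0;
}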

View file

@ -326,7 +326,7 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
vpx_free(cpi->active_map);
cpi->active_map = 0;
vp9_de_alloc_frame_buffers(&cpi->common);
vp9_free_frame_buffers(&cpi->common);
vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf);
vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
@ -960,9 +960,8 @@ void vp9_alloc_compressor_data(VP9_COMP *cpi) {
static void update_frame_size(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
/* our internal buffers are always multiples of 16 */
int aligned_width = (cm->width + 15) & ~15;
int aligned_height = (cm->height + 15) & ~15;
const int aligned_width = multiple16(cm->width);
const int aligned_height = multiple16(cm->height);
cm->mb_rows = aligned_height >> 4;
cm->mb_cols = aligned_width >> 4;

View file

@ -10,17 +10,15 @@
#ifndef VP9_VP9_IFACE_COMMON_H_
#define VP9_VP9_IFACE_COMMON_H_
static void yuvconfig2image(vpx_image_t *img,
const YV12_BUFFER_CONFIG *yv12,
void *user_priv) {
static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
void *user_priv) {
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
* might be representable by a YV12_BUFFER_CONFIG, so we just
* initialize all the fields.*/
img->fmt = yv12->clrtype == REG_YUV ?
VPX_IMG_FMT_I420 : VPX_IMG_FMT_VPXI420;
img->fmt = yv12->clrtype == REG_YUV ? VPX_IMG_FMT_I420 : VPX_IMG_FMT_VPXI420;
img->w = yv12->y_stride;
img->h = (yv12->y_height + 2 * VP9BORDERINPIXELS + 15) & ~15;
img->h = multiple16(yv12->y_height + 2 * VP9BORDERINPIXELS);
img->d_w = yv12->y_width;
img->d_h = yv12->y_height;
img->x_chroma_shift = 1;
@ -40,4 +38,4 @@ static void yuvconfig2image(vpx_image_t *img,
img->self_allocd = 0;
}
#endif
#endif // VP9_VP9_IFACE_COMMON_H_