diff --git a/vp10/common/dering.c b/vp10/common/dering.c
index b7b8183d0..6d611016a 100644
--- a/vp10/common/dering.c
+++ b/vp10/common/dering.c
@@ -70,7 +70,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
     src[pli] = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
     for (r = 0; r < bsize[pli] * cm->mi_rows; ++r) {
       for (c = 0; c < bsize[pli] * cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
           src[pli][r * stride + c] = CONVERT_TO_SHORTPTR(
               xd->plane[pli].dst.buf)[r * xd->plane[pli].dst.stride + c];
@@ -78,7 +78,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
 #endif
           src[pli][r * stride + c] =
               xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
         }
 #endif
       }
@@ -124,7 +124,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
                 cm->mi_cols, threshold, OD_DERING_NO_CHECK_OVERLAP, coeff_shift);
       for (r = 0; r < bsize[pli] * nvb; ++r) {
         for (c = 0; c < bsize[pli] * nhb; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
           if (cm->use_highbitdepth) {
             CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
             [xd->plane[pli].dst.stride *
@@ -138,7 +138,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
                 (bsize[pli] * MI_BLOCK_SIZE * sbr + r) +
                 sbc * bsize[pli] * MI_BLOCK_SIZE + c] =
                 dst[r * MI_BLOCK_SIZE * bsize[pli] + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
           }
 #endif
         }
diff --git a/vp10/encoder/pickdering.c b/vp10/encoder/pickdering.c
index 45acaa1dc..323567d95 100644
--- a/vp10/encoder/pickdering.c
+++ b/vp10/encoder/pickdering.c
@@ -64,7 +64,7 @@ int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
   stride = bsize[0] * cm->mi_cols;
   for (r = 0; r < bsize[0] * cm->mi_rows; ++r) {
     for (c = 0; c < bsize[0] * cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         src[r * stride + c] = CONVERT_TO_SHORTPTR(
             xd->plane[0].dst.buf)[r * xd->plane[0].dst.stride + c];
@@ -75,7 +75,7 @@ int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
         src[r * stride + c] =
             xd->plane[0].dst.buf[r * xd->plane[0].dst.stride + c];
         ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
       }
 #endif
     }
diff --git a/vpx_dsp/quantize.c b/vpx_dsp/quantize.c
index 683655600..78a7c2b5e 100644
--- a/vpx_dsp/quantize.c
+++ b/vpx_dsp/quantize.c
@@ -40,7 +40,7 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
 void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -99,7 +99,7 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
 void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
@@ -192,7 +192,7 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
 void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -316,7 +316,7 @@ void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
 void vpx_highbd_quantize_b_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,