vpx_mem: remove vpx_memcpy
Vestigial. Replace instances with memcpy(), which they were already being defined to.

Change-Id: Icfd1b0bc5d95b70efab91b9ae777ace1e81d2d7c
Parent: fbd3b89488
Commit: f274c2199b
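Background note (not part of the change itself): the wrapper was a thin pass-through to the C library, so call sites lose nothing by calling memcpy() directly. A minimal sketch of the kind of definition being retired, assuming the usual non-tracking configuration of vpx_mem.h (the exact header contents are not shown in this commit):

/* Illustrative only -- assumes vpx_mem.h simply forwarded the name
 * to the C library in the default build: */
#define vpx_memcpy memcpy

/* so a call site such as */
vpx_memcpy(output_ref_, output_, kOutputBufferSize);
/* compiled to exactly the same thing as the replacement */
memcpy(output_ref_, output_, kOutputBufferSize);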
@@ -398,9 +398,9 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
}
void CopyOutputToRef() {
-vpx_memcpy(output_ref_, output_, kOutputBufferSize);
+memcpy(output_ref_, output_, kOutputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
-vpx_memcpy(output16_ref_, output16_, kOutputBufferSize);
+memcpy(output16_ref_, output16_, kOutputBufferSize);
#endif
}
@@ -69,8 +69,7 @@ class QuantizeTestBase {
// Copy macroblockd from the reference to get pre-set-up dequant values.
macroblockd_dst_ = reinterpret_cast<MACROBLOCKD *>(
vpx_memalign(32, sizeof(*macroblockd_dst_)));
-vpx_memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd,
-sizeof(*macroblockd_dst_));
+memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));
// Fix block pointers - currently they point to the blocks in the reference
// structure.
vp8_setup_block_dptrs(macroblockd_dst_);
@@ -79,8 +78,7 @@ class QuantizeTestBase {
void UpdateQuantizer(int q) {
vp8_set_quantizer(vp8_comp_, q);
-vpx_memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd,
-sizeof(*macroblockd_dst_));
+memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));
vp8_setup_block_dptrs(macroblockd_dst_);
}
@@ -101,13 +101,13 @@ class VpxScaleBase {
// The first row was already extended to the left and right. Copy it up.
for (int y = 0; y < padding; ++y) {
-vpx_memcpy(top, left, extend_width);
+memcpy(top, left, extend_width);
top += stride;
}
uint8_t *bottom = left + (crop_height * stride);
for (int y = 0; y < bottom_extend; ++y) {
-vpx_memcpy(bottom, left + (crop_height - 1) * stride, extend_width);
+memcpy(bottom, left + (crop_height - 1) * stride, extend_width);
bottom += stride;
}
}
@@ -29,14 +29,14 @@ extern "C" {
#define vp8_copy( Dest, Src) { \
assert( sizeof( Dest) == sizeof( Src)); \
-vpx_memcpy( Dest, Src, sizeof( Src)); \
+memcpy( Dest, Src, sizeof( Src)); \
}
/* Use this for variably-sized arrays. */
#define vp8_copy_array( Dest, Src, N) { \
assert( sizeof( *Dest) == sizeof( *Src)); \
-vpx_memcpy( Dest, Src, N * sizeof( *Src)); \
+memcpy( Dest, Src, N * sizeof( *Src)); \
}
#define vp8_zero( Dest) vpx_memset( &Dest, 0, sizeof( Dest));
@@ -183,7 +183,6 @@ const vp8_extra_bit_struct vp8_extra_bits[12] =
void vp8_default_coef_probs(VP8_COMMON *pc)
{
-vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
-sizeof(default_coef_probs));
+memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs));
}
@@ -159,13 +159,13 @@ const vp8_tree_index vp8_small_mvtree [14] =
void vp8_init_mbmode_probs(VP8_COMMON *x)
{
-vpx_memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
-vpx_memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
-vpx_memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
+memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
+memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
+memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
}
void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES-1])
{
-vpx_memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
+memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
}
@@ -41,7 +41,7 @@ static void copy_and_extend_plane
for (i = 0; i < h; i++)
{
vpx_memset(dest_ptr1, src_ptr1[0], el);
-vpx_memcpy(dest_ptr1 + el, src_ptr1, w);
+memcpy(dest_ptr1 + el, src_ptr1, w);
vpx_memset(dest_ptr2, src_ptr2[0], er);
src_ptr1 += sp;
src_ptr2 += sp;
@@ -60,13 +60,13 @@ static void copy_and_extend_plane
for (i = 0; i < et; i++)
{
-vpx_memcpy(dest_ptr1, src_ptr1, linesize);
+memcpy(dest_ptr1, src_ptr1, linesize);
dest_ptr1 += dp;
}
for (i = 0; i < eb; i++)
{
-vpx_memcpy(dest_ptr2, src_ptr2, linesize);
+memcpy(dest_ptr2, src_ptr2, linesize);
dest_ptr2 += dp;
}
}
@@ -231,9 +231,9 @@ static void multiframe_quality_enhance_block
{
vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride)
-vpx_memcpy(udp, up, uvblksize);
+memcpy(udp, up, uvblksize);
for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride)
-vpx_memcpy(vdp, vp, uvblksize);
+memcpy(vdp, vp, uvblksize);
}
}
}
@@ -341,8 +341,8 @@ void vp8_multiframe_quality_enhance
for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride,
vp += show->uv_stride, vdp += dest->uv_stride)
{
-vpx_memcpy(udp, up, 4);
-vpx_memcpy(vdp, vp, 4);
+memcpy(udp, up, 4);
+memcpy(vdp, vp, 4);
}
}
}
@@ -217,8 +217,8 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
{
for (i = 0; i < 8; i++)
{
-vpx_memcpy(upred_ptr, uabove_row, 8);
-vpx_memcpy(vpred_ptr, vabove_row, 8);
+memcpy(upred_ptr, uabove_row, 8);
+memcpy(vpred_ptr, vabove_row, 8);
upred_ptr += pred_stride;
vpred_ptr += pred_stride;
}
@@ -323,7 +323,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)Border; i++)
{
-vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
@@ -338,7 +338,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)(Border); i++)
{
-vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
@@ -351,7 +351,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)(Border); i++)
{
-vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
}
@@ -379,7 +379,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)Border; i++)
{
-vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
@@ -397,7 +397,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)(Border); i++)
{
-vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
@@ -411,7 +411,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
for (i = 0; i < (int)(Border); i++)
{
-vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
}
@@ -918,7 +918,7 @@ static void init_frame(VP8D_COMP *pbi)
if (pc->frame_type == KEY_FRAME)
{
/* Various keyframe initializations */
-vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_init_mbmode_probs(pc);
@@ -1072,8 +1072,8 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
else
{
-vpx_memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
-vpx_memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
}
}
if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME))
@@ -1278,7 +1278,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
#endif
if (pc->refresh_entropy_probs == 0)
{
-vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
+memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
}
pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc);
@@ -1380,7 +1380,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
if (pc->refresh_entropy_probs == 0)
{
-vpx_memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
+memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
pbi->independent_partitions = prev_independent_partitions;
}
@@ -60,12 +60,12 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_D
mbd->segmentation_enabled = xd->segmentation_enabled;
mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
-vpx_memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
+memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
/*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/
-vpx_memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
+memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
/*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/
-vpx_memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
+memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
/*unsigned char mode_ref_lf_delta_enabled;
unsigned char mode_ref_lf_delta_update;*/
mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled;
@@ -73,10 +73,10 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_D
mbd->current_bc = &pbi->mbc[0];
-vpx_memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
-vpx_memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
-vpx_memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
-vpx_memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
mbd->fullpixel_mask = 0xffffffff;
@@ -499,9 +499,9 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
if( mb_row != pc->mb_rows-1 )
{
/* Save decoded MB last row data for next-row decoding */
-vpx_memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
-vpx_memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
-vpx_memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
+memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
+memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
+memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
}
/* save left_col for next MB decoding */
@@ -1543,7 +1543,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (pc->refresh_entropy_probs == 0)
{
/* save a copy for later refresh */
-vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
+memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
}
vp8_update_coef_probs(cpi);
@@ -155,8 +155,8 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
cpi->common.MBs));
/* Copy map to sort list */
-vpx_memcpy( sortlist, cpi->mb_activity_map,
-sizeof(unsigned int) * cpi->common.MBs );
+memcpy( sortlist, cpi->mb_activity_map,
+sizeof(unsigned int) * cpi->common.MBs );
/* Ripple each value down to its correct position */
@@ -506,8 +506,8 @@ static void optimize_mb(MACROBLOCK *x)
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
-vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -555,8 +555,8 @@ void vp8_optimize_mby(MACROBLOCK *x)
if (!x->e_mbd.left_context)
return;
-vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -595,8 +595,8 @@ void vp8_optimize_mbuv(MACROBLOCK *x)
if (!x->e_mbd.left_context)
return;
-vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -416,14 +416,13 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
zd->subpixel_predict16x16 = xd->subpixel_predict16x16;
zd->segmentation_enabled = xd->segmentation_enabled;
zd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
-vpx_memcpy(zd->segment_feature_data, xd->segment_feature_data,
-sizeof(xd->segment_feature_data));
+memcpy(zd->segment_feature_data, xd->segment_feature_data,
+sizeof(xd->segment_feature_data));
-vpx_memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc,
-sizeof(xd->dequant_y1_dc));
-vpx_memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
-vpx_memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
-vpx_memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
#if 1
/*TODO: Remove dequant from BLOCKD. This is a temporary solution until
@@ -438,9 +437,8 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
#endif
-vpx_memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
-vpx_memcpy(z->rd_thresh_mult, x->rd_thresh_mult,
-sizeof(x->rd_thresh_mult));
+memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
+memcpy(z->rd_thresh_mult, x->rd_thresh_mult, sizeof(x->rd_thresh_mult));
z->zbin_over_quant = x->zbin_over_quant;
z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
@@ -573,7 +573,7 @@ void vp8_first_pass(VP8_COMP *cpi)
{
int flag[2] = {1, 1};
vp8_initialize_rd_consts(cpi, x, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
-vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
@@ -1875,7 +1875,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
break;
}
-vpx_memcpy(this_frame, &next_frame, sizeof(*this_frame));
+memcpy(this_frame, &next_frame, sizeof(*this_frame));
old_boost_score = boost_score;
}
@@ -2445,7 +2445,7 @@ void vp8_second_pass(VP8_COMP *cpi)
if (cpi->twopass.frames_to_key == 0)
{
/* Define next KF group and assign bits to it */
-vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
find_next_key_frame(cpi, &this_frame_copy);
/* Special case: Error error_resilient_mode mode does not make much
@@ -2471,7 +2471,7 @@ void vp8_second_pass(VP8_COMP *cpi)
if (cpi->frames_till_gf_update_due == 0)
{
/* Define next gf group and assign bits to it */
-vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
define_gf_group(cpi, &this_frame_copy);
/* If we are going to code an altref frame at the end of the group
@@ -2487,7 +2487,7 @@ void vp8_second_pass(VP8_COMP *cpi)
* to the GF group
*/
int bak = cpi->per_frame_bandwidth;
-vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
cpi->per_frame_bandwidth = bak;
}
@@ -2510,14 +2510,14 @@ void vp8_second_pass(VP8_COMP *cpi)
if (cpi->common.frame_type != KEY_FRAME)
{
/* Assign bits from those allocated to the GF group */
-vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
}
}
else
{
/* Assign bits from those allocated to the GF group */
-vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
assign_std_frame_bits(cpi, &this_frame_copy);
}
}
@@ -2658,7 +2658,7 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTP
double decay_accumulator = 1.0;
double next_iiratio;
-vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
+memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
/* Note the starting file position so we can reset to it */
start_pos = cpi->twopass.stats_in;
@@ -2756,7 +2756,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.frames_to_key = 1;
/* Take a copy of the initial frame details */
-vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
+memcpy(&first_frame, this_frame, sizeof(*this_frame));
cpi->twopass.kf_group_bits = 0;
cpi->twopass.kf_group_error_left = 0;
@@ -2779,7 +2779,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
kf_group_coded_err += this_frame->coded_error;
/* Load the next frame's stats. */
-vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
+memcpy(&last_frame, this_frame, sizeof(*this_frame));
input_stats(cpi, this_frame);
/* Provided that we are not at the end of the file... */
@@ -2847,7 +2847,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->twopass.frames_to_key /= 2;
/* Copy first frame details */
-vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
+memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
/* Reset to the start of the group */
reset_fpf_position(cpi, start_position);
@@ -508,7 +508,7 @@ static void disable_segmentation(VP8_COMP *cpi)
static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
{
/* Copy in the new segmentation map */
-vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
+memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
/* Signal that the map should be updated. */
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
@@ -530,7 +530,7 @@ static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
{
cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
-vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
+memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
}
@@ -4387,7 +4387,7 @@ static void encode_frame_to_data_rate
if (cm->refresh_entropy_probs == 0)
{
/* save a copy for later refresh */
-vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
+memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
}
vp8_update_coef_context(cpi);
@@ -5613,19 +5613,19 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (cm->refresh_entropy_probs == 0)
{
-vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
+memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
}
/* Save the contexts separately for alt ref, gold and last. */
/* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
if(cm->refresh_alt_ref_frame)
-vpx_memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
+memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
if(cm->refresh_golden_frame)
-vpx_memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
+memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
if(cm->refresh_last_frame)
-vpx_memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
+memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
/* if its a dropped frame honor the requests on subsequent frames */
if (*size > 0)
@@ -5934,7 +5934,7 @@ int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, uns
{
if (map)
{
-vpx_memcpy(cpi->active_map, map, rows * cols);
+memcpy(cpi->active_map, map, rows * cols);
cpi->active_map_enabled = 1;
}
else
@@ -1348,8 +1348,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*returndistortion = distortion2;
best_rd_sse = sse;
best_rd = this_rd;
-vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
-sizeof(MB_MODE_INFO));
+memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+sizeof(MB_MODE_INFO));
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
@@ -1487,8 +1487,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (this_rd < best_rd)
{
-vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
-sizeof(MB_MODE_INFO));
+memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+sizeof(MB_MODE_INFO));
}
}
@@ -1512,8 +1512,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* set to the best mb mode, this copy can be skip if x->skip since it
* already has the right content */
if (!x->skip)
-vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
-sizeof(MB_MODE_INFO));
+memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
+sizeof(MB_MODE_INFO));
if (best_mbmode.mode <= B_PRED)
{
@@ -49,7 +49,7 @@ static void yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
src_y = src_ybc->y_buffer + yoffset;
dst_y = dst_ybc->y_buffer + yoffset;
-vpx_memcpy(dst_y, src_y, ystride * linestocopy);
+memcpy(dst_y, src_y, ystride * linestocopy);
}
static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
@@ -296,7 +296,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
vp8_default_coef_probs(& cpi->common);
-vpx_memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
{
int flag[2] = {1, 1};
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
@@ -305,9 +305,9 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
/* Make sure we initialize separate contexts for altref,gold, and normal.
* TODO shouldn't need 3 different copies of structure to do this!
*/
-vpx_memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
-vpx_memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
-vpx_memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
+memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
+memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
+memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
@@ -555,8 +555,8 @@ static int vp8_rdcost_mby(MACROBLOCK *mb)
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
-vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -691,7 +691,7 @@ static int rd_pick_intra4x4block(
*a = tempa;
*l = templ;
copy_predictor(best_predictor, b->predictor);
-vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
+memcpy(best_dqcoeff, b->dqcoeff, 32);
}
}
b->bmi.as_mode = *best_mode;
@@ -715,8 +715,8 @@ static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate,
ENTROPY_CONTEXT *tl;
const int *bmode_costs;
-vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -820,8 +820,8 @@ static int rd_cost_mbuv(MACROBLOCK *mb)
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
-vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -1128,8 +1128,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
ENTROPY_CONTEXT *ta_b;
ENTROPY_CONTEXT *tl_b;
-vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
@@ -1172,8 +1172,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
ENTROPY_CONTEXT *ta_s;
ENTROPY_CONTEXT *tl_s;
-vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
ta_s = (ENTROPY_CONTEXT *)&t_above_s;
tl_s = (ENTROPY_CONTEXT *)&t_left_s;
@@ -1329,14 +1329,14 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
mode_selected = this_mode;
best_label_rd = this_rd;
-vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
}
} /*for each 4x4 mode*/
-vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
-vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
+memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
bsi->ref_mv, x->mvcost);
@@ -1925,8 +1925,8 @@ static void update_best_mode(BEST_MODE* best_mode, int this_rd,
(rd->distortion2-rd->distortion_uv));
best_mode->rd = this_rd;
-vpx_memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
-vpx_memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
+memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
+memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
if ((this_mode == B_PRED) || (this_mode == SPLITMV))
{
@@ -2582,7 +2582,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* macroblock modes */
-vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
+memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
if (best_mode.mbmode.mode == B_PRED)
{
@@ -2595,7 +2595,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
-vpx_memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
+memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
x->e_mbd.mode_info_context->mbmi.mv.as_int =
x->partition_info->bmi[15].mv.as_int;
@@ -366,9 +366,9 @@ static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
if (oxcf->number_of_layers > 1)
{
memcpy (oxcf->target_bitrate, cfg.ts_target_bitrate,
-sizeof(cfg.ts_target_bitrate));
+sizeof(cfg.ts_target_bitrate));
memcpy (oxcf->rate_decimator, cfg.ts_rate_decimator,
-sizeof(cfg.ts_rate_decimator));
+sizeof(cfg.ts_rate_decimator));
memcpy (oxcf->layer_id, cfg.ts_layer_id, sizeof(cfg.ts_layer_id));
}
@@ -36,13 +36,13 @@ extern "C" {
// Only need this for fixed-size arrays, for structs just assign.
#define vp9_copy(dest, src) { \
assert(sizeof(dest) == sizeof(src)); \
-vpx_memcpy(dest, src, sizeof(src)); \
+memcpy(dest, src, sizeof(src)); \
}
// Use this for variably-sized arrays.
#define vp9_copy_array(dest, src, n) { \
assert(sizeof(*dest) == sizeof(*src)); \
-vpx_memcpy(dest, src, n * sizeof(*src)); \
+memcpy(dest, src, n * sizeof(*src)); \
}
#define vp9_zero(dest) vpx_memset(&(dest), 0, sizeof(dest))
@@ -256,7 +256,7 @@ void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
(void)filter_y; (void)filter_y_stride;
for (r = h; r > 0; --r) {
-vpx_memcpy(dst, src, w);
+memcpy(dst, src, w);
src += src_stride;
dst += dst_stride;
}
@@ -526,7 +526,7 @@ void vp9_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
(void)bd;
for (r = h; r > 0; --r) {
-vpx_memcpy(dst, src, w * sizeof(uint16_t));
+memcpy(dst, src, w * sizeof(uint16_t));
src += src_stride;
dst += dst_stride;
}
@@ -749,13 +749,13 @@ static const vp9_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
};
static void extend_to_full_distribution(vp9_prob *probs, vp9_prob p) {
-vpx_memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
-MODEL_NODES * sizeof(vp9_prob));
+memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
+MODEL_NODES * sizeof(vp9_prob));
}
void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) {
if (full != model)
-vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
}
@@ -1603,7 +1603,7 @@ void vp9_loop_filter_data_reset(
lf_data->start = 0;
lf_data->stop = 0;
lf_data->y_only = 0;
-vpx_memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
+memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
}
int vp9_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
@@ -245,7 +245,7 @@ static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride,
(void) left;
(void) bd;
for (r = 0; r < bs; r++) {
-vpx_memcpy(dst, above, bs * sizeof(uint16_t));
+memcpy(dst, above, bs * sizeof(uint16_t));
dst += stride;
}
}
@@ -488,7 +488,7 @@ static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
(void) left;
for (r = 0; r < bs; r++) {
-vpx_memcpy(dst, above, bs);
+memcpy(dst, above, bs);
dst += stride;
}
}
@@ -718,24 +718,24 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
/* slower path if the block needs border extension */
if (x0 + 2 * bs <= frame_width) {
if (right_available && bs == 4) {
-vpx_memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
+memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
} else {
-vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+memcpy(above_row, above_ref, bs * sizeof(uint16_t));
vpx_memset16(above_row + bs, above_row[bs - 1], bs);
}
} else if (x0 + bs <= frame_width) {
const int r = frame_width - x0;
if (right_available && bs == 4) {
-vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+memcpy(above_row, above_ref, r * sizeof(uint16_t));
vpx_memset16(above_row + r, above_row[r - 1],
x0 + 2 * bs - frame_width);
} else {
-vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+memcpy(above_row, above_ref, bs * sizeof(uint16_t));
vpx_memset16(above_row + bs, above_row[bs - 1], bs);
}
} else if (x0 <= frame_width) {
const int r = frame_width - x0;
-vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+memcpy(above_row, above_ref, r * sizeof(uint16_t));
vpx_memset16(above_row + r, above_row[r - 1],
x0 + 2 * bs - frame_width);
}
@@ -746,9 +746,9 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
if (bs == 4 && right_available && left_available) {
const_above_row = above_ref;
} else {
-vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+memcpy(above_row, above_ref, bs * sizeof(uint16_t));
if (bs == 4 && right_available)
-vpx_memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
+memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
else
vpx_memset16(above_row + bs, above_row[bs - 1], bs);
// TODO(Peter): this value should probably change for high bitdepth
@@ -841,10 +841,10 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
if (xd->mb_to_right_edge < 0) {
/* slower path if the block needs border extension */
if (x0 + bs <= frame_width) {
-vpx_memcpy(above_row, above_ref, bs);
+memcpy(above_row, above_ref, bs);
} else if (x0 <= frame_width) {
const int r = frame_width - x0;
-vpx_memcpy(above_row, above_ref, r);
+memcpy(above_row, above_ref, r);
vpx_memset(above_row + r, above_row[r - 1],
x0 + bs - frame_width);
}
@@ -853,7 +853,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
if (bs == 4 && right_available && left_available) {
const_above_row = above_ref;
} else {
-vpx_memcpy(above_row, above_ref, bs);
+memcpy(above_row, above_ref, bs);
}
}
above_row[-1] = left_available ? above_ref[-1] : 129;
@@ -871,24 +871,24 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
/* slower path if the block needs border extension */
if (x0 + 2 * bs <= frame_width) {
if (right_available && bs == 4) {
-vpx_memcpy(above_row, above_ref, 2 * bs);
+memcpy(above_row, above_ref, 2 * bs);
} else {
-vpx_memcpy(above_row, above_ref, bs);
+memcpy(above_row, above_ref, bs);
vpx_memset(above_row + bs, above_row[bs - 1], bs);
}
} else if (x0 + bs <= frame_width) {
const int r = frame_width - x0;
if (right_available && bs == 4) {
-vpx_memcpy(above_row, above_ref, r);
+memcpy(above_row, above_ref, r);
vpx_memset(above_row + r, above_row[r - 1],
x0 + 2 * bs - frame_width);
} else {
-vpx_memcpy(above_row, above_ref, bs);
+memcpy(above_row, above_ref, bs);
vpx_memset(above_row + bs, above_row[bs - 1], bs);
}
} else if (x0 <= frame_width) {
const int r = frame_width - x0;
-vpx_memcpy(above_row, above_ref, r);
+memcpy(above_row, above_ref, r);
vpx_memset(above_row + r, above_row[r - 1],
x0 + 2 * bs - frame_width);
}
@@ -897,9 +897,9 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
if (bs == 4 && right_available && left_available) {
const_above_row = above_ref;
} else {
-vpx_memcpy(above_row, above_ref, bs);
+memcpy(above_row, above_ref, bs);
if (bs == 4 && right_available)
-vpx_memcpy(above_row + bs, above_ref + bs, bs);
+memcpy(above_row + bs, above_ref + bs, bs);
else
vpx_memset(above_row + bs, above_row[bs - 1], bs);
}
@@ -357,7 +357,7 @@ static void copy_frame(YV12_BUFFER_CONFIG dest, const YV12_BUFFER_CONFIG src) {
assert(dest.y_height == src.y_height);
for (r = 0; r < dest.y_height; ++r) {
-vpx_memcpy(destbuf, srcbuf, dest.y_width);
+memcpy(destbuf, srcbuf, dest.y_width);
destbuf += dest.y_stride;
srcbuf += src.y_stride;
}
@@ -1036,8 +1036,8 @@ static void update_state(VP9_COMP *cpi, ThreadData *td,
}
x->skip = ctx->skip;
-vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
-sizeof(uint8_t) * ctx->num_4x4_blk);
+memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
+sizeof(uint8_t) * ctx->num_4x4_blk);
if (!output_enabled)
return;
@@ -1341,22 +1341,22 @@ static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
int mi_width = num_8x8_blocks_wide_lookup[bsize];
int mi_height = num_8x8_blocks_high_lookup[bsize];
for (p = 0; p < MAX_MB_PLANE; p++) {
-vpx_memcpy(
+memcpy(
xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
a + num_4x4_blocks_wide * p,
(sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
xd->plane[p].subsampling_x);
-vpx_memcpy(
+memcpy(
xd->left_context[p]
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
l + num_4x4_blocks_high * p,
(sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
xd->plane[p].subsampling_y);
}
-vpx_memcpy(xd->above_seg_context + mi_col, sa,
-sizeof(*xd->above_seg_context) * mi_width);
-vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
-sizeof(xd->left_seg_context[0]) * mi_height);
+memcpy(xd->above_seg_context + mi_col, sa,
+sizeof(*xd->above_seg_context) * mi_width);
+memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
+sizeof(xd->left_seg_context[0]) * mi_height);
}
static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
@@ -1373,22 +1373,22 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
// buffer the above/left context information of the block in search.
for (p = 0; p < MAX_MB_PLANE; ++p) {
-vpx_memcpy(
+memcpy(
a + num_4x4_blocks_wide * p,
xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
(sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
xd->plane[p].subsampling_x);
-vpx_memcpy(
+memcpy(
l + num_4x4_blocks_high * p,
xd->left_context[p]
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
(sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
xd->plane[p].subsampling_y);
}
-vpx_memcpy(sa, xd->above_seg_context + mi_col,
-sizeof(*xd->above_seg_context) * mi_width);
-vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
-sizeof(xd->left_seg_context[0]) * mi_height);
+memcpy(sa, xd->above_seg_context + mi_col,
+sizeof(*xd->above_seg_context) * mi_width);
+memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
+sizeof(xd->left_seg_context[0]) * mi_height);
}
static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
@@ -2353,11 +2353,11 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
}
static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
-vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
+memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
}
static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
-vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
+memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
}
#if CONFIG_FP_MB_STATS
@@ -416,19 +416,19 @@ static void save_coding_context(VP9_COMP *cpi) {
// quantizer value is adjusted between loop iterations.
vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
-vpx_memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
-MV_VALS * sizeof(*cpi->nmvcosts[0]));
-vpx_memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
-MV_VALS * sizeof(*cpi->nmvcosts[1]));
-vpx_memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
-MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
-vpx_memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
-MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
+memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
+MV_VALS * sizeof(*cpi->nmvcosts[0]));
+memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
+MV_VALS * sizeof(*cpi->nmvcosts[1]));
+memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
+MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
+memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
+MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
-vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
-cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+memcpy(cpi->coding_context.last_frame_seg_map_copy,
+cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
@@ -444,20 +444,18 @@ static void restore_coding_context(VP9_COMP *cpi) {
// previous call to vp9_save_coding_context.
vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
-vpx_memcpy(cpi->nmvcosts[0], cc->nmvcosts[0],
-MV_VALS * sizeof(*cc->nmvcosts[0]));
-vpx_memcpy(cpi->nmvcosts[1], cc->nmvcosts[1],
-MV_VALS * sizeof(*cc->nmvcosts[1]));
-vpx_memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
-MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
-vpx_memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
-MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
+memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
+memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
+memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
+MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
+memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
+MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
-vpx_memcpy(cm->last_frame_seg_map,
-cpi->coding_context.last_frame_seg_map_copy,
-(cm->mi_rows * cm->mi_cols));
+memcpy(cm->last_frame_seg_map,
+cpi->coding_context.last_frame_seg_map_copy,
+(cm->mi_rows * cm->mi_cols));
vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
@@ -2606,22 +2604,22 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[arf_idx], cm->new_fb_idx);
-vpx_memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
-cpi->interp_filter_selected[0],
-sizeof(cpi->interp_filter_selected[0]));
+memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
+cpi->interp_filter_selected[0],
+sizeof(cpi->interp_filter_selected[0]));
}
if (cpi->refresh_golden_frame) {
ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
-vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
-cpi->interp_filter_selected[0],
-sizeof(cpi->interp_filter_selected[0]));
+memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+cpi->interp_filter_selected[0],
+sizeof(cpi->interp_filter_selected[0]));
else
-vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
-cpi->interp_filter_selected[ALTREF_FRAME],
-sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
+memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+cpi->interp_filter_selected[ALTREF_FRAME],
+sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
}
}
@@ -2629,9 +2627,9 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
-vpx_memcpy(cpi->interp_filter_selected[LAST_FRAME],
-cpi->interp_filter_selected[0],
-sizeof(cpi->interp_filter_selected[0]));
+memcpy(cpi->interp_filter_selected[LAST_FRAME],
+cpi->interp_filter_selected[0],
+sizeof(cpi->interp_filter_selected[0]));
}
#if CONFIG_VP9_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0) {
@@ -124,8 +124,8 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) {
thread_data->td->rd_counts = cpi->td.rd_counts;
}
if (thread_data->td->counts != &cpi->common.counts) {
-vpx_memcpy(thread_data->td->counts, &cpi->common.counts,
-sizeof(cpi->common.counts));
+memcpy(thread_data->td->counts, &cpi->common.counts,
+sizeof(cpi->common.counts));
}
// Handle use_nonrd_pick_mode case.
@@ -28,7 +28,7 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
for (i = 0; i < h; i++) {
vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
-vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+memcpy(dst_ptr1 + extend_left, src_ptr1, w);
vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
src_ptr1 += src_pitch;
src_ptr2 += src_pitch;
@@ -45,12 +45,12 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
-vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+memcpy(dst_ptr1, src_ptr1, linesize);
dst_ptr1 += dst_pitch;
}
for (i = 0; i < extend_bottom; i++) {
-vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+memcpy(dst_ptr2, src_ptr2, linesize);
dst_ptr2 += dst_pitch;
}
}
@@ -73,7 +73,7 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
for (i = 0; i < h; i++) {
vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
-vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(uint16_t));
+memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(uint16_t));
vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
src_ptr1 += src_pitch;
src_ptr2 += src_pitch;
@@ -90,12 +90,12 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
-vpx_memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
+memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
dst_ptr1 += dst_pitch;
}
for (i = 0; i < extend_bottom; i++) {
-vpx_memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
+memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
dst_ptr2 += dst_pitch;
}
}
@@ -1669,7 +1669,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
const struct buf_2d orig_src = p->src;
const struct buf_2d orig_dst = pd->dst;
struct buf_2d orig_pre[2];
-vpx_memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
+memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
// set buffer pointers for sub8x8 motion search.
p->src.buf =
@@ -419,8 +419,8 @@ void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
int i;
switch (tx_size) {
case TX_4X4:
-vpx_memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
-vpx_memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
break;
case TX_8X8:
for (i = 0; i < num_4x4_w; i += 2)
@@ -616,8 +616,7 @@ void vp9_set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) {
{2000, 2000, 2000, 4000, 4000, 2000}};
RD_OPT *const rd = &cpi->rd;
const int idx = cpi->oxcf.mode == BEST;
-vpx_memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx],
-sizeof(thresh_mult[idx]));
+memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
}
void vp9_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
@@ -790,8 +790,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
assert(ib < 4);
-vpx_memcpy(ta, a, sizeof(ta));
-vpx_memcpy(tl, l, sizeof(tl));
+memcpy(ta, a, sizeof(ta));
+memcpy(tl, l, sizeof(tl));
xd->mi[0]->mbmi.tx_size = TX_4X4;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -812,8 +812,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
continue;
}
-vpx_memcpy(tempa, ta, sizeof(ta));
-vpx_memcpy(templ, tl, sizeof(tl));
+memcpy(tempa, ta, sizeof(ta));
+memcpy(templ, tl, sizeof(tl));
for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
@@ -873,12 +873,12 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
*bestdistortion = distortion;
best_rd = this_rd;
*best_mode = mode;
-vpx_memcpy(a, tempa, sizeof(tempa));
-vpx_memcpy(l, templ, sizeof(templ));
+memcpy(a, tempa, sizeof(tempa));
+memcpy(l, templ, sizeof(templ));
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
-vpx_memcpy(best_dst16 + idy * 8,
-CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
-num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+memcpy(best_dst16 + idy * 8,
+CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
}
next_highbd:
@@ -888,9 +888,9 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
-vpx_memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
-best_dst16 + idy * 8,
-num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+best_dst16 + idy * 8,
+num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
return best_rd;
@@ -913,8 +913,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
continue;
}
-vpx_memcpy(tempa, ta, sizeof(ta));
-vpx_memcpy(templ, tl, sizeof(tl));
+memcpy(tempa, ta, sizeof(ta));
+memcpy(templ, tl, sizeof(tl));
for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
@@ -971,11 +971,11 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
*bestdistortion = distortion;
best_rd = this_rd;
*best_mode = mode;
-vpx_memcpy(a, tempa, sizeof(tempa));
-vpx_memcpy(l, templ, sizeof(templ));
+memcpy(a, tempa, sizeof(tempa));
+memcpy(l, templ, sizeof(templ));
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
-vpx_memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
-num_4x4_blocks_wide * 4);
+memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
+num_4x4_blocks_wide * 4);
}
next:
{}
@@ -985,8 +985,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
-vpx_memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
-num_4x4_blocks_wide * 4);
+memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
+num_4x4_blocks_wide * 4);
return best_rd;
}
@@ -1011,8 +1011,8 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
ENTROPY_CONTEXT t_above[4], t_left[4];
const int *bmode_costs = cpi->mbmode_cost;
-vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
-vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
+memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
+memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
// Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
@ -1776,8 +1776,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
for (i = 0; i < 4; i++)
|
||||
bsi->modes[i] = ZEROMV;
|
||||
|
||||
vpx_memcpy(t_above, pd->above_context, sizeof(t_above));
|
||||
vpx_memcpy(t_left, pd->left_context, sizeof(t_left));
|
||||
memcpy(t_above, pd->above_context, sizeof(t_above));
|
||||
memcpy(t_left, pd->left_context, sizeof(t_left));
|
||||
|
||||
// 64 makes this threshold really big effectively
|
||||
// making it so that we very rarely check mvs on
|
||||
|
@ -1819,11 +1819,11 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
this_mode, mbmi->ref_frame))
|
||||
continue;
|
||||
|
||||
vpx_memcpy(orig_pre, pd->pre, sizeof(orig_pre));
|
||||
vpx_memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
|
||||
sizeof(bsi->rdstat[i][mode_idx].ta));
|
||||
vpx_memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
|
||||
sizeof(bsi->rdstat[i][mode_idx].tl));
|
||||
memcpy(orig_pre, pd->pre, sizeof(orig_pre));
|
||||
memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
|
||||
sizeof(bsi->rdstat[i][mode_idx].ta));
|
||||
memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
|
||||
sizeof(bsi->rdstat[i][mode_idx].tl));
|
||||
|
||||
// motion search for newmv (single predictor case only)
|
||||
if (!has_second_rf && this_mode == NEWMV &&
|
||||
|
@ -1999,8 +1999,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
|
||||
if (!subpelmv && have_ref &&
|
||||
ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
|
||||
vpx_memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
|
||||
sizeof(SEG_RDSTAT));
|
||||
memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
|
||||
sizeof(SEG_RDSTAT));
|
||||
if (num_4x4_blocks_wide > 1)
|
||||
bsi->rdstat[i + 1][mode_idx].eobs =
|
||||
ref_bsi->rdstat[i + 1][mode_idx].eobs;
|
||||
|
@ -2052,8 +2052,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
}
|
||||
|
||||
mode_idx = INTER_OFFSET(mode_selected);
|
||||
vpx_memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
|
||||
vpx_memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
|
||||
memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
|
||||
memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
|
||||
|
||||
set_and_cost_bmi_mvs(cpi, xd, i, mode_selected, mode_mv[mode_selected],
|
||||
frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
|
||||
|
@ -2190,9 +2190,9 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
|
|||
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
|
||||
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
|
||||
|
||||
vpx_memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
|
||||
vpx_memcpy(ctx->best_filter_diff, best_filter_diff,
|
||||
sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
|
||||
memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
|
||||
memcpy(ctx->best_filter_diff, best_filter_diff,
|
||||
sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
|
||||
}
|
||||
|
||||
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
|
||||
|
@ -2658,8 +2658,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
|
||||
skip_txfm_sb = tmp_skip_sb;
|
||||
skip_sse_sb = tmp_skip_sse;
|
||||
vpx_memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
||||
vpx_memcpy(bsse, x->bsse, sizeof(bsse));
|
||||
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
||||
memcpy(bsse, x->bsse, sizeof(bsse));
|
||||
}
|
||||
}
|
||||
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
||||
|
@ -2689,8 +2689,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
|
||||
&skip_txfm_sb, &skip_sse_sb);
|
||||
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
|
||||
vpx_memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
||||
vpx_memcpy(bsse, x->bsse, sizeof(bsse));
|
||||
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
||||
memcpy(bsse, x->bsse, sizeof(bsse));
|
||||
}
|
||||
|
||||
if (!is_comp_pred)
|
||||
|
@ -2714,8 +2714,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
|||
if (cm->interp_filter == SWITCHABLE)
|
||||
*rate2 += rs;
|
||||
|
||||
vpx_memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
|
||||
vpx_memcpy(x->bsse, bsse, sizeof(bsse));
|
||||
memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
|
||||
memcpy(x->bsse, bsse, sizeof(bsse));
|
||||
|
||||
if (!skip_txfm_sb) {
|
||||
int skippable_y, skippable_uv;
|
||||
|
@ -3389,8 +3389,8 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
|
|||
|
||||
if (!x->select_tx_size)
|
||||
swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
|
||||
vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
|
||||
sizeof(uint8_t) * ctx->num_4x4_blk);
|
||||
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
|
||||
sizeof(uint8_t) * ctx->num_4x4_blk);
|
||||
|
||||
// TODO(debargha): enhance this test with a better distortion prediction
|
||||
// based on qp, activity mask and history
|
||||
|
@ -4155,8 +4155,8 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
|
|||
best_skip2 = this_skip2;
|
||||
if (!x->select_tx_size)
|
||||
swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
|
||||
vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
|
||||
sizeof(uint8_t) * ctx->num_4x4_blk);
|
||||
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
|
||||
sizeof(uint8_t) * ctx->num_4x4_blk);
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
best_bmodes[i] = xd->mi[0]->bmi[i];
|
||||
|
@ -4280,8 +4280,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
|
|||
xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
|
||||
} else {
|
||||
for (i = 0; i < 4; ++i)
|
||||
vpx_memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i],
|
||||
sizeof(b_mode_info));
|
||||
memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
|
||||
|
||||
mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
|
||||
mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
|
||||
|
|
|

@ -36,7 +36,7 @@ void vp9_set_segment_data(struct segmentation *seg,
unsigned char abs_delta) {
seg->abs_delta = abs_delta;

vpx_memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
}
void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {

@ -263,11 +263,11 @@ void vp9_choose_segmap_coding_method(VP9_COMMON *cm, MACROBLOCKD *xd) {
// Now choose which coding method to use.
if (t_pred_cost < no_pred_cost) {
seg->temporal_update = 1;
vpx_memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
vpx_memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
} else {
seg->temporal_update = 0;
vpx_memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
}
}

@ -146,7 +146,7 @@ int vp9_prob_diff_update_savings_search_model(const unsigned int *ct,
int newp;
vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
vp9_model_to_full_probs(oldp, oldplist);
vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
old_b += cost_branch256(ct + 2 * i, oldplist[i]);
old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);

@ -150,38 +150,37 @@ static int vp9_denoiser_NxM_sse2_small(
const uint8_t shift = (width == 4) ? 2 : 1;

for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
vpx_memcpy(sig_buffer[r], sig, width);
vpx_memcpy(sig_buffer[r] + width, sig + sig_stride, width);
vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, width);
vpx_memcpy(mc_running_buffer[r] + width,
mc_running_avg_y + mc_avg_y_stride, width);
vpx_memcpy(running_buffer[r], running_avg_y, width);
vpx_memcpy(running_buffer[r] + width,
running_avg_y + avg_y_stride, width);
memcpy(sig_buffer[r], sig, width);
memcpy(sig_buffer[r] + width, sig + sig_stride, width);
memcpy(mc_running_buffer[r], mc_running_avg_y, width);
memcpy(mc_running_buffer[r] + width,
mc_running_avg_y + mc_avg_y_stride, width);
memcpy(running_buffer[r], running_avg_y, width);
memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
if (width == 4) {
vpx_memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
vpx_memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
vpx_memcpy(mc_running_buffer[r] + width * 2,
mc_running_avg_y + mc_avg_y_stride * 2, width);
vpx_memcpy(mc_running_buffer[r] + width * 3,
mc_running_avg_y + mc_avg_y_stride * 3, width);
vpx_memcpy(running_buffer[r] + width * 2,
running_avg_y + avg_y_stride * 2, width);
vpx_memcpy(running_buffer[r] + width * 3,
running_avg_y + avg_y_stride * 3, width);
memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
memcpy(mc_running_buffer[r] + width * 2,
mc_running_avg_y + mc_avg_y_stride * 2, width);
memcpy(mc_running_buffer[r] + width * 3,
mc_running_avg_y + mc_avg_y_stride * 3, width);
memcpy(running_buffer[r] + width * 2,
running_avg_y + avg_y_stride * 2, width);
memcpy(running_buffer[r] + width * 3,
running_avg_y + avg_y_stride * 3, width);
}
acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
mc_running_buffer[r],
running_buffer[r],
&k_0, &k_4, &k_8, &k_16,
&l3, &l32, &l21, acc_diff);
vpx_memcpy(running_avg_y, running_buffer[r], width);
vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
memcpy(running_avg_y, running_buffer[r], width);
memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
if (width == 4) {
vpx_memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
vpx_memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
}
// Update pointers for next iteration.
sig += (sig_stride << shift);

@ -213,14 +212,14 @@ static int vp9_denoiser_NxM_sse2_small(
acc_diff = vp9_denoiser_adj_16x1_sse2(
sig_buffer[r], mc_running_buffer[r], running_buffer[r],
k_0, k_delta, acc_diff);
vpx_memcpy(running_avg_y, running_buffer[r], width);
vpx_memcpy(running_avg_y + avg_y_stride,
running_buffer[r] + width, width);
memcpy(running_avg_y, running_buffer[r], width);
memcpy(running_avg_y + avg_y_stride,
running_buffer[r] + width, width);
if (width == 4) {
vpx_memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
vpx_memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
}
// Update pointers for next iteration.
running_avg_y += (avg_y_stride << shift);

@ -527,7 +527,7 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
frame_worker_data->scratch_buffer_size = data_sz;
}
frame_worker_data->data_size = data_sz;
vpx_memcpy(frame_worker_data->scratch_buffer, *data, data_sz);
memcpy(frame_worker_data->scratch_buffer, *data, data_sz);

frame_worker_data->frame_decoded = 0;
frame_worker_data->frame_context_ready = 0;

@ -91,10 +91,6 @@ void vpx_free(void *memblk) {
}
}

void *vpx_memcpy(void *dest, const void *source, size_t length) {
return memcpy(dest, source, length);
}

void *vpx_memset(void *dest, int val, size_t length) {
return memset(dest, val, length);
}

@ -30,7 +30,6 @@ extern "C" {
void *vpx_realloc(void *memblk, size_t size);
void vpx_free(void *memblk);

void *vpx_memcpy(void *dest, const void *src, size_t length);
void *vpx_memset(void *dest, int val, size_t length);
#if CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
void *vpx_memset16(void *dest, int val, size_t length);

@ -38,7 +37,6 @@ extern "C" {

#ifndef __VPX_MEM_C__
# include <string.h>
# define vpx_memcpy memcpy
# define vpx_memset memset
#endif
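The wrapper and macro removed above were straight pass-throughs to the C library (the wrapper returned memcpy(dest, source, length) and the header fallback defined vpx_memcpy as memcpy), so converting a call site changes only its spelling. A minimal, self-contained sketch of a converted call site; the buffers here are hypothetical and not taken from the patch:

/* Sketch only: behavior is identical because the old wrapper simply
 * forwarded to memcpy(). */
#include <stdio.h>
#include <string.h>

int main(void) {
  unsigned char src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  unsigned char dst[8];
  /* was: vpx_memcpy(dst, src, sizeof(src)); */
  memcpy(dst, src, sizeof(src));
  printf("%d\n", dst[7]);  /* prints 8 */
  return 0;
}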

@ -215,7 +215,7 @@ void vp8_vertical_band_2_1_scale_c(unsigned char *source,
unsigned int dest_width) {
(void) dest_pitch;
(void) src_pitch;
vpx_memcpy(dest, source, dest_width);
memcpy(dest, source, dest_width);
}

void vp8_vertical_band_2_1_scale_i_c(unsigned char *source,

@ -379,7 +379,7 @@ void Scale2D
vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch, dest_width);

if (interpolation)
vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width);
memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width);

/* Next band... */
source += (unsigned long) source_band_height * source_pitch;

@ -432,7 +432,7 @@ void Scale2D
temp_area + i * dest_pitch, 1, hratio, dest_width);
} else { /* Duplicate the last row */
/* copy temp_area row 0 over from last row in the past */
vpx_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
}
}

@ -443,7 +443,7 @@ void Scale2D
}

/* copy temp_area row 0 over from last row in the past */
vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);

/* move to the next band */
source += source_band_height * source_pitch;

@ -502,7 +502,7 @@ void vpx_scale_frame

if (dh < (int)dst->y_height)
for (i = dh - 1; i < (int)dst->y_height; i++)
vpx_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);

Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2,

@ -514,7 +514,7 @@ void vpx_scale_frame

if (dh / 2 < (int)dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
vpx_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);

Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2,

@ -526,5 +526,5 @@ void vpx_scale_frame

if (dh / 2 < (int) dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
vpx_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
}

@ -48,12 +48,12 @@ static void extend_plane(uint8_t *const src, int src_stride,
dst_ptr2 = src + src_stride * height - extend_left;

for (i = 0; i < extend_top; ++i) {
vpx_memcpy(dst_ptr1, src_ptr1, linesize);
memcpy(dst_ptr1, src_ptr1, linesize);
dst_ptr1 += src_stride;
}

for (i = 0; i < extend_bottom; ++i) {
vpx_memcpy(dst_ptr2, src_ptr2, linesize);
memcpy(dst_ptr2, src_ptr2, linesize);
dst_ptr2 += src_stride;
}
}

@ -91,12 +91,12 @@ static void extend_plane_high(uint8_t *const src8, int src_stride,
dst_ptr2 = src + src_stride * height - extend_left;

for (i = 0; i < extend_top; ++i) {
vpx_memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
dst_ptr1 += src_stride;
}

for (i = 0; i < extend_bottom; ++i) {
vpx_memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
dst_ptr2 += src_stride;
}
}

@ -212,7 +212,7 @@ void vp9_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) {
void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
vpx_memcpy(dst, src, num * sizeof(uint16_t));
memcpy(dst, src, num * sizeof(uint16_t));
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
#endif  // CONFIG_VP9

@ -269,7 +269,7 @@ void vp8_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
#endif

for (row = 0; row < src_ybc->y_height; ++row) {
vpx_memcpy(dst, src, src_ybc->y_width);
memcpy(dst, src, src_ybc->y_width);
src += src_ybc->y_stride;
dst += dst_ybc->y_stride;
}

@ -278,7 +278,7 @@ void vp8_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
dst = dst_ybc->u_buffer;

for (row = 0; row < src_ybc->uv_height; ++row) {
vpx_memcpy(dst, src, src_ybc->uv_width);
memcpy(dst, src, src_ybc->uv_width);
src += src_ybc->uv_stride;
dst += dst_ybc->uv_stride;
}

@ -287,7 +287,7 @@ void vp8_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
dst = dst_ybc->v_buffer;

for (row = 0; row < src_ybc->uv_height; ++row) {
vpx_memcpy(dst, src, src_ybc->uv_width);
memcpy(dst, src, src_ybc->uv_width);
src += src_ybc->uv_stride;
dst += dst_ybc->uv_stride;
}

@ -306,7 +306,7 @@ void vpx_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
for (row = 0; row < src_ybc->y_height; ++row) {
vpx_memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
src16 += src_ybc->y_stride;
dst16 += dst_ybc->y_stride;
}

@ -315,7 +315,7 @@ void vpx_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
#endif

for (row = 0; row < src_ybc->y_height; ++row) {
vpx_memcpy(dst, src, src_ybc->y_width);
memcpy(dst, src, src_ybc->y_width);
src += src_ybc->y_stride;
dst += dst_ybc->y_stride;
}
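The copy loops above all follow the same pattern: one memcpy per row of width (or width * sizeof(uint16_t)) bytes, with the source and destination pointers advanced by their own strides, because the two buffers generally have different strides and padding. A stand-alone sketch of that pattern; the function and parameter names are illustrative, not the library's:

#include <stdint.h>
#include <string.h>

/* Copy the visible part of a plane row by row; strides may exceed width,
 * which is why one large memcpy over the whole buffer is not used. */
static void copy_plane(uint8_t *dst, int dst_stride,
                       const uint8_t *src, int src_stride,
                       int width, int height) {
  int row;
  for (row = 0; row < height; ++row) {
    memcpy(dst, src, width);  /* width bytes of pixel data per row */
    src += src_stride;        /* advance by the source stride */
    dst += dst_stride;        /* advance by the destination stride */
  }
}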

@ -94,12 +94,12 @@ static void extend_plane(uint8_t *const src, int src_stride,
linesize = extend_left + extend_right + width;

for (i = 0; i < extend_top; i++) {
vpx_memcpy(top_dst, top_src, linesize);
memcpy(top_dst, top_src, linesize);
top_dst += src_stride;
}

for (i = 0; i < extend_bottom; i++) {
vpx_memcpy(bot_dst, bot_src, linesize);
memcpy(bot_dst, bot_src, linesize);
bot_dst += src_stride;
}
}
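The extend_plane hunks above replicate the first and last (already horizontally extended) rows into the top and bottom border, one memcpy per padded row. A simplified sketch of the same idea; it assumes the caller allocated `padding` extra rows above and below the plane, which the diff does not show:

#include <stdint.h>
#include <string.h>

/* Vertical border extension: duplicate the top row upward and the bottom
 * row downward. Assumes padding rows exist above and below the plane. */
static void extend_top_bottom(uint8_t *plane, int stride,
                              int row_bytes, int height, int padding) {
  const uint8_t *first_row = plane;
  const uint8_t *last_row = plane + (height - 1) * stride;
  int i;
  for (i = 1; i <= padding; ++i) {
    memcpy(plane - i * stride, first_row, row_bytes);               /* rows above */
    memcpy(plane + (height - 1 + i) * stride, last_row, row_bytes); /* rows below */
  }
}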