/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"
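
/* Sub-pixel positions below are tracked in q4 fixed point: 16 q4 units make
 * one full pixel, so x_q4 >> SUBPEL_BITS selects the integer source column
 * and x_q4 & SUBPEL_MASK selects one of the 16 filter phases. Each output
 * pixel is an 8-tap (SUBPEL_TAPS) dot product that is rounded back down by
 * FILTER_BITS and clipped to the pixel range. For example, the unscaled case
 * uses x_step_q4 == 16, so every output advances exactly one source pixel
 * and keeps the same phase x0_q4. */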

static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const InterpKernel *x_filters,
                           int x0_q4, int x_step_q4, int w, int h) {
  int x, y;
  src -= SUBPEL_TAPS / 2 - 1;

  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const InterpKernel *x_filters,
                               int x0_q4, int x_step_q4, int w, int h) {
  int x, y;
  src -= SUBPEL_TAPS / 2 - 1;

  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = ROUND_POWER_OF_TWO(dst[x] +
          clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const InterpKernel *y_filters,
                          int y0_q4, int y_step_q4, int w, int h) {
  int x, y;
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);

  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const InterpKernel *y_filters,
                              int y0_q4, int y_step_q4, int w, int h) {
  int x, y;
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);

  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
          clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void convolve(const uint8_t *src, ptrdiff_t src_stride,
                     uint8_t *dst, ptrdiff_t dst_stride,
                     const InterpKernel *const x_filters,
                     int x0_q4, int x_step_q4,
                     const InterpKernel *const y_filters,
                     int y0_q4, int y_step_q4,
                     int w, int h) {
  // Note: Fixed size intermediate buffer, temp, places limits on parameters.
  // 2d filtering proceeds in 2 steps:
  //   (1) Interpolate horizontally into an intermediate buffer, temp.
  //   (2) Interpolate temp vertically to derive the sub-pixel result.
  // Deriving the maximum number of rows in the temp buffer (135):
  // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative).
  // --Largest block size is 64x64 pixels.
  // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
  //   original frame (in 1/16th pixel units).
  // --Must round-up because block may be located at sub-pixel position.
  // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
  // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
  uint8_t temp[135 * 64];
  int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;

  assert(w <= 64);
  assert(h <= 64);
  assert(y_step_q4 <= 32);
  assert(x_step_q4 <= 32);

  convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride, temp, 64,
                 x_filters, x0_q4, x_step_q4, w, intermediate_height);
  convolve_vert(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst, dst_stride,
                y_filters, y0_q4, y_step_q4, w, h);
}
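
/* The filter_x/filter_y arguments passed to the vpx_convolve8_* entry points
 * point at a single 8-tap kernel inside a table of 16 kernels (one per
 * sub-pixel phase). Since an InterpKernel holds SUBPEL_TAPS int16_t
 * coefficients, a full table occupies 16 * 8 * 2 = 256 bytes, so masking off
 * the low 8 bits of the kernel pointer recovers the table base and the
 * pointer difference recovers the starting phase. */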

static const InterpKernel *get_filter_base(const int16_t *filter) {
  // NOTE: This assumes that the filter table is 256-byte aligned.
  // TODO(agrange) Modify to make independent of table alignment.
  return (const InterpKernel *)(((intptr_t)filter) & ~((intptr_t)0xFF));
}

static int get_filter_offset(const int16_t *f, const InterpKernel *base) {
  return (int)((const InterpKernel *)(intptr_t)f - base);
}

void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);

  (void)filter_y;
  (void)y_step_q4;

  convolve_horiz(src, src_stride, dst, dst_stride, filters_x,
                 x0_q4, x_step_q4, w, h);
}

void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4,
                               int w, int h) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);

  (void)filter_y;
  (void)y_step_q4;

  convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x,
                     x0_q4, x_step_q4, w, h);
}

void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const int16_t *filter_x, int x_step_q4,
                          const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);

  (void)filter_x;
  (void)x_step_q4;

  convolve_vert(src, src_stride, dst, dst_stride, filters_y,
                y0_q4, y_step_q4, w, h);
}

void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);

  (void)filter_x;
  (void)x_step_q4;

  convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y,
                    y0_q4, y_step_q4, w, h);
}

void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                     uint8_t *dst, ptrdiff_t dst_stride,
                     const int16_t *filter_x, int x_step_q4,
                     const int16_t *filter_y, int y_step_q4,
                     int w, int h) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);

  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);

  convolve(src, src_stride, dst, dst_stride,
           filters_x, x0_q4, x_step_q4,
           filters_y, y0_q4, y_step_q4, w, h);
}
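
/* The 2-D averaging variant below reuses the plain 8-tap path: the filtered
 * block is first produced in a 64x64 intermediate buffer and then averaged
 * into dst with vpx_convolve_avg_c, so the existing dst contents only enter
 * the final rounding average and never feed the filter taps. */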

void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                         uint8_t *dst, ptrdiff_t dst_stride,
                         const int16_t *filter_x, int x_step_q4,
                         const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
  /* Fixed size intermediate buffer places limits on parameters. */
  DECLARE_ALIGNED(16, uint8_t, temp[64 * 64]);
  assert(w <= 64);
  assert(h <= 64);

  vpx_convolve8_c(src, src_stride, temp, 64,
                  filter_x, x_step_q4, filter_y, y_step_q4, w, h);
  vpx_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h);
}

void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
                         uint8_t *dst, ptrdiff_t dst_stride,
                         const int16_t *filter_x, int filter_x_stride,
                         const int16_t *filter_y, int filter_y_stride,
                         int w, int h) {
  int r;

  (void)filter_x;  (void)filter_x_stride;
  (void)filter_y;  (void)filter_y_stride;

  for (r = h; r > 0; --r) {
    memcpy(dst, src, w);
    src += src_stride;
    dst += dst_stride;
  }
}

void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                        uint8_t *dst, ptrdiff_t dst_stride,
                        const int16_t *filter_x, int filter_x_stride,
                        const int16_t *filter_y, int filter_y_stride,
                        int w, int h) {
  int x, y;

  (void)filter_x;  (void)filter_x_stride;
  (void)filter_y;  (void)filter_y_stride;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x)
      dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);

    src += src_stride;
    dst += dst_stride;
  }
}
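
/* The vpx_scaled_* entry points are thin wrappers that simply forward to the
 * corresponding vpx_convolve8_* C functions; they are kept as separate RTCD
 * hooks so that scaled prediction can be optimized independently of the
 * unscaled paths. */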

void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                        uint8_t *dst, ptrdiff_t dst_stride,
                        const int16_t *filter_x, int x_step_q4,
                        const int16_t *filter_y, int y_step_q4,
                        int w, int h) {
  vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                        filter_y, y_step_q4, w, h);
}

void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                       uint8_t *dst, ptrdiff_t dst_stride,
                       const int16_t *filter_x, int x_step_q4,
                       const int16_t *filter_y, int y_step_q4,
                       int w, int h) {
  vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                       filter_y, y_step_q4, w, h);
}

void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride,
                     uint8_t *dst, ptrdiff_t dst_stride,
                     const int16_t *filter_x, int x_step_q4,
                     const int16_t *filter_y, int y_step_q4,
                     int w, int h) {
  vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                  filter_y, y_step_q4, w, h);
}

void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y, int y_step_q4,
                            int w, int h) {
  vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                            x_step_q4, filter_y, y_step_q4, w, h);
}

void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h) {
  vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                           x_step_q4, filter_y, y_step_q4, w, h);
}

void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride,
                         uint8_t *dst, ptrdiff_t dst_stride,
                         const int16_t *filter_x, int x_step_q4,
                         const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
  vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                      filter_y, y_step_q4, w, h);
}
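
/* High bit-depth variants: pixel buffers are physically uint16_t but travel
 * through the same uint8_t pointer interface, so CONVERT_TO_SHORTPTR /
 * CONVERT_TO_BYTEPTR (from vpx_ports/mem.h) translate between the two views
 * and clip_pixel_highbd() clamps results to the range implied by bd. */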

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                  uint8_t *dst8, ptrdiff_t dst_stride,
                                  const InterpKernel *x_filters,
                                  int x0_q4, int x_step_q4,
                                  int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= SUBPEL_TAPS / 2 - 1;
  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void highbd_convolve_avg_horiz(const uint8_t *src8,
                                      ptrdiff_t src_stride,
                                      uint8_t *dst8, ptrdiff_t dst_stride,
                                      const InterpKernel *x_filters,
                                      int x0_q4, int x_step_q4,
                                      int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= SUBPEL_TAPS / 2 - 1;
  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = ROUND_POWER_OF_TWO(dst[x] +
          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                 uint8_t *dst8, ptrdiff_t dst_stride,
                                 const InterpKernel *y_filters,
                                 int y0_q4, int y_step_q4, int w, int h,
                                 int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = clip_pixel_highbd(
          ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                     uint8_t *dst8, ptrdiff_t dst_stride,
                                     const InterpKernel *y_filters,
                                     int y0_q4, int y_step_q4, int w, int h,
                                     int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const InterpKernel *const x_filters,
                            int x0_q4, int x_step_q4,
                            const InterpKernel *const y_filters,
                            int y0_q4, int y_step_q4,
                            int w, int h, int bd) {
  // Note: Fixed size intermediate buffer, temp, places limits on parameters.
  // 2d filtering proceeds in 2 steps:
  //   (1) Interpolate horizontally into an intermediate buffer, temp.
  //   (2) Interpolate temp vertically to derive the sub-pixel result.
  // Deriving the maximum number of rows in the temp buffer (135):
  // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative).
  // --Largest block size is 64x64 pixels.
  // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
  //   original frame (in 1/16th pixel units).
  // --Must round-up because block may be located at sub-pixel position.
  // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
  // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
  uint16_t temp[64 * 135];
  int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;

  assert(w <= 64);
  assert(h <= 64);
  assert(y_step_q4 <= 32);
  assert(x_step_q4 <= 32);

  highbd_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1),
                        src_stride, CONVERT_TO_BYTEPTR(temp), 64,
                        x_filters, x0_q4, x_step_q4, w,
                        intermediate_height, bd);
  highbd_convolve_vert(CONVERT_TO_BYTEPTR(temp) + 64 * (SUBPEL_TAPS / 2 - 1),
                       64, dst, dst_stride, y_filters, y0_q4, y_step_q4,
                       w, h, bd);
}

void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4,
                                  int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);
  (void)filter_y;
  (void)y_step_q4;

  highbd_convolve_horiz(src, src_stride, dst, dst_stride, filters_x,
                        x0_q4, x_step_q4, w, h, bd);
}

void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const int16_t *filter_x, int x_step_q4,
                                      const int16_t *filter_y, int y_step_q4,
                                      int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);
  (void)filter_y;
  (void)y_step_q4;

  highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x,
                            x0_q4, x_step_q4, w, h, bd);
}

void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int x_step_q4,
                                 const int16_t *filter_y, int y_step_q4,
                                 int w, int h, int bd) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);
  (void)filter_x;
  (void)x_step_q4;

  highbd_convolve_vert(src, src_stride, dst, dst_stride, filters_y,
                       y0_q4, y_step_q4, w, h, bd);
}

void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
                                     const int16_t *filter_x, int x_step_q4,
                                     const int16_t *filter_y, int y_step_q4,
                                     int w, int h, int bd) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);
  (void)filter_x;
  (void)x_step_q4;

  highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y,
                           y0_q4, y_step_q4, w, h, bd);
}

void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y, int y_step_q4,
                            int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);

  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);

  highbd_convolve(src, src_stride, dst, dst_stride,
                  filters_x, x0_q4, x_step_q4,
                  filters_y, y0_q4, y_step_q4, w, h, bd);
}

void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4,
                                int w, int h, int bd) {
  // Fixed size intermediate buffer places limits on parameters.
  DECLARE_ALIGNED(16, uint16_t, temp[64 * 64]);
  assert(w <= 64);
  assert(h <= 64);

  vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64,
                         filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
  vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride,
                            NULL, 0, NULL, 0, w, h, bd);
}

void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                                uint8_t *dst8, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int filter_x_stride,
                                const int16_t *filter_y, int filter_y_stride,
                                int w, int h, int bd) {
  int r;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;
  (void)bd;

  for (r = h; r > 0; --r) {
    memcpy(dst, src, w * sizeof(uint16_t));
    src += src_stride;
    dst += dst_stride;
  }
}

void vpx_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
                               uint8_t *dst8, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int filter_x_stride,
                               const int16_t *filter_y, int filter_y_stride,
                               int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;
  (void)bd;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);
    }
    src += src_stride;
    dst += dst_stride;
  }
}
#endif