Mirror of https://github.com/mozilla/pjs.git
# HG changeset patch
# Parent 677ab41568f1a8427e3e43a6ce9b0d7c822b1f7e
# User Timothy B. Terriberry <tterribe@vt.edu>
Move SAD and variance functions to common

Upstream Change-Id: I256a37c6de079fe92ce744b1f11e16526d06b50a

This patch contains substantial differences compared to the upstream
one, as it still uses the old RTCD framework and does not include
the extra short-circuiting work done in upstream change
I05ce5b2d34e6d45fb3ec2a450aa99c4f3343bf3a.

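[Editorial sketch, not part of the patch: a minimal, hypothetical model of the RTCD (run-time CPU detection) pattern this patch manipulates. A vtable of function pointers is filled with portable C fallbacks and then selectively overridden once CPU feature flags are known. The names below (variance_vtable_t, HAS_SIMD_FLAG, init_variance_vtable, sad16x16_c) are invented for illustration; only the overall shape mirrors libvpx. Moving the variance vtable out of the encoder's private RTCD struct and into the shared VP8_COMMON_RTCD, as this patch does, is what lets common-side code such as the postprocessor reach these kernels.]

    #include <stdlib.h>

    /* Simplified SAD kernel signature (illustrative; the tree's real
     * prototype_sad in variance.h differs). */
    typedef unsigned int (*sad_fn_t)(const unsigned char *src, int src_stride,
                                     const unsigned char *ref, int ref_stride);

    /* Vtable holding one pointer per specialized kernel. */
    typedef struct
    {
        sad_fn_t sad16x16;
    } variance_vtable_t;

    /* Portable C fallback: sum of absolute differences over a 16x16 block. */
    static unsigned int sad16x16_c(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride)
    {
        unsigned int sad = 0;
        int r, c;

        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
                sad += (unsigned int)abs(src[c] - ref[c]);

            src += src_stride;
            ref += ref_stride;
        }

        return sad;
    }

    #define HAS_SIMD_FLAG 0x1 /* invented flag bit for this sketch */

    static void init_variance_vtable(variance_vtable_t *v, int cpu_flags,
                                     sad_fn_t simd_impl)
    {
        v->sad16x16 = sad16x16_c;      /* always-safe default */

        if ((cpu_flags & HAS_SIMD_FLAG) && simd_impl)
            v->sad16x16 = simd_impl;   /* override when detected */
    }
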
diff --git a/media/libvpx/vp8/common/arm/arm_systemdependent.c b/media/libvpx/vp8/common/arm/arm_systemdependent.c
--- a/media/libvpx/vp8/common/arm/arm_systemdependent.c
+++ b/media/libvpx/vp8/common/arm/arm_systemdependent.c
@@ -11,16 +11,17 @@

#include "vpx_config.h"
#include "vpx_ports/arm.h"
#include "vp8/common/pragmas.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
+#include "vp8/common/variance.h"
#include "vp8/common/onyxc_int.h"

void vp8_arch_arm_common_init(VP8_COMMON *ctx)
{
#if CONFIG_RUNTIME_CPU_DETECT
VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
int flags = arm_cpu_caps();
rtcd->flags = flags;
@@ -63,16 +64,41 @@ void vp8_arch_arm_common_init(VP8_COMMON
rtcd->recon.copy8x4 = vp8_copy_mem8x4_v6;
rtcd->recon.intra4x4_predict = vp8_intra4x4_predict_armv6;

rtcd->dequant.block = vp8_dequantize_b_v6;
rtcd->dequant.idct_add = vp8_dequant_idct_add_v6;
rtcd->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_v6;
rtcd->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_v6;

+ rtcd->variance.sad16x16 = vp8_sad16x16_armv6;
+ /*rtcd->variance.sad16x8 = vp8_sad16x8_c;
+ rtcd->variance.sad8x16 = vp8_sad8x16_c;
+ rtcd->variance.sad8x8 = vp8_sad8x8_c;
+ rtcd->variance.sad4x4 = vp8_sad4x4_c;*/
+
+ /*rtcd->variance.var4x4 = vp8_variance4x4_c;*/
+ rtcd->variance.var8x8 = vp8_variance8x8_armv6;
+ /*rtcd->variance.var8x16 = vp8_variance8x16_c;
+ rtcd->variance.var16x8 = vp8_variance16x8_c;*/
+ rtcd->variance.var16x16 = vp8_variance16x16_armv6;
+
+ /*rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
+ rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
+ /*rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
+ rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
+ rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
+ rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
+
+ rtcd->variance.mse16x16 = vp8_mse16x16_armv6;
+ /*rtcd->variance.getmbss = vp8_get_mb_ss_c;*/
+
+ /*rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
}
#endif

#if HAVE_ARMV7
if (flags & HAS_NEON)
{
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_neon;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_neon;
@@ -103,13 +129,38 @@ void vp8_arch_arm_common_init(VP8_COMMON
rtcd->recon.build_intra_predictors_mby_s =
vp8_build_intra_predictors_mby_s_neon;

rtcd->dequant.block = vp8_dequantize_b_neon;
rtcd->dequant.idct_add = vp8_dequant_idct_add_neon;
rtcd->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_neon;
rtcd->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_neon;

+ rtcd->variance.sad16x16 = vp8_sad16x16_neon;
+ rtcd->variance.sad16x8 = vp8_sad16x8_neon;
+ rtcd->variance.sad8x16 = vp8_sad8x16_neon;
+ rtcd->variance.sad8x8 = vp8_sad8x8_neon;
+ rtcd->variance.sad4x4 = vp8_sad4x4_neon;
+
+ /*rtcd->variance.var4x4 = vp8_variance4x4_c;*/
+ rtcd->variance.var8x8 = vp8_variance8x8_neon;
+ rtcd->variance.var8x16 = vp8_variance8x16_neon;
+ rtcd->variance.var16x8 = vp8_variance16x8_neon;
+ rtcd->variance.var16x16 = vp8_variance16x16_neon;
+
+ /*rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
+ rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
+ /*rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
+ rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
+ rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
+ rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
+
+ rtcd->variance.mse16x16 = vp8_mse16x16_neon;
+ /*rtcd->variance.getmbss = vp8_get_mb_ss_c;*/
+
+ rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
}
#endif

#endif
}
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_mse16x16_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_mse16x16_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_sad16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_sad16x16_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_sad16x16_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_sad16x16_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance16x16_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance8x8_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance8x8_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/sad16_neon.asm b/media/libvpx/vp8/common/arm/neon/sad16_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/sad16_neon.asm
rename to media/libvpx/vp8/common/arm/neon/sad16_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/sad8_neon.asm b/media/libvpx/vp8/common/arm/neon/sad8_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/sad8_neon.asm
rename to media/libvpx/vp8/common/arm/neon/sad8_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/variance_neon.asm b/media/libvpx/vp8/common/arm/neon/variance_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/variance_neon.asm
rename to media/libvpx/vp8/common/arm/neon/variance_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_mse16x16_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
rename to media/libvpx/vp8/common/arm/neon/vp8_mse16x16_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm
rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16s_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16s_neon.asm
rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance8x8_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance8x8_neon.asm
rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
diff --git a/media/libvpx/vp8/encoder/arm/variance_arm.c b/media/libvpx/vp8/common/arm/variance_arm.c
rename from media/libvpx/vp8/encoder/arm/variance_arm.c
rename to media/libvpx/vp8/common/arm/variance_arm.c
--- a/media/libvpx/vp8/encoder/arm/variance_arm.c
+++ b/media/libvpx/vp8/common/arm/variance_arm.c
@@ -4,17 +4,17 @@
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#include "vpx_config.h"
-#include "vp8/encoder/variance.h"
+#include "vp8/common/variance.h"
#include "vp8/common/filter.h"

#if HAVE_ARMV6
#include "vp8/common/arm/bilinearfilter_arm.h"

unsigned int vp8_sub_pixel_variance8x8_armv6
(
const unsigned char *src_ptr,
diff --git a/media/libvpx/vp8/encoder/arm/variance_arm.h b/media/libvpx/vp8/common/arm/variance_arm.h
rename from media/libvpx/vp8/encoder/arm/variance_arm.h
rename to media/libvpx/vp8/common/arm/variance_arm.h
diff --git a/media/libvpx/vp8/common/generic/systemdependent.c b/media/libvpx/vp8/common/generic/systemdependent.c
--- a/media/libvpx/vp8/common/generic/systemdependent.c
+++ b/media/libvpx/vp8/common/generic/systemdependent.c
@@ -9,16 +9,17 @@
*/


#include "vpx_config.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
+#include "vp8/common/variance.h"
#include "vp8/common/onyxc_int.h"

#if CONFIG_MULTITHREAD
#if HAVE_UNISTD_H
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO);
@@ -110,16 +111,67 @@ void vp8_machine_specific_config(VP8_COM
rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_c;
rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_c;
rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_c;
rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_c;
rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_c;
rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_c;
rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_c;

+ rtcd->variance.sad16x16 = vp8_sad16x16_c;
+ rtcd->variance.sad16x8 = vp8_sad16x8_c;
+ rtcd->variance.sad8x16 = vp8_sad8x16_c;
+ rtcd->variance.sad8x8 = vp8_sad8x8_c;
+ rtcd->variance.sad4x4 = vp8_sad4x4_c;
+
+ rtcd->variance.sad16x16x3 = vp8_sad16x16x3_c;
+ rtcd->variance.sad16x8x3 = vp8_sad16x8x3_c;
+ rtcd->variance.sad8x16x3 = vp8_sad8x16x3_c;
+ rtcd->variance.sad8x8x3 = vp8_sad8x8x3_c;
+ rtcd->variance.sad4x4x3 = vp8_sad4x4x3_c;
+
+ rtcd->variance.sad16x16x8 = vp8_sad16x16x8_c;
+ rtcd->variance.sad16x8x8 = vp8_sad16x8x8_c;
+ rtcd->variance.sad8x16x8 = vp8_sad8x16x8_c;
+ rtcd->variance.sad8x8x8 = vp8_sad8x8x8_c;
+ rtcd->variance.sad4x4x8 = vp8_sad4x4x8_c;
+
+ rtcd->variance.sad16x16x4d = vp8_sad16x16x4d_c;
+ rtcd->variance.sad16x8x4d = vp8_sad16x8x4d_c;
+ rtcd->variance.sad8x16x4d = vp8_sad8x16x4d_c;
+ rtcd->variance.sad8x8x4d = vp8_sad8x8x4d_c;
+ rtcd->variance.sad4x4x4d = vp8_sad4x4x4d_c;
+#if ARCH_X86 || ARCH_X86_64
+ rtcd->variance.copy32xn = vp8_copy32xn_c;
+#endif
+ rtcd->variance.var4x4 = vp8_variance4x4_c;
+ rtcd->variance.var8x8 = vp8_variance8x8_c;
+ rtcd->variance.var8x16 = vp8_variance8x16_c;
+ rtcd->variance.var16x8 = vp8_variance16x8_c;
+ rtcd->variance.var16x16 = vp8_variance16x16_c;
+
+ rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
+ rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
+ rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
+ rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
+ rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
+ rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
+ rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
+
+ rtcd->variance.mse16x16 = vp8_mse16x16_c;
+ rtcd->variance.getmbss = vp8_get_mb_ss_c;
+
+ rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
+#if CONFIG_INTERNAL_STATS
+ rtcd->variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
+ rtcd->variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
+#endif
+
#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
rtcd->postproc.down = vp8_mbpost_proc_down_c;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
rtcd->postproc.downacross = vp8_post_proc_down_and_across_c;
rtcd->postproc.addnoise = vp8_plane_add_noise_c;
rtcd->postproc.blend_mb_inner = vp8_blend_mb_inner_c;
rtcd->postproc.blend_mb_outer = vp8_blend_mb_outer_c;
rtcd->postproc.blend_b = vp8_blend_b_c;
diff --git a/media/libvpx/vp8/common/onyxc_int.h b/media/libvpx/vp8/common/onyxc_int.h
--- a/media/libvpx/vp8/common/onyxc_int.h
+++ b/media/libvpx/vp8/common/onyxc_int.h
@@ -14,16 +14,17 @@

#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "loopfilter.h"
#include "entropymv.h"
#include "entropy.h"
#include "idct.h"
#include "recon.h"
+#include "variance.h"
#if CONFIG_POSTPROC
#include "postproc.h"
#endif
#include "dequantize.h"

/*#ifdef PACKET_TESTING*/
#include "header.h"
/*#endif*/
@@ -74,16 +75,17 @@ typedef enum
typedef struct VP8_COMMON_RTCD
{
#if CONFIG_RUNTIME_CPU_DETECT
vp8_dequant_rtcd_vtable_t dequant;
vp8_idct_rtcd_vtable_t idct;
vp8_recon_rtcd_vtable_t recon;
vp8_subpix_rtcd_vtable_t subpix;
vp8_loopfilter_rtcd_vtable_t loopfilter;
+ vp8_variance_rtcd_vtable_t variance;
#if CONFIG_POSTPROC
vp8_postproc_rtcd_vtable_t postproc;
#endif
int flags;
#else
int unused;
#endif
} VP8_COMMON_RTCD;
diff --git a/media/libvpx/vp8/common/postproc.c b/media/libvpx/vp8/common/postproc.c
--- a/media/libvpx/vp8/common/postproc.c
+++ b/media/libvpx/vp8/common/postproc.c
@@ -12,17 +12,17 @@
#include "vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "postproc.h"
#include "common.h"
#include "recon.h"
#include "vpx_scale/yv12extend.h"
#include "vpx_scale/vpxscale.h"
#include "systemdependent.h"
-#include "../encoder/variance.h"
+#include "variance.h"

#include <math.h>
#include <stdlib.h>
#include <stdio.h>

#define RGB_TO_YUV(t) \
( (0.257*(float)(t>>16)) + (0.504*(float)(t>>8&0xff)) + (0.098*(float)(t&0xff)) + 16), \
(-(0.148*(float)(t>>16)) - (0.291*(float)(t>>8&0xff)) + (0.439*(float)(t&0xff)) + 128), \
diff --git a/media/libvpx/vp8/encoder/sad_c.c b/media/libvpx/vp8/common/sad_c.c
rename from media/libvpx/vp8/encoder/sad_c.c
rename to media/libvpx/vp8/common/sad_c.c
diff --git a/media/libvpx/vp8/encoder/variance.h b/media/libvpx/vp8/common/variance.h
rename from media/libvpx/vp8/encoder/variance.h
rename to media/libvpx/vp8/common/variance.h
--- a/media/libvpx/vp8/encoder/variance.h
+++ b/media/libvpx/vp8/common/variance.h
@@ -78,31 +78,31 @@
( \
const unsigned char *src_ptr, \
int source_stride, \
int xoffset, \
int yoffset, \
const unsigned char *ref_ptr, \
int Refstride, \
unsigned int *sse \
- );
+ )

#define prototype_ssimpf(sym) \
void (sym) \
( \
unsigned char *s, \
int sp, \
unsigned char *r, \
int rp, \
unsigned long *sum_s, \
unsigned long *sum_r, \
unsigned long *sum_sq_s, \
unsigned long *sum_sq_r, \
unsigned long *sum_sxr \
- );
+ )

#define prototype_getmbss(sym) unsigned int (sym)(const short *)

#define prototype_get16x16prederror(sym)\
unsigned int (sym)\
(\
const unsigned char *src_ptr, \
int source_stride, \
@@ -318,22 +318,22 @@ extern prototype_variance(vp8_variance_m
#ifndef vp8_variance_get4x4sse_cs
#define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_c
#endif
extern prototype_get16x16prederror(vp8_variance_get4x4sse_cs);

#ifndef vp8_ssimpf_8x8
#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_c
#endif
-extern prototype_ssimpf(vp8_ssimpf_8x8)
+extern prototype_ssimpf(vp8_ssimpf_8x8);

#ifndef vp8_ssimpf_16x16
#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_c
#endif
-extern prototype_ssimpf(vp8_ssimpf_16x16)
+extern prototype_ssimpf(vp8_ssimpf_16x16);

typedef prototype_sad(*vp8_sad_fn_t);
typedef prototype_sad_multi_same_address(*vp8_sad_multi_fn_t);
typedef prototype_sad_multi_same_address_1(*vp8_sad_multi1_fn_t);
typedef prototype_sad_multi_dif_address(*vp8_sad_multi_d_fn_t);
typedef prototype_variance(*vp8_variance_fn_t);
typedef prototype_variance2(*vp8_variance2_fn_t);
typedef prototype_subpixvariance(*vp8_subpixvariance_fn_t);
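[Editorial sketch, not part of the patch: the variance.h hunk above also tidies a macro convention. The prototype_* macros expand to an unterminated function declarator, so each use site can supply its own ';' -- which is why the patch drops the trailing ';' from the macro bodies and adds it to the extern declarations instead. A cut-down illustration of the pattern, with hypothetical names:]

    /* The macro body ends in ')' with no ';', so the same macro can
     * serve both extern declarations and typedefs without leaving a
     * stray empty declaration behind. */
    #define prototype_example(sym) \
        unsigned int (sym)(const unsigned char *src_ptr, int source_stride)

    extern prototype_example(vp8_example_c);   /* declares a function       */
    typedef prototype_example(*example_fn_t);  /* declares a pointer type   */
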
diff --git a/media/libvpx/vp8/encoder/variance_c.c b/media/libvpx/vp8/common/variance_c.c
rename from media/libvpx/vp8/encoder/variance_c.c
rename to media/libvpx/vp8/common/variance_c.c
--- a/media/libvpx/vp8/encoder/variance_c.c
+++ b/media/libvpx/vp8/common/variance_c.c
@@ -5,17 +5,17 @@
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/


#include "variance.h"
-#include "vp8/common/filter.h"
+#include "filter.h"


unsigned int vp8_get_mb_ss_c
(
const short *src_ptr
)
{
unsigned int i = 0, sum = 0;
@@ -451,8 +451,34 @@ unsigned int vp8_sub_pixel_variance8x16_
VFilter = vp8_bilinear_filters[yoffset];


var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);

return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
+
+unsigned int vp8_get4x4sse_cs_c
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride
+)
+{
+ int distortion = 0;
+ int r, c;
+
+ for (r = 0; r < 4; r++)
+ {
+ for (c = 0; c < 4; c++)
+ {
+ int diff = src_ptr[c] - ref_ptr[c];
+ distortion += diff * diff;
+ }
+
+ src_ptr += source_stride;
+ ref_ptr += recon_stride;
+ }
+
+ return distortion;
+}
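[Editorial sketch, not part of the patch: vp8_get4x4sse_cs_c above returns a raw sum of squared differences with no mean correction. For contrast, the var*x* hooks wired up throughout this patch compute a mean-corrected variance, Var = SSE - Sum^2/N. A hedged illustration of that computation for an 8x8 block (N = 64, so the correction is a right shift by 6); the function name is hypothetical, and only the shape follows variance_c.c:]

    /* Illustrative only.  Computes the SSE and mean-corrected variance
     * of an 8x8 block, the quantity the rtcd->variance.var8x8 hooks in
     * this patch provide. */
    static unsigned int variance8x8_sketch(const unsigned char *src_ptr, int source_stride,
                                           const unsigned char *ref_ptr, int recon_stride,
                                           unsigned int *sse)
    {
        int sum = 0;
        unsigned int sse_acc = 0;
        int r, c;

        for (r = 0; r < 8; r++)
        {
            for (c = 0; c < 8; c++)
            {
                int diff = src_ptr[c] - ref_ptr[c];
                sum += diff;
                sse_acc += (unsigned int)(diff * diff);
            }

            src_ptr += source_stride;
            ref_ptr += recon_stride;
        }

        *sse = sse_acc;
        /* Var = SSE - Sum^2 / N, with N = 64 for an 8x8 block. */
        return sse_acc - (unsigned int)((sum * sum) >> 6);
    }
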
diff --git a/media/libvpx/vp8/encoder/x86/sad_mmx.asm b/media/libvpx/vp8/common/x86/sad_mmx.asm
rename from media/libvpx/vp8/encoder/x86/sad_mmx.asm
rename to media/libvpx/vp8/common/x86/sad_mmx.asm
diff --git a/media/libvpx/vp8/encoder/x86/sad_sse2.asm b/media/libvpx/vp8/common/x86/sad_sse2.asm
rename from media/libvpx/vp8/encoder/x86/sad_sse2.asm
rename to media/libvpx/vp8/common/x86/sad_sse2.asm
diff --git a/media/libvpx/vp8/encoder/x86/sad_sse3.asm b/media/libvpx/vp8/common/x86/sad_sse3.asm
rename from media/libvpx/vp8/encoder/x86/sad_sse3.asm
rename to media/libvpx/vp8/common/x86/sad_sse3.asm
diff --git a/media/libvpx/vp8/encoder/x86/sad_sse4.asm b/media/libvpx/vp8/common/x86/sad_sse4.asm
rename from media/libvpx/vp8/encoder/x86/sad_sse4.asm
rename to media/libvpx/vp8/common/x86/sad_sse4.asm
diff --git a/media/libvpx/vp8/encoder/x86/sad_ssse3.asm b/media/libvpx/vp8/common/x86/sad_ssse3.asm
rename from media/libvpx/vp8/encoder/x86/sad_ssse3.asm
rename to media/libvpx/vp8/common/x86/sad_ssse3.asm
diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_mmx.asm b/media/libvpx/vp8/common/x86/variance_impl_mmx.asm
rename from media/libvpx/vp8/encoder/x86/variance_impl_mmx.asm
rename to media/libvpx/vp8/common/x86/variance_impl_mmx.asm
diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_sse2.asm b/media/libvpx/vp8/common/x86/variance_impl_sse2.asm
rename from media/libvpx/vp8/encoder/x86/variance_impl_sse2.asm
rename to media/libvpx/vp8/common/x86/variance_impl_sse2.asm
diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_ssse3.asm b/media/libvpx/vp8/common/x86/variance_impl_ssse3.asm
rename from media/libvpx/vp8/encoder/x86/variance_impl_ssse3.asm
rename to media/libvpx/vp8/common/x86/variance_impl_ssse3.asm
diff --git a/media/libvpx/vp8/encoder/x86/variance_mmx.c b/media/libvpx/vp8/common/x86/variance_mmx.c
rename from media/libvpx/vp8/encoder/x86/variance_mmx.c
rename to media/libvpx/vp8/common/x86/variance_mmx.c
--- a/media/libvpx/vp8/encoder/x86/variance_mmx.c
+++ b/media/libvpx/vp8/common/x86/variance_mmx.c
@@ -4,17 +4,17 @@
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#include "vpx_config.h"
-#include "vp8/encoder/variance.h"
+#include "vp8/common/variance.h"
#include "vp8/common/pragmas.h"
#include "vpx_ports/mem.h"
#include "vp8/common/x86/filter_x86.h"

extern void filter_block1d_h6_mmx
(
const unsigned char *src_ptr,
unsigned short *output_ptr,
diff --git a/media/libvpx/vp8/encoder/x86/variance_sse2.c b/media/libvpx/vp8/common/x86/variance_sse2.c
rename from media/libvpx/vp8/encoder/x86/variance_sse2.c
rename to media/libvpx/vp8/common/x86/variance_sse2.c
--- a/media/libvpx/vp8/encoder/x86/variance_sse2.c
+++ b/media/libvpx/vp8/common/x86/variance_sse2.c
@@ -4,17 +4,17 @@
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#include "vpx_config.h"
-#include "vp8/encoder/variance.h"
+#include "vp8/common/variance.h"
#include "vp8/common/pragmas.h"
#include "vpx_ports/mem.h"
#include "vp8/common/x86/filter_x86.h"

extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
diff --git a/media/libvpx/vp8/encoder/x86/variance_ssse3.c b/media/libvpx/vp8/common/x86/variance_ssse3.c
rename from media/libvpx/vp8/encoder/x86/variance_ssse3.c
rename to media/libvpx/vp8/common/x86/variance_ssse3.c
--- a/media/libvpx/vp8/encoder/x86/variance_ssse3.c
+++ b/media/libvpx/vp8/common/x86/variance_ssse3.c
@@ -4,17 +4,17 @@
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#include "vpx_config.h"
-#include "vp8/encoder/variance.h"
+#include "vp8/common/variance.h"
#include "vp8/common/pragmas.h"
#include "vpx_ports/mem.h"

extern unsigned int vp8_get16x16var_sse2
(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
diff --git a/media/libvpx/vp8/encoder/x86/variance_x86.h b/media/libvpx/vp8/common/x86/variance_x86.h
rename from media/libvpx/vp8/encoder/x86/variance_x86.h
rename to media/libvpx/vp8/common/x86/variance_x86.h
--- a/media/libvpx/vp8/encoder/x86/variance_x86.h
+++ b/media/libvpx/vp8/common/x86/variance_x86.h
@@ -135,18 +135,18 @@ extern prototype_subpixvariance(vp8_sub_
extern prototype_variance(vp8_variance_halfpixvar16x16_h_wmt);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_wmt);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_wmt);
extern prototype_getmbss(vp8_get_mb_ss_sse2);
extern prototype_variance(vp8_mse16x16_wmt);
extern prototype_variance2(vp8_get8x8var_sse2);
extern prototype_variance2(vp8_get16x16var_sse2);
-extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2)
-extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2)
+extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2);
+extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2);

#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_variance_sad4x4
#define vp8_variance_sad4x4 vp8_sad4x4_wmt

#undef vp8_variance_sad8x8
#define vp8_variance_sad8x8 vp8_sad8x8_wmt

diff --git a/media/libvpx/vp8/common/x86/x86_systemdependent.c b/media/libvpx/vp8/common/x86/x86_systemdependent.c
--- a/media/libvpx/vp8/common/x86/x86_systemdependent.c
+++ b/media/libvpx/vp8/common/x86/x86_systemdependent.c
@@ -10,16 +10,17 @@


#include "vpx_config.h"
#include "vpx_ports/x86.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
+#include "vp8/common/variance.h"
#include "vp8/common/pragmas.h"
#include "vp8/common/onyxc_int.h"

void vp8_arch_x86_common_init(VP8_COMMON *ctx)
{
#if CONFIG_RUNTIME_CPU_DETECT
VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
int flags = x86_simd_caps();
@@ -62,16 +63,43 @@ void vp8_arch_x86_common_init(VP8_COMMON
rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_mmx;
rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_mmx;
rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_mmx;
rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_mmx;
rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_mmx;
rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_mmx;
rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_mmx;

+ rtcd->variance.sad16x16 = vp8_sad16x16_mmx;
+ rtcd->variance.sad16x8 = vp8_sad16x8_mmx;
+ rtcd->variance.sad8x16 = vp8_sad8x16_mmx;
+ rtcd->variance.sad8x8 = vp8_sad8x8_mmx;
+ rtcd->variance.sad4x4 = vp8_sad4x4_mmx;
+
+ rtcd->variance.var4x4 = vp8_variance4x4_mmx;
+ rtcd->variance.var8x8 = vp8_variance8x8_mmx;
+ rtcd->variance.var8x16 = vp8_variance8x16_mmx;
+ rtcd->variance.var16x8 = vp8_variance16x8_mmx;
+ rtcd->variance.var16x16 = vp8_variance16x16_mmx;
+
+ rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
+ rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
+ rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
+ rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
+ rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
+ rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
+ rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
+
+ rtcd->variance.mse16x16 = vp8_mse16x16_mmx;
+ rtcd->variance.getmbss = vp8_get_mb_ss_mmx;
+
+ rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
+
#if CONFIG_POSTPROC
rtcd->postproc.down = vp8_mbpost_proc_down_mmx;
/*rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;*/
rtcd->postproc.downacross = vp8_post_proc_down_and_across_mmx;
rtcd->postproc.addnoise = vp8_plane_add_noise_mmx;
#endif
}

@@ -105,26 +133,81 @@ void vp8_arch_x86_common_init(VP8_COMMON
rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_sse2;
rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_sse2;
rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_sse2;
rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_sse2;
rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_sse2;
rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_sse2;
rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_sse2;

+ rtcd->variance.sad16x16 = vp8_sad16x16_wmt;
+ rtcd->variance.sad16x8 = vp8_sad16x8_wmt;
+ rtcd->variance.sad8x16 = vp8_sad8x16_wmt;
+ rtcd->variance.sad8x8 = vp8_sad8x8_wmt;
+ rtcd->variance.sad4x4 = vp8_sad4x4_wmt;
+ rtcd->variance.copy32xn = vp8_copy32xn_sse2;
+
+ rtcd->variance.var4x4 = vp8_variance4x4_wmt;
+ rtcd->variance.var8x8 = vp8_variance8x8_wmt;
+ rtcd->variance.var8x16 = vp8_variance8x16_wmt;
+ rtcd->variance.var16x8 = vp8_variance16x8_wmt;
+ rtcd->variance.var16x16 = vp8_variance16x16_wmt;
+
+ rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
+ rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
+ rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
+ rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
+ rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
+ rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
+ rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
+
+ rtcd->variance.mse16x16 = vp8_mse16x16_wmt;
+ rtcd->variance.getmbss = vp8_get_mb_ss_sse2;
+
+ /* rtcd->variance.get4x4sse_cs not implemented for wmt */;
+
+#if CONFIG_INTERNAL_STATS
+#if ARCH_X86_64
+ rtcd->variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
+ rtcd->variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
+#endif
+#endif
+
#if CONFIG_POSTPROC
rtcd->postproc.down = vp8_mbpost_proc_down_xmm;
rtcd->postproc.across = vp8_mbpost_proc_across_ip_xmm;
rtcd->postproc.downacross = vp8_post_proc_down_and_across_xmm;
rtcd->postproc.addnoise = vp8_plane_add_noise_wmt;
#endif
}

#endif

+#if HAVE_SSE3
+
+ if (flags & HAS_SSE3)
+ {
+ rtcd->variance.sad16x16 = vp8_sad16x16_sse3;
+ rtcd->variance.sad16x16x3 = vp8_sad16x16x3_sse3;
+ rtcd->variance.sad16x8x3 = vp8_sad16x8x3_sse3;
+ rtcd->variance.sad8x16x3 = vp8_sad8x16x3_sse3;
+ rtcd->variance.sad8x8x3 = vp8_sad8x8x3_sse3;
+ rtcd->variance.sad4x4x3 = vp8_sad4x4x3_sse3;
+ rtcd->variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
+ rtcd->variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
+ rtcd->variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
+ rtcd->variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
+ rtcd->variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
+ rtcd->variance.copy32xn = vp8_copy32xn_sse3;
+
+ }
+#endif
+
#if HAVE_SSSE3

if (flags & HAS_SSSE3)
{
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_ssse3;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_ssse3;
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_ssse3;
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict4x4_ssse3;
@@ -134,13 +217,30 @@ void vp8_arch_x86_common_init(VP8_COMMON
rtcd->recon.build_intra_predictors_mbuv =
vp8_build_intra_predictors_mbuv_ssse3;
rtcd->recon.build_intra_predictors_mbuv_s =
vp8_build_intra_predictors_mbuv_s_ssse3;
rtcd->recon.build_intra_predictors_mby =
vp8_build_intra_predictors_mby_ssse3;
rtcd->recon.build_intra_predictors_mby_s =
vp8_build_intra_predictors_mby_s_ssse3;
+
+ rtcd->variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
+ rtcd->variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
+
+ rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
+ rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
+ }
+#endif
+
+#if HAVE_SSE4_1
+ if (flags & HAS_SSE4_1)
+ {
+ rtcd->variance.sad16x16x8 = vp8_sad16x16x8_sse4;
+ rtcd->variance.sad16x8x8 = vp8_sad16x8x8_sse4;
+ rtcd->variance.sad8x16x8 = vp8_sad8x16x8_sse4;
+ rtcd->variance.sad8x8x8 = vp8_sad8x8x8_sse4;
+ rtcd->variance.sad4x4x8 = vp8_sad4x4x8_sse4;
}
#endif

#endif
}
diff --git a/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c b/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
--- a/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
+++ b/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
@@ -6,17 +6,16 @@
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/


#include "vpx_config.h"
#include "vpx_ports/arm.h"
-#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
extern void vp8_yv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);

void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
{
@@ -27,42 +26,16 @@ void vp8_arch_arm_encoder_init(VP8_COMP
if (flags & HAS_EDSP)
{
}
#endif

#if HAVE_ARMV6
if (flags & HAS_MEDIA)
{
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
- /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
-
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
- /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
-
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-
- /*cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
-
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_armv6;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_armv6;

/*cpi->rtcd.encodemb.berr = vp8_block_error_c;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
@@ -74,42 +47,16 @@ void vp8_arch_arm_encoder_init(VP8_COMP
/*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;*/
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_armv6;
}
#endif

#if HAVE_ARMV7
if (flags & HAS_NEON)
{
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
-
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
-
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-
- cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
-
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_neon;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_neon;

/*cpi->rtcd.encodemb.berr = vp8_block_error_c;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
diff --git a/media/libvpx/vp8/encoder/encodeframe.c b/media/libvpx/vp8/encoder/encodeframe.c
--- a/media/libvpx/vp8/encoder/encodeframe.c
+++ b/media/libvpx/vp8/encoder/encodeframe.c
@@ -93,17 +93,17 @@ static unsigned int tt_activity_measure(
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
- act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+ act = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)(x->src.y_buffer,
x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
act = act<<4;

/* If the region is flat, lower the activity some more. */
if (act < 8<<12)
act = act < 5<<12 ? act : 5<<12;

return act;
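[Editorial sketch, not part of the patch: the call-site edits in this and the following files change only where the vtable lives -- cpi->rtcd.variance (encoder-private) becomes cpi->common.rtcd.variance (the shared VP8_COMMON_RTCD). The VARIANCE_INVOKE macro itself does not change. Following the pattern of the other *_INVOKE macros in this tree, a sketch of its two expansions: an indirect call through the vtable when runtime CPU detection is compiled in, and a direct, statically bound call otherwise:]

    #if CONFIG_RUNTIME_CPU_DETECT
    #define VARIANCE_INVOKE(ctx, fn) (ctx)->fn          /* vtable lookup  */
    #else
    #define VARIANCE_INVOKE(ctx, fn) vp8_variance_##fn  /* direct call    */
    #endif
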
diff --git a/media/libvpx/vp8/encoder/encodeintra.c b/media/libvpx/vp8/encoder/encodeintra.c
--- a/media/libvpx/vp8/encoder/encodeintra.c
+++ b/media/libvpx/vp8/encoder/encodeintra.c
@@ -50,17 +50,17 @@ int vp8_encode_intra(VP8_COMP *cpi, MACR
{
for (i = 0; i < 16; i++)
{
x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
}

- intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+ intra_pred_var = VARIANCE_INVOKE(&cpi->common.rtcd.variance, getmbss)(x->src_diff);

return intra_pred_var;
}

void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib)
{
BLOCKD *b = &x->e_mbd.block[ib];
diff --git a/media/libvpx/vp8/encoder/firstpass.c b/media/libvpx/vp8/encoder/firstpass.c
--- a/media/libvpx/vp8/encoder/firstpass.c
+++ b/media/libvpx/vp8/encoder/firstpass.c
@@ -7,17 +7,17 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#include "math.h"
#include "limits.h"
#include "block.h"
#include "onyx_int.h"
-#include "variance.h"
+#include "vp8/common/variance.h"
#include "encodeintra.h"
#include "vp8/common/setupintrarecon.h"
#include "mcomp.h"
#include "firstpass.h"
#include "vpx_scale/vpxscale.h"
#include "encodemb.h"
#include "vp8/common/extend.h"
#include "vp8/common/systemdependent.h"
@@ -404,17 +404,17 @@ static void zz_motion_search( VP8_COMP *
unsigned char *ref_ptr;
int ref_stride=d->pre_stride;

// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;

ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );

- VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+ VARIANCE_INVOKE(IF_RTCD(&cpi->common.rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
}

static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *ref_mv, MV *best_mv,
YV12_BUFFER_CONFIG *recon_buffer,
int *best_motion_err, int recon_yoffset )
{
MACROBLOCKD *const xd = & x->e_mbd;
@@ -428,17 +428,17 @@ static void first_pass_motion_search(VP8
int tmp_err;
int step_param = 3; //3; // Dont search over full range for first pass
int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; //3;
int n;
vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
int new_mv_mode_penalty = 256;

// override the default variance function to use MSE
- v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+ v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->common.rtcd.variance), mse16x16);

// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;

// Initial step/diamond search centred on best mv
tmp_mv.as_int = 0;
ref_mv_full.as_mv.col = ref_mv->as_mv.col>>3;
ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
diff --git a/media/libvpx/vp8/encoder/generic/csystemdependent.c b/media/libvpx/vp8/encoder/generic/csystemdependent.c
--- a/media/libvpx/vp8/encoder/generic/csystemdependent.c
+++ b/media/libvpx/vp8/encoder/generic/csystemdependent.c
@@ -5,78 +5,31 @@
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/


#include "vpx_config.h"
-#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"


void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);

void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc,
YV12_BUFFER_CONFIG *dst_ybc);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
YV12_BUFFER_CONFIG *dst_ybc);

void vp8_cmachine_specific_config(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
cpi->rtcd.common = &cpi->common.rtcd;
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
-
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
-
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
-
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
-#if ARCH_X86 || ARCH_X86_64
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
-#endif
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
-
- cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;

cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;

cpi->rtcd.encodemb.berr = vp8_block_error_c;
@@ -91,20 +44,16 @@ void vp8_cmachine_specific_config(VP8_CO
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
#if !(CONFIG_REALTIME_ONLY)
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
#endif
-#if CONFIG_INTERNAL_STATS
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
-#endif
#endif

// Pure C:
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;

#if ARCH_X86 || ARCH_X86_64
vp8_arch_x86_encoder_init(cpi);
#endif
diff --git a/media/libvpx/vp8/encoder/mcomp.h b/media/libvpx/vp8/encoder/mcomp.h
--- a/media/libvpx/vp8/encoder/mcomp.h
+++ b/media/libvpx/vp8/encoder/mcomp.h
@@ -8,17 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/


#ifndef __INC_MCOMP_H
#define __INC_MCOMP_H

#include "block.h"
-#include "variance.h"
+#include "vp8/common/variance.h"

#ifdef ENTROPY_STATS
extern void init_mv_ref_counts();
extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#endif


#define MAX_MVSEARCH_STEPS 8 // The maximum number of steps in a step search given the largest allowed initial step
diff --git a/media/libvpx/vp8/encoder/onyx_if.c b/media/libvpx/vp8/encoder/onyx_if.c
--- a/media/libvpx/vp8/encoder/onyx_if.c
+++ b/media/libvpx/vp8/encoder/onyx_if.c
@@ -1948,72 +1948,72 @@ struct VP8_COMP* vp8_create_compressor(V
#ifdef ENTROPY_STATS
init_mv_ref_counts();
#endif

#if CONFIG_MULTITHREAD
vp8cx_create_encoder_threads(cpi);
#endif

- cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
- cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
- cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
- cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
- cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
- cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-
- cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
- cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
- cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
+ cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16);
+ cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16);
+ cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar16x16);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_h);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_v);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_hv);
+ cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x3);
+ cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x8);
+ cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x4d);
+
+ cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8);
+ cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x8);
+ cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar16x8);
cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
- cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
- cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
-
- cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
- cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
- cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
+ cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x3);
+ cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x8);
+ cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x4d);
+
+ cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16);
+ cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var8x16);
+ cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar8x16);
cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
- cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
- cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
-
- cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
- cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
- cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
+ cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x3);
+ cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x8);
+ cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x4d);
+
+ cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8);
+ cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var8x8);
+ cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar8x8);
cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
- cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
- cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
-
- cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
- cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
- cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
+ cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x3);
+ cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x8);
+ cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x4d);
+
+ cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4);
+ cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var4x4);
+ cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar4x4);
cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
- cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
- cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
+ cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x3);
+ cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x8);
+ cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x4d);

#if ARCH_X86 || ARCH_X86_64
- cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
|
||
|
+ cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
|
||
|
+ cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
|
||
|
#endif
|
||
|
|
||
|
cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
|
||
|
cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
|
||
|
cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
|
||
|
|
||
|
// make sure frame 1 is okay
|
||
|
cpi->error_bins[0] = cpi->common.MBs;
|
||
|
@@ -2410,38 +2410,38 @@ static void generate_psnr_packet(VP8_COM
|
||
|
int i;
|
||
|
unsigned int width = cpi->common.Width;
|
||
|
unsigned int height = cpi->common.Height;
|
||
|
|
||
|
pkt.kind = VPX_CODEC_PSNR_PKT;
|
||
|
sse = calc_plane_error(orig->y_buffer, orig->y_stride,
|
||
|
recon->y_buffer, recon->y_stride,
|
||
|
width, height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
pkt.data.psnr.sse[0] = sse;
|
||
|
pkt.data.psnr.sse[1] = sse;
|
||
|
pkt.data.psnr.samples[0] = width * height;
|
||
|
pkt.data.psnr.samples[1] = width * height;
|
||
|
|
||
|
width = (width + 1) / 2;
|
||
|
height = (height + 1) / 2;
|
||
|
|
||
|
sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
|
||
|
recon->u_buffer, recon->uv_stride,
|
||
|
width, height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
pkt.data.psnr.sse[0] += sse;
|
||
|
pkt.data.psnr.sse[2] = sse;
|
||
|
pkt.data.psnr.samples[0] += width * height;
|
||
|
pkt.data.psnr.samples[2] = width * height;
|
||
|
|
||
|
sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
|
||
|
recon->v_buffer, recon->uv_stride,
|
||
|
width, height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
pkt.data.psnr.sse[0] += sse;
|
||
|
pkt.data.psnr.sse[3] = sse;
|
||
|
pkt.data.psnr.samples[0] += width * height;
|
||
|
pkt.data.psnr.samples[3] = width * height;
|
||
|
|
||
|
for (i = 0; i < 4; i++)
|
||
|
pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0,
|
||
|
pkt.data.psnr.sse[i]);
|
||
|
@@ -3821,17 +3821,17 @@ static void encode_frame_to_data_rate
|
||
|
|
||
|
#if !(CONFIG_REALTIME_ONLY)
|
||
|
// Special case handling for forced key frames
|
||
|
if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
|
||
|
{
|
||
|
int last_q = Q;
|
||
|
int kf_err = vp8_calc_ss_err(cpi->Source,
|
||
|
&cm->yv12_fb[cm->new_fb_idx],
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
// The key frame is not good enough
|
||
|
if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
|
||
|
{
|
||
|
// Lower q_high
|
||
|
q_high = (Q > q_low) ? (Q - 1) : q_low;
|
||
|
|
||
|
// Adjust Q
|
||
|
@@ -4018,17 +4018,17 @@ static void encode_frame_to_data_rate
|
||
|
|
||
|
// Special case code to reduce pulsing when key frames are forced at a
|
||
|
// fixed interval. Note the reconstruction error if it is the frame before
|
||
|
// the force key frame
|
||
|
if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
|
||
|
{
|
||
|
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
|
||
|
&cm->yv12_fb[cm->new_fb_idx],
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
}
|
||
|
|
||
|
/* This frame's MVs are saved and will be used in next frame's MV predictor.
|
||
|
* Last frame has one more line(add to bottom) and one more column(add to
|
||
|
* right) than cm->mip. The edge elements are initialized to 0.
|
||
|
*/
|
||
|
#if CONFIG_MULTI_RES_ENCODING
|
||
|
if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
|
||
|
@@ -4963,25 +4963,25 @@ int vp8_get_compressed_data(VP8_COMP *cp
|
||
|
YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
|
||
|
int y_samples = orig->y_height * orig->y_width ;
|
||
|
int uv_samples = orig->uv_height * orig->uv_width ;
|
||
|
int t_samples = y_samples + 2 * uv_samples;
|
||
|
int64_t sq_error, sq_error2;
|
||
|
|
||
|
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
|
||
|
recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
|
||
|
recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
|
||
|
recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
sq_error = ye + ue + ve;
|
||
|
|
||
|
frame_psnr = vp8_mse2psnr(t_samples, 255.0, sq_error);
|
||
|
|
||
|
cpi->total_y += vp8_mse2psnr(y_samples, 255.0, ye);
|
||
|
cpi->total_u += vp8_mse2psnr(uv_samples, 255.0, ue);
|
||
|
cpi->total_v += vp8_mse2psnr(uv_samples, 255.0, ve);
|
||
|
@@ -4991,39 +4991,39 @@ int vp8_get_compressed_data(VP8_COMP *cp
|
||
|
double frame_psnr2, frame_ssim2 = 0;
|
||
|
double weight = 0;
|
||
|
|
||
|
vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
|
||
|
vp8_clear_system_state();
|
||
|
|
||
|
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
|
||
|
pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
|
||
|
pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
|
||
|
pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
sq_error2 = ye + ue + ve;
|
||
|
|
||
|
frame_psnr2 = vp8_mse2psnr(t_samples, 255.0, sq_error2);
|
||
|
|
||
|
cpi->totalp_y += vp8_mse2psnr(y_samples, 255.0, ye);
|
||
|
cpi->totalp_u += vp8_mse2psnr(uv_samples, 255.0, ue);
|
||
|
cpi->totalp_v += vp8_mse2psnr(uv_samples, 255.0, ve);
|
||
|
cpi->total_sq_error2 += sq_error2;
|
||
|
cpi->totalp += frame_psnr2;
|
||
|
|
||
|
frame_ssim2 = vp8_calc_ssim(cpi->Source,
|
||
|
&cm->post_proc_buffer, 1, &weight,
|
||
|
- IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
cpi->summed_quality += frame_ssim2 * weight;
|
||
|
cpi->summed_weights += weight;
|
||
|
|
||
|
if (cpi->oxcf.number_of_layers > 1)
|
||
|
{
|
||
|
int i;
|
||
|
|
||
|
@@ -5043,17 +5043,17 @@ int vp8_get_compressed_data(VP8_COMP *cp
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
if (cpi->b_calculate_ssimg)
|
||
|
{
|
||
|
double y, u, v, frame_all;
|
||
|
frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
|
||
|
- &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
|
||
|
+ &y, &u, &v, IF_RTCD(&cpi->common.rtcd.variance));
|
||
|
|
||
|
if (cpi->oxcf.number_of_layers > 1)
|
||
|
{
|
||
|
int i;
|
||
|
|
||
|
for (i=cpi->current_layer;
|
||
|
i<cpi->oxcf.number_of_layers; i++)
|
||
|
{
|
||
|
diff --git a/media/libvpx/vp8/encoder/onyx_int.h b/media/libvpx/vp8/encoder/onyx_int.h
--- a/media/libvpx/vp8/encoder/onyx_int.h
+++ b/media/libvpx/vp8/encoder/onyx_int.h
@@ -13,17 +13,17 @@
 #define __INC_VP8_INT_H

 #include <stdio.h>
 #include "vpx_config.h"
 #include "vp8/common/onyx.h"
 #include "treewriter.h"
 #include "tokenize.h"
 #include "vp8/common/onyxc_int.h"
-#include "variance.h"
+#include "vp8/common/variance.h"
 #include "dct.h"
 #include "encodemb.h"
 #include "quantize.h"
 #include "vp8/common/entropy.h"
 #include "vp8/common/threading.h"
 #include "vpx_ports/mem.h"
 #include "vpx/internal/vpx_codec_internal.h"
 #include "mcomp.h"
@@ -220,17 +220,16 @@ typedef struct
     int ithread;
     void *ptr1;
 } LPFTHREAD_DATA;


 typedef struct VP8_ENCODER_RTCD
 {
     VP8_COMMON_RTCD *common;
-    vp8_variance_rtcd_vtable_t variance;
     vp8_fdct_rtcd_vtable_t fdct;
     vp8_encodemb_rtcd_vtable_t encodemb;
     vp8_quantize_rtcd_vtable_t quantize;
     vp8_search_rtcd_vtable_t search;
     vp8_temporal_rtcd_vtable_t temporal;
 } VP8_ENCODER_RTCD;

 enum
diff --git a/media/libvpx/vp8/encoder/pickinter.c b/media/libvpx/vp8/encoder/pickinter.c
--- a/media/libvpx/vp8/encoder/pickinter.c
+++ b/media/libvpx/vp8/encoder/pickinter.c
@@ -16,17 +16,17 @@
 #include "encodeintra.h"
 #include "vp8/common/entropymode.h"
 #include "pickinter.h"
 #include "vp8/common/findnearmv.h"
 #include "encodemb.h"
 #include "vp8/common/reconinter.h"
 #include "vp8/common/reconintra.h"
 #include "vp8/common/reconintra4x4.h"
-#include "variance.h"
+#include "vp8/common/variance.h"
 #include "mcomp.h"
 #include "rdopt.h"
 #include "vpx_mem/vpx_mem.h"

 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
 #else
 #define IF_RTCD(x) NULL
@@ -90,42 +90,16 @@ static int get_inter_mbpred_error(MACROB
     else
     {
         return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
     }

 }


-unsigned int vp8_get4x4sse_cs_c
-(
-    const unsigned char *src_ptr,
-    int source_stride,
-    const unsigned char *ref_ptr,
-    int recon_stride
-)
-{
-    int distortion = 0;
-    int r, c;
-
-    for (r = 0; r < 4; r++)
-    {
-        for (c = 0; c < 4; c++)
-        {
-            int diff = src_ptr[c] - ref_ptr[c];
-            distortion += diff * diff;
-        }
-
-        src_ptr += source_stride;
-        ref_ptr += recon_stride;
-    }
-
-    return distortion;
-}
-
 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
 {
     unsigned char *sptr;
     unsigned char *dptr;
     sptr = (*(be->base_src) + be->src);
     dptr = b->predictor;

     return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
@@ -153,17 +127,17 @@ static int pick_intra4x4block(
     for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
     {
         int this_rd;

         rate = mode_costs[mode];
         RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
                      (*(b->base_dst) + b->dst, b->dst_stride,
                       mode, b->predictor, 16);
-        distortion = get_prediction_error(be, b, &rtcd->variance);
+        distortion = get_prediction_error(be, b, &rtcd->common->variance);
         this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

         if (this_rd < best_rd)
         {
             *bestrate = rate;
             *bestdistortion = distortion;
             best_rd = this_rd;
             *best_mode = mode;
@@ -671,17 +645,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,
                 if (distortion2 == INT_MAX)
                 {
                     this_rd = INT_MAX;
                 }
                 else
                 {
                     rate2 += rate;
                     distortion2 = VARIANCE_INVOKE
-                                    (&cpi->rtcd.variance, var16x16)(
+                                    (&cpi->common.rtcd.variance, var16x16)(
                                         *(b->base_src), b->src_stride,
                                         x->e_mbd.predictor, 16, &sse);
                     this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

                     if (this_rd < best_intra_rd)
                     {
                         best_intra_rd = this_rd;
                         *returnintra = distortion2;
@@ -696,17 +670,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,
             break;

         case DC_PRED:
         case V_PRED:
         case H_PRED:
         case TM_PRED:
             RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
                 (&x->e_mbd);
-            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+            distortion2 = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
                                           (*(b->base_src), b->src_stride,
                                            x->e_mbd.predictor, 16, &sse);
             rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
             this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

             if (this_rd < best_intra_rd)
             {
                 best_intra_rd = this_rd;
@@ -933,17 +907,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,

             this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

             if (sse < x->encode_breakout)
             {
                 // Check u and v to make sure skip is ok
                 int sse2 = 0;

-                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->common.rtcd.variance));

                 if (sse2 * 2 < x->encode_breakout)
                     x->skip = 1;
                 else
                     x->skip = 0;
             }

             break;
@@ -1067,17 +1041,17 @@ void vp8_pick_intra_mode(VP8_COMP *cpi,

     pick_intra_mbuv_mode(x);

     for (mode = DC_PRED; mode <= TM_PRED; mode ++)
     {
         x->e_mbd.mode_info_context->mbmi.mode = mode;
         RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
             (&x->e_mbd);
-        distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+        distortion = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
             (*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
         rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
         this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

         if (error16x16 > this_rd)
         {
             error16x16 = this_rd;
             best_mode = mode;
diff --git a/media/libvpx/vp8/encoder/picklpf.c b/media/libvpx/vp8/encoder/picklpf.c
--- a/media/libvpx/vp8/encoder/picklpf.c
+++ b/media/libvpx/vp8/encoder/picklpf.c
@@ -179,30 +179,30 @@ void vp8cx_pick_filter_level_fast(YV12_B

     // Get the err using the previous frame's filter value.

     /* Copy the unfiltered / processed recon buffer to the new buffer */
     vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
     vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

     best_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                    IF_RTCD(&cpi->rtcd.variance));
+                                    IF_RTCD(&cpi->common.rtcd.variance));

     filt_val -= 1 + (filt_val > 10);

     // Search lower filter levels
     while (filt_val >= min_filter_level)
     {
         // Apply the loop filter
         vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
         vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

         // Get the err for filtered frame
         filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                        IF_RTCD(&cpi->rtcd.variance));
+                                        IF_RTCD(&cpi->common.rtcd.variance));

         // Update the best case record or exit loop.
         if (filt_err < best_err)
         {
             best_err = filt_err;
             best_filt_val = filt_val;
         }
         else
@@ -224,17 +224,17 @@ void vp8cx_pick_filter_level_fast(YV12_B
         {
             // Apply the loop filter
             vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);

             vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

             // Get the err for filtered frame
             filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                            IF_RTCD(&cpi->rtcd.variance));
+                                            IF_RTCD(&cpi->common.rtcd.variance));

             // Update the best case record or exit loop.
             if (filt_err < best_err)
             {
                 // Do not raise filter level if improvement is < 1 part in 4096
                 best_err = filt_err - (filt_err >> 10);

                 best_filt_val = filt_val;
@@ -318,17 +318,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER

     /* Copy the unfiltered / processed recon buffer to the new buffer */
     vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);

     vp8cx_set_alt_lf_level(cpi, filt_mid);
     vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);

     best_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                               IF_RTCD(&cpi->rtcd.variance));
+                               IF_RTCD(&cpi->common.rtcd.variance));

     ss_err[filt_mid] = best_err;

     filt_best = filt_mid;

     while (filter_step > 0)
     {
         Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
@@ -345,17 +345,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER
         if(ss_err[filt_low] == 0)
         {
             // Get Low filter error score
             vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);
             vp8cx_set_alt_lf_level(cpi, filt_low);
             vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);

             filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                                       IF_RTCD(&cpi->rtcd.variance));
+                                       IF_RTCD(&cpi->common.rtcd.variance));
             ss_err[filt_low] = filt_err;
         }
         else
             filt_err = ss_err[filt_low];

         // If value is close to the best so far then bias towards a lower loop filter value.
         if ((filt_err - Bias) < best_err)
         {
@@ -372,17 +372,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER
         {
             if(ss_err[filt_high] == 0)
             {
                 vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);
                 vp8cx_set_alt_lf_level(cpi, filt_high);
                 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);

                 filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                                           IF_RTCD(&cpi->rtcd.variance));
+                                           IF_RTCD(&cpi->common.rtcd.variance));
                 ss_err[filt_high] = filt_err;
             }
             else
                 filt_err = ss_err[filt_high];

             // Was it better than the previous best?
             if (filt_err < (best_err - Bias))
             {
diff --git a/media/libvpx/vp8/encoder/rdopt.c b/media/libvpx/vp8/encoder/rdopt.c
--- a/media/libvpx/vp8/encoder/rdopt.c
+++ b/media/libvpx/vp8/encoder/rdopt.c
@@ -23,17 +23,17 @@
 #include "vp8/common/entropymode.h"
 #include "vp8/common/reconinter.h"
 #include "vp8/common/reconintra.h"
 #include "vp8/common/reconintra4x4.h"
 #include "vp8/common/findnearmv.h"
 #include "encodemb.h"
 #include "quantize.h"
 #include "vp8/common/idct.h"
-#include "variance.h"
+#include "vp8/common/variance.h"
 #include "mcomp.h"
 #include "rdopt.h"
 #include "vpx_mem/vpx_mem.h"
 #include "dct.h"
 #include "vp8/common/systemdependent.h"

 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
@@ -2132,30 +2132,30 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cp
                 unsigned int sse;
                 unsigned int var;
                 int threshold = (xd->block[0].dequant[1]
                             * xd->block[0].dequant[1] >>4);

                 if(threshold < x->encode_breakout)
                     threshold = x->encode_breakout;

-                var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+                var = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
                         (*(b->base_src), b->src_stride,
                         x->e_mbd.predictor, 16, &sse);

                 if (sse < threshold)
                 {
                     unsigned int q2dc = xd->block[24].dequant[0];
                     /* If theres is no codeable 2nd order dc
                        or a very small uniform pixel change change */
                     if ((sse - var < q2dc * q2dc >>4) ||
                         (sse /2 > var && sse-var < 64))
                     {
                         // Check u and v to make sure skip is ok
-                        int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+                        int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->common.rtcd.variance));
                         if (sse2 * 2 < threshold)
                         {
                             x->skip = 1;
                             distortion2 = sse + sse2;
                             rate2 = 500;

                             /* for best_yrd calculation */
                             rate_uv = 0;
diff --git a/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c b/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
--- a/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
+++ b/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
@@ -6,17 +6,16 @@
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */


 #include "vpx_config.h"
 #include "vpx_ports/x86.h"
-#include "vp8/encoder/variance.h"
 #include "vp8/encoder/onyx_int.h"


 #if HAVE_MMX
 void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
 {
     vp8_short_fdct4x4_mmx(input, output, pitch);
     vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
@@ -122,43 +121,16 @@ void vp8_arch_x86_encoder_init(VP8_COMP
      * you modify any of the function mappings present in this file, be sure
      * to also update them in static mapings (<arch>/filename_<arch>.h)
      */

     /* Override default functions with fastest ones for this CPU. */
 #if HAVE_MMX
     if (flags & HAS_MMX)
     {
-        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
-        cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
-        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
-        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
-        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
-
-        cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
-        cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
-        cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
-        cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
-        cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;
-
-        cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
-        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
-        cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
-        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
-        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
-        cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
-
-        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
-        cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
-
-        cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
-
         cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
         cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
         cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
         cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;

         cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;

         cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
@@ -170,44 +142,16 @@ void vp8_arch_x86_encoder_init(VP8_COMP

         /*cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_mmx;*/
     }
 #endif

 #if HAVE_SSE2
     if (flags & HAS_SSE2)
     {
-        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
-        cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
-        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
-        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
-        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;
-        cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse2;
-
-        cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
-        cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
-        cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
-        cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
-        cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;
-
-        cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
-        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
-        cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
-        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
-        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
-        cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
-
-        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
-        cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
-
-        /* cpi->rtcd.variance.get4x4sse_cs not implemented for wmt */;
-
         cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
         cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
         cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
         cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;

         cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2 ;

         cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
@@ -219,69 +163,40 @@ void vp8_arch_x86_encoder_init(VP8_COMP

         cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
         cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;

 #if !(CONFIG_REALTIME_ONLY)
         cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
 #endif

-#if CONFIG_INTERNAL_STATS
-#if ARCH_X86_64
-        cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
-        cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
-#endif
-#endif
     }
 #endif

 #if HAVE_SSE3
     if (flags & HAS_SSE3)
     {
-        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
-        cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
-        cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
-        cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
-        cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
-        cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
         cpi->rtcd.search.full_search = vp8_full_search_sadx3;
-        cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
-        cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
-        cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
-        cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
-        cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
-        cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse3;
         cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
         cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
     }
 #endif

 #if HAVE_SSSE3
     if (flags & HAS_SSSE3)
     {
-        cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
-        cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
-
-        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
-        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
-
         cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
     }
 #endif



 #if HAVE_SSE4_1
     if (flags & HAS_SSE4_1)
     {
-        cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
-        cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
-        cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
-        cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
-        cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
         cpi->rtcd.search.full_search = vp8_full_search_sadx8;

         cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse4;
     }
 #endif

 #endif
 }