Use CONFIG_FAST_UNALIGNED consistently in codec
CONFIG_FAST_UNALIGNED is enabled by default. Disable it on targets whose hardware does not support unaligned word accesses.

Change-Id: I7d6905ed79fed918bca074bd62820b0c929d81ab
Parent: 773bcc300d
Commit: 65dfcf4696
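For context, a minimal sketch of the guard pattern this change standardizes on, assuming CONFIG_FAST_UNALIGNED is defined to 0 or 1 by the build configuration (pulled in through "vpx_ports/config.h" in the tree). The copy8 helper below is hypothetical and only mirrors the shape of the code in the diff; it is not part of the commit.

#include <stdint.h>

/* Normally supplied by the generated build config; defaulted here only so
 * the sketch compiles standalone. */
#ifndef CONFIG_FAST_UNALIGNED
#define CONFIG_FAST_UNALIGNED 1
#endif

static void copy8(unsigned char *dst, const unsigned char *src)
{
#if !(CONFIG_FAST_UNALIGNED)
    /* Byte-wise copy: safe on targets that fault on unaligned word access. */
    int i;

    for (i = 0; i < 8; i++)
        dst[i] = src[i];
#else
    /* Two 32-bit copies: assumes the target tolerates unaligned accesses. */
    ((uint32_t *)dst)[0] = ((const uint32_t *)src)[0];
    ((uint32_t *)dst)[1] = ((const uint32_t *)src)[1];
#endif
}

On the configure side, the sparc-solaris-* toolchain case below switches from injecting -DMUST_BE_ALIGNED to calling disable fast_unaligned, so the same config flag now drives both the build system and the C sources.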
@@ -629,7 +629,7 @@ process_common_toolchain() {
     case ${toolchain} in
         sparc-solaris-*)
             add_extralibs -lposix4
-            add_cflags "-DMUST_BE_ALIGNED"
+            disable fast_unaligned
             ;;
         *-solaris-*)
             add_extralibs -lposix4
@@ -10,6 +10,7 @@
 
 
 #include "vpx_ports/config.h"
+#include "vpx/vpx_integer.h"
 #include "recon.h"
 #include "subpixel.h"
 #include "blockd.h"
@@ -18,12 +19,6 @@
 #include "onyxc_int.h"
 #endif
 
-/* use this define on systems where unaligned int reads and writes are
- * not allowed, i.e. ARM architectures
- */
-/*#define MUST_BE_ALIGNED*/
-
-
 static const int bbb[4] = {0, 2, 8, 10};
 
 
@@ -39,7 +34,7 @@ void vp8_copy_mem16x16_c(
 
     for (r = 0; r < 16; r++)
     {
-#ifdef MUST_BE_ALIGNED
+#if !(CONFIG_FAST_UNALIGNED)
         dst[0] = src[0];
         dst[1] = src[1];
         dst[2] = src[2];
@@ -58,10 +53,10 @@ void vp8_copy_mem16x16_c(
         dst[15] = src[15];
 
 #else
-        ((int *)dst)[0] = ((int *)src)[0] ;
-        ((int *)dst)[1] = ((int *)src)[1] ;
-        ((int *)dst)[2] = ((int *)src)[2] ;
-        ((int *)dst)[3] = ((int *)src)[3] ;
+        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
+        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
+        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2] ;
+        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3] ;
 
 #endif
         src += src_stride;
@@ -81,7 +76,7 @@ void vp8_copy_mem8x8_c(
 
     for (r = 0; r < 8; r++)
     {
-#ifdef MUST_BE_ALIGNED
+#if !(CONFIG_FAST_UNALIGNED)
         dst[0] = src[0];
         dst[1] = src[1];
         dst[2] = src[2];
@@ -91,8 +86,8 @@ void vp8_copy_mem8x8_c(
         dst[6] = src[6];
         dst[7] = src[7];
 #else
-        ((int *)dst)[0] = ((int *)src)[0] ;
-        ((int *)dst)[1] = ((int *)src)[1] ;
+        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
+        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
 #endif
         src += src_stride;
         dst += dst_stride;
@@ -111,7 +106,7 @@ void vp8_copy_mem8x4_c(
 
     for (r = 0; r < 4; r++)
    {
-#ifdef MUST_BE_ALIGNED
+#if !(CONFIG_FAST_UNALIGNED)
         dst[0] = src[0];
         dst[1] = src[1];
         dst[2] = src[2];
@@ -121,8 +116,8 @@ void vp8_copy_mem8x4_c(
         dst[6] = src[6];
         dst[7] = src[7];
 #else
-        ((int *)dst)[0] = ((int *)src)[0] ;
-        ((int *)dst)[1] = ((int *)src)[1] ;
+        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
+        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
 #endif
         src += src_stride;
         dst += dst_stride;
@@ -154,13 +149,13 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
 
     for (r = 0; r < 4; r++)
     {
-#ifdef MUST_BE_ALIGNED
+#if !(CONFIG_FAST_UNALIGNED)
         pred_ptr[0] = ptr[0];
         pred_ptr[1] = ptr[1];
         pred_ptr[2] = ptr[2];
         pred_ptr[3] = ptr[3];
 #else
-        *(int *)pred_ptr = *(int *)ptr ;
+        *(uint32_t *)pred_ptr = *(uint32_t *)ptr ;
 #endif
         pred_ptr += pitch;
         ptr += d->pre_stride;