VP8 optimizations for MIPS dspr2
Signed-off-by: Raghu Gandham <raghu@mips.com> Change-Id: I3a8bca425cd3dab746a6328c8fc8843c8e87aea6
This commit is contained in:
Родитель
bb3d510a18
Коммит
07ff7fa811
|
@ -943,13 +943,16 @@ process_common_toolchain() {
|
|||
esac
|
||||
;;
|
||||
mips*)
|
||||
CROSS=${CROSS:-mipsel-linux-uclibc-}
|
||||
link_with_cc=gcc
|
||||
setup_gnu_toolchain
|
||||
tune_cflags="-mtune="
|
||||
if enabled dspr2; then
|
||||
check_add_cflags -mips32r2 -mdspr2
|
||||
disable fast_unaligned
|
||||
fi
|
||||
check_add_cflags -march=${tgt_isa}
|
||||
check_add_asflags -march=${tgt_isa}
|
||||
check_add_asflags -KPIC
|
||||
check_add_asflags -march=${tgt_isa}
|
||||
check_add_asflags -KPIC
|
||||
;;
|
||||
ppc*)
|
||||
enable ppc
|
||||
|
|
|
@ -278,6 +278,29 @@ EOF
|
|||
}
|
||||
|
||||
|
||||
# mips() - emit the run-time CPU detection (RTCD) dispatch source for MIPS
# targets on stdout.  determine_indirection picks, per prototype, the best
# implementation among $ALL_ARCHS; the generated ${symbol:-rtcd}() installs
# those choices and, in DSPr2 builds, also runs dsputil_static_init() once.
# NOTE: the heredoc below is emitted verbatim as C source - do not indent
# or comment inside it.
mips() {
  determine_indirection c $ALL_ARCHS
  cat <<EOF
$(common_top)
#include "vpx_config.h"

void ${symbol:-rtcd}(void);

#ifdef RTCD_C
void ${symbol:-rtcd}(void)
{
$(set_function_pointers c)
#if HAVE_DSPR2
void dsputil_static_init();
dsputil_static_init();
#endif
}
#endif
$(common_bottom)
EOF

}
|
||||
|
||||
unoptimized() {
|
||||
determine_indirection c
|
||||
cat <<EOF
|
||||
|
@ -309,6 +332,15 @@ case $arch in
|
|||
require $(filter $REQUIRES)
|
||||
x86
|
||||
;;
|
||||
mips32)
|
||||
ALL_ARCHS=$(filter mips32)
|
||||
dspr2=$([ -f "$config_file" ] && eval echo $(grep HAVE_DSPR2 "$config_file"))
|
||||
HAVE_DSPR2="${dspr2#*=}"
|
||||
if [ "$HAVE_DSPR2" = "yes" ]; then
|
||||
ALL_ARCHS=$(filter mips32 dspr2)
|
||||
fi
|
||||
mips
|
||||
;;
|
||||
armv5te)
|
||||
ALL_ARCHS=$(filter edsp)
|
||||
arm
|
||||
|
|
|
@ -209,6 +209,7 @@ ARCH_EXT_LIST="
|
|||
neon
|
||||
|
||||
mips32
|
||||
dspr2
|
||||
|
||||
mmx
|
||||
sse
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
|
||||
#include "vpx_config.h"
|
||||
#include "vpx_rtcd.h"
|
||||
#include "vpx_mem/vpx_mem.h"
|
||||
|
||||
#if HAVE_DSPR2
|
||||
/* Dequantize one 4x4 block in place, run the full inverse DCT with
 * reconstruction into dest, then clear the coefficient block so it is
 * ready for reuse.
 *   input  - 16 quantized coefficients (modified, then zeroed)
 *   dq     - 16 per-coefficient dequantization factors
 *   dest   - reconstruction target (also used as the predictor)
 *   stride - byte stride of dest
 */
void vp8_dequant_idct_add_dspr2(short *input, short *dq,
                                unsigned char *dest, int stride)
{
    int k;

    /* Scale every coefficient by its dequantization factor. */
    for (k = 16; k--;)
    {
        input[k] = (short)(dq[k] * input[k]);
    }

    /* Inverse transform and add to the predictor (dest acts as both). */
    vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride);

    /* Zero the block: 32 bytes - assumes 16-bit shorts (16 coeffs). */
    vpx_memset(input, 0, 32);
}
|
||||
|
||||
#endif
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "vpx_config.h"
|
||||
#include "vpx_rtcd.h"
|
||||
|
||||
#if HAVE_DSPR2
|
||||
|
||||
/* Dequantize + inverse transform the 16 luma 4x4 sub-blocks of a
 * macroblock, reconstructing into dst.
 *   q      - 16 blocks x 16 coefficients, consecutive in memory
 *   dq     - shared dequantization factors
 *   dst    - 16x16 luma reconstruction target
 *   stride - byte stride of dst
 *   eobs   - one end-of-block count per 4x4 sub-block
 */
void vp8_dequant_idct_add_y_block_dspr2
(short *q, short *dq,
 unsigned char *dst, int stride, char *eobs)
{
    int row, col;

    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            if (*eobs++ <= 1)
            {
                /* At most the DC coefficient is set: cheap DC-only path,
                 * then clear the DC (first two shorts via one int store). */
                vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dst, stride, dst, stride);
                ((int *)q)[0] = 0;
            }
            else
            {
                vp8_dequant_idct_add_dspr2(q, dq, dst, stride);
            }

            q += 16;   /* next coefficient block   */
            dst += 4;  /* next 4x4 column position */
        }

        /* Down 4 pixel rows, back to the left edge of the 16-wide row. */
        dst += 4 * stride - 16;
    }
}
|
||||
|
||||
/* Dequantize + inverse transform one 8x8 chroma plane (a 2x2 grid of 4x4
 * sub-blocks), reconstructing into dst.  Consumes 4 entries from *eobs and
 * advances the caller's eobs cursor; returns the advanced coefficient
 * pointer so the caller can continue with the next plane.
 */
static short *dequant_idct_add_plane_dspr2(short *q, short *dq,
                                           unsigned char *dst, int stride,
                                           char **eobs)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            if (*(*eobs)++ > 1)
            {
                vp8_dequant_idct_add_dspr2(q, dq, dst, stride);
            }
            else
            {
                /* DC-only sub-block: fast path, then clear the DC
                 * (first two shorts via one int store). */
                vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dst, stride, dst, stride);
                ((int *)q)[0] = 0;
            }

            q += 16;
            dst += 4;
        }

        /* Down 4 pixel rows, back to the left edge of the 8-wide plane. */
        dst += 4 * stride - 8;
    }

    return q;
}

/* Dequantize + inverse transform the chroma blocks of a macroblock:
 * 4 sub-blocks into the U plane followed by 4 into the V plane.
 *   q      - 8 blocks x 16 coefficients (U first, then V)
 *   dq     - shared dequantization factors
 *   dstu   - 8x8 U reconstruction target
 *   dstv   - 8x8 V reconstruction target
 *   stride - byte stride of both chroma planes
 *   eobs   - one end-of-block count per sub-block (8 entries)
 * The per-plane work was duplicated verbatim for U and V; it is factored
 * into dequant_idct_add_plane_dspr2 above (behavior unchanged).
 */
void vp8_dequant_idct_add_uv_block_dspr2
(short *q, short *dq,
 unsigned char *dstu, unsigned char *dstv, int stride, char *eobs)
{
    q = dequant_idct_add_plane_dspr2(q, dq, dstu, stride, &eobs);
    dequant_idct_add_plane_dspr2(q, dq, dstv, stride, &eobs);
}
|
||||
|
||||
#endif
|
||||
|
|
@ -0,0 +1,369 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "vpx_rtcd.h"
|
||||
|
||||
#if HAVE_DSPR2
|
||||
#define CROP_WIDTH 256
|
||||
|
||||
/******************************************************************************
|
||||
* Notes:
|
||||
*
|
||||
* This implementation makes use of 16 bit fixed point version of two multiply
|
||||
* constants:
|
||||
* 1. sqrt(2) * cos (pi/8)
|
||||
* 2. sqrt(2) * sin (pi/8)
|
||||
* Since the first constant is bigger than 1, to maintain the same 16 bit
|
||||
* fixed point precision as the second one, we use a trick of
|
||||
* x * a = x + x*(a-1)
|
||||
* so
|
||||
* x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) *cos(pi/8)-1).
|
||||
****************************************************************************/
|
||||
extern unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
|
||||
static const int cospi8sqrt2minus1 = 20091;
|
||||
static const int sinpi8sqrt2 = 35468;
|
||||
|
||||
/* Issue a MIPS "pref" load hint (hint 0) for the cache line containing
 * *src, warming the data cache ahead of the upcoming reads.
 * NOTE(review): non-static file-scope `inline` relies on GNU89 inline
 * semantics - consider `static inline` for portability. */
inline void prefetch_load_short(short *src)
{
    __asm__ __volatile__ (
        "pref   0,  0(%[src])   \n\t"
        :
        : [src] "r" (src)
    );
}
|
||||
|
||||
/* Full 4x4 inverse DCT (LLM form) with predictor add.
 * Pass 1 (fully unrolled) transforms the columns of `input` into `output`;
 * pass 2 (fully unrolled) transforms the rows of `output` in place with
 * +4 rounding and >>3 scaling; finally each result is added to the
 * predictor and clamped through the crop table `cm` into dst.
 * The fixed-point constants use the trick described in the file header:
 * x*sqrt(2)*cos(pi/8) = x + x*(sqrt(2)*cos(pi/8)-1), so both multiplies
 * fit 16-bit precision (>>16 after each product).
 *   input       - 16 dequantized coefficients (column-major butterflies
 *                 operate on stride-4 element groups)
 *   pred_ptr    - predictor block, pred_stride bytes per row
 *   dst_ptr     - reconstruction target, dst_stride bytes per row
 */
void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr,
                                int pred_stride, unsigned char *dst_ptr,
                                int dst_stride)
{
    int r, c;
    int a1, b1, c1, d1;
    short output[16];
    short *ip = input;
    short *op = output;
    int temp1, temp2;
    int shortpitch = 4;  /* elements per transform row */

    int c2, d2;
    int temp3, temp4;
    /* cm points at the centered clip table: cm[x] == clamp(x, 0, 255). */
    unsigned char *cm = ff_cropTbl + CROP_WIDTH;

    /* prepare data for load */
    prefetch_load_short(ip + 8);

    /* first loop is unrolled: column pass.  Columns 0 and 1 are
     * interleaved (c2/d2 carry column 1's odd terms) to expose
     * instruction-level parallelism. */
    a1 = ip[0] + ip[8];
    b1 = ip[0] - ip[8];

    temp1 = (ip[4] * sinpi8sqrt2) >> 16;
    temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
    c1 = temp1 - temp2;

    temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
    temp2 = (ip[12] * sinpi8sqrt2) >> 16;
    d1 = temp1 + temp2;

    temp3 = (ip[5] * sinpi8sqrt2) >> 16;
    temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
    c2 = temp3 - temp4;

    temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
    temp4 = (ip[13] * sinpi8sqrt2) >> 16;
    d2 = temp3 + temp4;

    op[0] = a1 + d1;
    op[12] = a1 - d1;
    op[4] = b1 + c1;
    op[8] = b1 - c1;

    a1 = ip[1] + ip[9];
    b1 = ip[1] - ip[9];

    op[1] = a1 + d2;
    op[13] = a1 - d2;
    op[5] = b1 + c2;
    op[9] = b1 - c2;

    /* Columns 2 and 3, same interleaved pattern. */
    a1 = ip[2] + ip[10];
    b1 = ip[2] - ip[10];

    temp1 = (ip[6] * sinpi8sqrt2) >> 16;
    temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16);
    c1 = temp1 - temp2;

    temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16);
    temp2 = (ip[14] * sinpi8sqrt2) >> 16;
    d1 = temp1 + temp2;

    temp3 = (ip[7] * sinpi8sqrt2) >> 16;
    temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
    c2 = temp3 - temp4;

    temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
    temp4 = (ip[15] * sinpi8sqrt2) >> 16;
    d2 = temp3 + temp4;

    op[2] = a1 + d1;
    op[14] = a1 - d1;
    op[6] = b1 + c1;
    op[10] = b1 - c1;

    a1 = ip[3] + ip[11];
    b1 = ip[3] - ip[11];

    op[3] = a1 + d2;
    op[15] = a1 - d2;
    op[7] = b1 + c2;
    op[11] = b1 - c2;

    ip = output;

    /* prepare data for load */
    prefetch_load_short(ip + shortpitch);

    /* second loop is unrolled: row pass with rounding (+4, >>3),
     * operating on `output` in place (op still points at output). */
    a1 = ip[0] + ip[2];
    b1 = ip[0] - ip[2];

    temp1 = (ip[1] * sinpi8sqrt2) >> 16;
    temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
    c1 = temp1 - temp2;

    temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
    temp2 = (ip[3] * sinpi8sqrt2) >> 16;
    d1 = temp1 + temp2;

    temp3 = (ip[5] * sinpi8sqrt2) >> 16;
    temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
    c2 = temp3 - temp4;

    temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
    temp4 = (ip[7] * sinpi8sqrt2) >> 16;
    d2 = temp3 + temp4;

    op[0] = (a1 + d1 + 4) >> 3;
    op[3] = (a1 - d1 + 4) >> 3;
    op[1] = (b1 + c1 + 4) >> 3;
    op[2] = (b1 - c1 + 4) >> 3;

    a1 = ip[4] + ip[6];
    b1 = ip[4] - ip[6];

    op[4] = (a1 + d2 + 4) >> 3;
    op[7] = (a1 - d2 + 4) >> 3;
    op[5] = (b1 + c2 + 4) >> 3;
    op[6] = (b1 - c2 + 4) >> 3;

    /* Rows 2 and 3. */
    a1 = ip[8] + ip[10];
    b1 = ip[8] - ip[10];

    temp1 = (ip[9] * sinpi8sqrt2) >> 16;
    temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16);
    c1 = temp1 - temp2;

    temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16);
    temp2 = (ip[11] * sinpi8sqrt2) >> 16;
    d1 = temp1 + temp2;

    temp3 = (ip[13] * sinpi8sqrt2) >> 16;
    temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
    c2 = temp3 - temp4;

    temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
    temp4 = (ip[15] * sinpi8sqrt2) >> 16;
    d2 = temp3 + temp4;

    op[8] = (a1 + d1 + 4) >> 3;
    op[11] = (a1 - d1 + 4) >> 3;
    op[9] = (b1 + c1 + 4) >> 3;
    op[10] = (b1 - c1 + 4) >> 3;

    a1 = ip[12] + ip[14];
    b1 = ip[12] - ip[14];

    op[12] = (a1 + d2 + 4) >> 3;
    op[15] = (a1 - d2 + 4) >> 3;
    op[13] = (b1 + c2 + 4) >> 3;
    op[14] = (b1 - c2 + 4) >> 3;

    ip = output;

    /* Reconstruction: residual + predictor, clamped to [0,255] via cm. */
    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            short a = ip[c] + pred_ptr[c] ;
            dst_ptr[c] = cm[a] ;
        }

        ip += 4;
        dst_ptr += dst_stride;
        pred_ptr += pred_stride;
    }
}
|
||||
|
||||
/* DC-only inverse transform + predictor add: every output pixel is
 * pred + ((input_dc + 4) >> 3), clamped.  The scalar DC value is
 * replicated into all four bytes of a register (replv.qb) and applied
 * to a whole 4-pixel row at a time with saturating quad-byte add/sub.
 * NOTE(review): the row loops use word loads/stores (lw/sw), so
 * pred_ptr and dst_ptr are assumed 4-byte aligned - confirm callers
 * guarantee this.
 */
void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride)
{
    int a1;
    int i, absa1;
    int t2, vector_a1, vector_a;

    /* a1 = ((input_dc + 4) >> 3); */
    __asm__ __volatile__ (
        "addi  %[a1], %[input_dc], 4   \n\t"
        "sra   %[a1], %[a1], 3         \n\t"
        : [a1] "=r" (a1)
        : [input_dc] "r" (input_dc)
    );

    if (a1 < 0)
    {
        /* Negative DC: replv.qb needs an unsigned byte, so replicate
         * |a1| and subtract it from the predictor instead.
         * use quad-byte
         * input and output memory are four byte aligned
         */
        __asm__ __volatile__ (
            "abs        %[absa1], %[a1]        \n\t"
            "replv.qb   %[vector_a1], %[absa1] \n\t"
            : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
            : [a1] "r" (a1)
        );

        /* use (a1 - predptr[c]) instead a1 + predptr[c] */
        for (i = 4; i--;)
        {
            /* subu_s.qb: per-byte subtract with saturation at 0. */
            __asm__ __volatile__ (
                "lw          %[t2], 0(%[pred_ptr])                      \n\t"
                "add         %[pred_ptr], %[pred_ptr], %[pred_stride]   \n\t"
                "subu_s.qb   %[vector_a], %[t2], %[vector_a1]           \n\t"
                "sw          %[vector_a], 0(%[dst_ptr])                 \n\t"
                "add         %[dst_ptr], %[dst_ptr], %[dst_stride]      \n\t"
                : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
                  [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" (pred_ptr)
                : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1)
            );
        }
    }
    else
    {
        /* Non-negative DC: replicate a1 and add it to each byte.
         * use quad-byte
         * input and output memory are four byte aligned
         */
        __asm__ __volatile__ (
            "replv.qb   %[vector_a1], %[a1] \n\t"
            : [vector_a1] "=r" (vector_a1)
            : [a1] "r" (a1)
        );

        for (i = 4; i--;)
        {
            /* addu_s.qb: per-byte add with saturation at 255. */
            __asm__ __volatile__ (
                "lw          %[t2], 0(%[pred_ptr])                      \n\t"
                "add         %[pred_ptr], %[pred_ptr], %[pred_stride]   \n\t"
                "addu_s.qb   %[vector_a], %[vector_a1], %[t2]           \n\t"
                "sw          %[vector_a], 0(%[dst_ptr])                 \n\t"
                "add         %[dst_ptr], %[dst_ptr], %[dst_stride]      \n\t"
                : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
                  [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" (pred_ptr)
                : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1)
            );
        }
    }

}
|
||||
|
||||
/* Inverse 4x4 Walsh-Hadamard transform of the second-order (Y2) block,
 * scattering each of the 16 results into the DC slot (index 0) of the
 * corresponding 16-coefficient luma block of mb_dqcoeff.
 * Pass 1 runs the column butterflies into a scratch block; pass 2 runs
 * the row butterflies in place with +3 rounding and >>3 scaling.
 */
void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff)
{
    short tmp[16];
    int col, row, k;
    int s1, s2, d1, d2;
    int o0, o1, o2, o3;

    /* Warm the cache before the column pass. */
    prefetch_load_short(input);

    /* Pass 1: column butterflies, input -> tmp. */
    for (col = 0; col < 4; col++)
    {
        s1 = input[col] + input[col + 12];
        s2 = input[col + 4] + input[col + 8];
        d1 = input[col + 4] - input[col + 8];
        d2 = input[col] - input[col + 12];

        tmp[col]      = (short)(s1 + s2);
        tmp[col + 4]  = (short)(d1 + d2);
        tmp[col + 8]  = (short)(s1 - s2);
        tmp[col + 12] = (short)(d2 - d1);
    }

    prefetch_load_short(tmp);

    /* Pass 2: row butterflies with rounding, tmp transformed in place. */
    for (row = 0; row < 16; row += 4)
    {
        s1 = tmp[row] + tmp[row + 3] + 3;      /* +3 = rounding bias */
        s2 = tmp[row + 1] + tmp[row + 2];
        d1 = tmp[row + 1] - tmp[row + 2];
        d2 = tmp[row] - tmp[row + 3] + 3;

        o0 = s1 + s2;
        o1 = d2 + d1;
        o2 = s1 - s2;
        o3 = d2 - d1;

        tmp[row]     = (short)(o0 >> 3);
        tmp[row + 1] = (short)(o1 >> 3);
        tmp[row + 2] = (short)(o2 >> 3);
        tmp[row + 3] = (short)(o3 >> 3);
    }

    /* Scatter: one DC value per 16-coefficient block. */
    for (k = 0; k < 16; k++)
    {
        mb_dqcoeff[k * 16] = tmp[k];
    }
}
|
||||
|
||||
/* DC-only inverse Walsh-Hadamard: when only input[0] is non-zero every
 * output equals (input[0] + 3) >> 3.  The value is stored into the DC
 * slot of each of the 16 luma blocks, i.e. every 16th short of
 * mb_dqcoeff (byte offsets 0, 32, ..., 480 via "sh").
 * NOTE(review): the asm writes memory but declares no "memory" clobber;
 * __volatile__ prevents removal, but a clobber would be safer - confirm.
 */
void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff)
{
    int a1;

    a1 = ((input[0] + 3) >> 3);

    __asm__ __volatile__ (
        "sh  %[a1], 0(%[mb_dqcoeff])    \n\t"
        "sh  %[a1], 32(%[mb_dqcoeff])   \n\t"
        "sh  %[a1], 64(%[mb_dqcoeff])   \n\t"
        "sh  %[a1], 96(%[mb_dqcoeff])   \n\t"
        "sh  %[a1], 128(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 160(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 192(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 224(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 256(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 288(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 320(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 352(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 384(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 416(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 448(%[mb_dqcoeff])  \n\t"
        "sh  %[a1], 480(%[mb_dqcoeff])  \n\t"

        :
        : [a1] "r" (a1), [mb_dqcoeff] "r" (mb_dqcoeff)
    );
}
|
||||
|
||||
#endif
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
|
||||
#include "vpx_config.h"
|
||||
#include "vpx_rtcd.h"
|
||||
#include "vpx/vpx_integer.h"
|
||||
|
||||
#if HAVE_DSPR2
|
||||
/* Issue a MIPS "pref" load hint (hint 0) for the cache line containing
 * *src, warming the data cache ahead of the upcoming reads.
 * NOTE(review): non-static file-scope `inline` relies on GNU89 inline
 * semantics - consider `static inline` for portability. */
inline void prefetch_load_int(unsigned char *src)
{
    __asm__ __volatile__ (
        "pref   0,  0(%[src])   \n\t"
        :
        : [src] "r" (src)
    );
}
|
||||
|
||||
|
||||
/* Copy a 16x16 pixel block row by row: unaligned word loads ("ulw")
 * from src, word stores ("sw") to dst, with a prefetch of the next
 * source row each iteration.
 * NOTE(review): "sw" assumes dst is 4-byte aligned - confirm callers
 * guarantee this.  The asm writes *dst without a "memory" clobber;
 * __volatile__ keeps it, but a clobber would be safer - confirm. */
__inline void vp8_copy_mem16x16_dspr2(
    unsigned char *RESTRICT src,
    int src_stride,
    unsigned char *RESTRICT dst,
    int dst_stride)
{
    int r;
    unsigned int a0, a1, a2, a3;

    for (r = 16; r--;)
    {
        /* load src data in cache memory */
        prefetch_load_int(src + src_stride);

        /* use unaligned memory load and store */
        __asm__ __volatile__ (
            "ulw    %[a0], 0(%[src])    \n\t"
            "ulw    %[a1], 4(%[src])    \n\t"
            "ulw    %[a2], 8(%[src])    \n\t"
            "ulw    %[a3], 12(%[src])   \n\t"
            "sw     %[a0], 0(%[dst])    \n\t"
            "sw     %[a1], 4(%[dst])    \n\t"
            "sw     %[a2], 8(%[dst])    \n\t"
            "sw     %[a3], 12(%[dst])   \n\t"
            : [a0] "=&r" (a0), [a1] "=&r" (a1),
              [a2] "=&r" (a2), [a3] "=&r" (a3)
            : [src] "r" (src), [dst] "r" (dst)
        );

        src += src_stride;
        dst += dst_stride;
    }
}
|
||||
|
||||
|
||||
/* Copy an 8x8 pixel block: two unaligned word loads and two word stores
 * per row.  Unlike the 16x16 variant, the prefetch is issued once before
 * the loop rather than per row.
 * NOTE(review): "sw" assumes dst is 4-byte aligned - confirm callers. */
__inline void vp8_copy_mem8x8_dspr2(
    unsigned char *RESTRICT src,
    int src_stride,
    unsigned char *RESTRICT dst,
    int dst_stride)
{
    int r;
    unsigned int a0, a1;

    /* load src data in cache memory */
    prefetch_load_int(src + src_stride);

    for (r = 8; r--;)
    {
        /* use unaligned memory load and store */
        __asm__ __volatile__ (
            "ulw    %[a0], 0(%[src])    \n\t"
            "ulw    %[a1], 4(%[src])    \n\t"
            "sw     %[a0], 0(%[dst])    \n\t"
            "sw     %[a1], 4(%[dst])    \n\t"
            : [a0] "=&r" (a0), [a1] "=&r" (a1)
            : [src] "r" (src), [dst] "r" (dst)
        );

        src += src_stride;
        dst += dst_stride;
    }
}
|
||||
|
||||
|
||||
/* Copy an 8x4 pixel block: identical row kernel to the 8x8 variant but
 * only 4 rows; prefetch issued once before the loop.
 * NOTE(review): "sw" assumes dst is 4-byte aligned - confirm callers. */
__inline void vp8_copy_mem8x4_dspr2(
    unsigned char *RESTRICT src,
    int src_stride,
    unsigned char *RESTRICT dst,
    int dst_stride)
{
    int r;
    unsigned int a0, a1;

    /* load src data in cache memory */
    prefetch_load_int(src + src_stride);

    for (r = 4; r--;)
    {
        /* use unaligned memory load and store */
        __asm__ __volatile__ (
            "ulw    %[a0], 0(%[src])    \n\t"
            "ulw    %[a1], 4(%[src])    \n\t"
            "sw     %[a0], 0(%[dst])    \n\t"
            "sw     %[a1], 4(%[dst])    \n\t"
            : [a0] "=&r" (a0), [a1] "=&r" (a1)
            : [src] "r" (src), [dst] "r" (dst)
        );

        src += src_stride;
        dst += dst_stride;
    }
}
|
||||
|
||||
#endif
|
|
@ -22,35 +22,42 @@ specialize vp8_dequantize_b mmx media neon
|
|||
vp8_dequantize_b_media=vp8_dequantize_b_v6
|
||||
|
||||
prototype void vp8_dequant_idct_add "short *input, short *dq, unsigned char *output, int stride"
|
||||
specialize vp8_dequant_idct_add mmx media neon
|
||||
specialize vp8_dequant_idct_add mmx media neon dspr2
|
||||
vp8_dequant_idct_add_media=vp8_dequant_idct_add_v6
|
||||
vp8_dequant_idct_add_dspr2=vp8_dequant_idct_add_dspr2
|
||||
|
||||
prototype void vp8_dequant_idct_add_y_block "short *q, short *dq, unsigned char *dst, int stride, char *eobs"
|
||||
specialize vp8_dequant_idct_add_y_block mmx sse2 media neon
|
||||
specialize vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2
|
||||
vp8_dequant_idct_add_y_block_media=vp8_dequant_idct_add_y_block_v6
|
||||
vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2
|
||||
|
||||
prototype void vp8_dequant_idct_add_uv_block "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs"
|
||||
specialize vp8_dequant_idct_add_uv_block mmx sse2 media neon
|
||||
specialize vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2
|
||||
vp8_dequant_idct_add_uv_block_media=vp8_dequant_idct_add_uv_block_v6
|
||||
vp8_dequant_idct_add_uv_block_dspr2=vp8_dequant_idct_add_uv_block_dspr2
|
||||
|
||||
#
|
||||
# Loopfilter
|
||||
#
|
||||
prototype void vp8_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
|
||||
specialize vp8_loop_filter_mbv mmx sse2 media neon
|
||||
specialize vp8_loop_filter_mbv mmx sse2 media neon dspr2
|
||||
vp8_loop_filter_mbv_media=vp8_loop_filter_mbv_armv6
|
||||
vp8_loop_filter_mbv_dspr2=vp8_loop_filter_mbv_dspr2
|
||||
|
||||
prototype void vp8_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
|
||||
specialize vp8_loop_filter_bv mmx sse2 media neon
|
||||
specialize vp8_loop_filter_bv mmx sse2 media neon dspr2
|
||||
vp8_loop_filter_bv_media=vp8_loop_filter_bv_armv6
|
||||
vp8_loop_filter_bv_dspr2=vp8_loop_filter_bv_dspr2
|
||||
|
||||
prototype void vp8_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
|
||||
specialize vp8_loop_filter_mbh mmx sse2 media neon
|
||||
specialize vp8_loop_filter_mbh mmx sse2 media neon dspr2
|
||||
vp8_loop_filter_mbh_media=vp8_loop_filter_mbh_armv6
|
||||
vp8_loop_filter_mbh_dspr2=vp8_loop_filter_mbh_dspr2
|
||||
|
||||
prototype void vp8_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
|
||||
specialize vp8_loop_filter_bh mmx sse2 media neon
|
||||
specialize vp8_loop_filter_bh mmx sse2 media neon dspr2
|
||||
vp8_loop_filter_bh_media=vp8_loop_filter_bh_armv6
|
||||
vp8_loop_filter_bh_dspr2=vp8_loop_filter_bh_dspr2
|
||||
|
||||
|
||||
prototype void vp8_loop_filter_simple_mbv "unsigned char *y, int ystride, const unsigned char *blimit"
|
||||
|
@ -90,37 +97,45 @@ vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon
|
|||
#
|
||||
#idct16
|
||||
prototype void vp8_short_idct4x4llm "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride"
|
||||
specialize vp8_short_idct4x4llm mmx media neon
|
||||
specialize vp8_short_idct4x4llm mmx media neon dspr2
|
||||
vp8_short_idct4x4llm_media=vp8_short_idct4x4llm_v6_dual
|
||||
vp8_short_idct4x4llm_dspr2=vp8_short_idct4x4llm_dspr2
|
||||
|
||||
#iwalsh1
|
||||
prototype void vp8_short_inv_walsh4x4_1 "short *input, short *output"
|
||||
specialize vp8_short_inv_walsh4x4_1 dspr2
|
||||
vp8_short_inv_walsh4x4_1_dspr2=vp8_short_inv_walsh4x4_1_dspr2
|
||||
# no asm yet
|
||||
|
||||
#iwalsh16
|
||||
prototype void vp8_short_inv_walsh4x4 "short *input, short *output"
|
||||
specialize vp8_short_inv_walsh4x4 mmx sse2 media neon
|
||||
specialize vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2
|
||||
vp8_short_inv_walsh4x4_media=vp8_short_inv_walsh4x4_v6
|
||||
vp8_short_inv_walsh4x4_dspr2=vp8_short_inv_walsh4x4_dspr2
|
||||
|
||||
#idct1_scalar_add
|
||||
prototype void vp8_dc_only_idct_add "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride"
|
||||
specialize vp8_dc_only_idct_add mmx media neon
|
||||
specialize vp8_dc_only_idct_add mmx media neon dspr2
|
||||
vp8_dc_only_idct_add_media=vp8_dc_only_idct_add_v6
|
||||
vp8_dc_only_idct_add_dspr2=vp8_dc_only_idct_add_dspr2
|
||||
|
||||
#
|
||||
# RECON
|
||||
#
|
||||
prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_copy_mem16x16 mmx sse2 media neon
|
||||
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
|
||||
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
|
||||
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
|
||||
|
||||
prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_copy_mem8x8 mmx media neon
|
||||
specialize vp8_copy_mem8x8 mmx media neon dspr2
|
||||
vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
|
||||
vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
|
||||
|
||||
prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_copy_mem8x4 mmx media neon
|
||||
specialize vp8_copy_mem8x4 mmx media neon dspr2
|
||||
vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
|
||||
vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
|
||||
|
||||
prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x, unsigned char * yabove_row, unsigned char * yleft, int left_stride, unsigned char * ypred_ptr, int y_stride"
|
||||
specialize vp8_build_intra_predictors_mby_s sse2 ssse3
|
||||
|
@ -177,20 +192,24 @@ fi
|
|||
# Subpixel
|
||||
#
|
||||
prototype void vp8_sixtap_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon
|
||||
specialize vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2
|
||||
vp8_sixtap_predict16x16_media=vp8_sixtap_predict16x16_armv6
|
||||
vp8_sixtap_predict16x16_dspr2=vp8_sixtap_predict16x16_dspr2
|
||||
|
||||
prototype void vp8_sixtap_predict8x8 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon
|
||||
specialize vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2
|
||||
vp8_sixtap_predict8x8_media=vp8_sixtap_predict8x8_armv6
|
||||
vp8_sixtap_predict8x8_dspr2=vp8_sixtap_predict8x8_dspr2
|
||||
|
||||
prototype void vp8_sixtap_predict8x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon
|
||||
specialize vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2
|
||||
vp8_sixtap_predict8x4_media=vp8_sixtap_predict8x4_armv6
|
||||
vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2
|
||||
|
||||
prototype void vp8_sixtap_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_sixtap_predict4x4 mmx ssse3 media neon
|
||||
specialize vp8_sixtap_predict4x4 mmx ssse3 media neon dspr2
|
||||
vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6
|
||||
vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2
|
||||
|
||||
prototype void vp8_bilinear_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
|
||||
specialize vp8_bilinear_predict16x16 mmx sse2 ssse3 media neon
|
||||
|
|
|
@ -119,6 +119,14 @@ ifeq ($(ARCH_X86_64),yes)
|
|||
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/loopfilter_block_sse2.asm
|
||||
endif
|
||||
|
||||
# common (c)
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/idctllm_dspr2.c
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/filter_dspr2.c
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/loopfilter_filters_dspr2.c
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/reconinter_dspr2.c
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/idct_blk_dspr2.c
|
||||
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/dequantize_dspr2.c
|
||||
|
||||
# common (c)
|
||||
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/filter_arm.c
|
||||
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/loopfilter_arm.c
|
||||
|
|
Загрузка…
Ссылка в новой задаче