Merge "Optimize 32x32 idct function" into experimental

This commit is contained in:
Yunqing Wang 2013-03-27 11:30:48 -07:00 committed by Gerrit Code Review
Parents 513157e093 21a718d9a7
Commit 0e91bec4b5
2 changed files: 691 additions and 1 deletion

View file

@@ -361,7 +361,7 @@ specialize vp9_short_idct1_16x16
prototype void vp9_short_idct32x32 "int16_t *input, int16_t *output, int pitch"
-specialize vp9_short_idct32x32
+specialize vp9_short_idct32x32 sse2
prototype void vp9_short_idct1_32x32 "int16_t *input, int16_t *output"
specialize vp9_short_idct1_32x32
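
The one-line change above registers the new SSE2 implementation with libvpx's run-time CPU detection (RTCD): each `specialize` entry becomes a function pointer bound at init time to the fastest variant the CPU supports, falling back to the C version otherwise. The sketch below only illustrates that shape with simplified, hypothetical names; the real glue is generated into vp9_rtcd.h by the build.

#include <stdint.h>

/* Prototypes as declared by the RTCD definitions above. */
void vp9_short_idct32x32_c(int16_t *input, int16_t *output, int pitch);
void vp9_short_idct32x32_sse2(int16_t *input, int16_t *output, int pitch);

/* Hypothetical dispatch pointer; the generated vp9_rtcd.h provides the
   real equivalent. */
void (*vp9_short_idct32x32)(int16_t *input, int16_t *output, int pitch);

static void setup_idct_rtcd(int cpu_has_sse2) {
  vp9_short_idct32x32 = vp9_short_idct32x32_c;      /* portable fallback */
  if (cpu_has_sse2)
    vp9_short_idct32x32 = vp9_short_idct32x32_sse2; /* SSE2 fast path */
}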

View file

@@ -1506,4 +1506,694 @@ void vp9_short_idct10_16x16_sse2(int16_t *input, int16_t *output, int pitch) {
output += 8;
}
}
// Macro for multiplying elements by constants and adding them together.
#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \
cst0, cst1, cst2, cst3, res0, res1, res2, res3) \
{ \
tmp0 = _mm_madd_epi16(lo_0, cst0); \
tmp1 = _mm_madd_epi16(hi_0, cst0); \
tmp2 = _mm_madd_epi16(lo_0, cst1); \
tmp3 = _mm_madd_epi16(hi_0, cst1); \
tmp4 = _mm_madd_epi16(lo_1, cst2); \
tmp5 = _mm_madd_epi16(hi_1, cst2); \
tmp6 = _mm_madd_epi16(lo_1, cst3); \
tmp7 = _mm_madd_epi16(hi_1, cst3); \
\
tmp0 = _mm_add_epi32(tmp0, rounding); \
tmp1 = _mm_add_epi32(tmp1, rounding); \
tmp2 = _mm_add_epi32(tmp2, rounding); \
tmp3 = _mm_add_epi32(tmp3, rounding); \
tmp4 = _mm_add_epi32(tmp4, rounding); \
tmp5 = _mm_add_epi32(tmp5, rounding); \
tmp6 = _mm_add_epi32(tmp6, rounding); \
tmp7 = _mm_add_epi32(tmp7, rounding); \
\
tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
\
res0 = _mm_packs_epi32(tmp0, tmp1); \
res1 = _mm_packs_epi32(tmp2, tmp3); \
res2 = _mm_packs_epi32(tmp4, tmp5); \
res3 = _mm_packs_epi32(tmp6, tmp7); \
}
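// For reference: each "res" produced by the macro above is the SIMD form
// of the scalar butterfly below (a hedged sketch for illustration only,
// not part of this commit).  lo/hi interleave two input vectors a and b,
// and each cst packs a coefficient pair (c0, c1), so _mm_madd_epi16
// computes a * c0 + b * c1 per 32-bit lane:
//
//   static INLINE int16_t butterfly(int a, int b, int c0, int c1) {
//     return (int16_t)((a * c0 + b * c1 + DCT_CONST_ROUNDING)
//                      >> DCT_CONST_BITS);
//   }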
void vp9_short_idct32x32_sse2(int16_t *input, int16_t *output, int pitch) {
const int half_pitch = pitch >> 1;
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
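// 1 << 5 is half of the 1 << 6 divisor applied in the final >> 6, so the
// last shift rounds to nearest.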
// idct constants for each stage
const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
const __m128i stg1_3 = pair_set_epi16(cospi_17_64, cospi_15_64);
const __m128i stg1_4 = pair_set_epi16(cospi_23_64, -cospi_9_64);
const __m128i stg1_5 = pair_set_epi16(cospi_9_64, cospi_23_64);
const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64);
const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64);
const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64);
const __m128i stg1_10 = pair_set_epi16(cospi_11_64, -cospi_21_64);
const __m128i stg1_11 = pair_set_epi16(cospi_21_64, cospi_11_64);
const __m128i stg1_12 = pair_set_epi16(cospi_19_64, -cospi_13_64);
const __m128i stg1_13 = pair_set_epi16(cospi_13_64, cospi_19_64);
const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64);
const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64);
const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
__m128i in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12,
in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23,
in24, in25, in26, in27, in28, in29, in30, in31;
__m128i col[128];
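// col[] holds the full 32x32 intermediate result of the first 1-D pass:
// four 8x32 blocks of 32 vectors each (4 * 32 = 128).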
__m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
stp1_30, stp1_31;
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
stp2_30, stp2_31;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int i, j;
// We work on an 8x32 block each time, and loop 8 times for the 2-D 32x32 idct.
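// Iterations 0-3 run the first pass and keep the results in col[];
// iterations 4-7 transpose those intermediates back and run the second
// pass, writing the final values to the output buffer.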
for (i = 0; i < 8; i++) {
if (i < 4) {
// First 1-D idct
// Load input data.
in0 = _mm_load_si128((__m128i *)input);
in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
in16 = _mm_load_si128((__m128i *)(input + 8 * 2));
in24 = _mm_load_si128((__m128i *)(input + 8 * 3));
in1 = _mm_load_si128((__m128i *)(input + 8 * 4));
in9 = _mm_load_si128((__m128i *)(input + 8 * 5));
in17 = _mm_load_si128((__m128i *)(input + 8 * 6));
in25 = _mm_load_si128((__m128i *)(input + 8 * 7));
in2 = _mm_load_si128((__m128i *)(input + 8 * 8));
in10 = _mm_load_si128((__m128i *)(input + 8 * 9));
in18 = _mm_load_si128((__m128i *)(input + 8 * 10));
in26 = _mm_load_si128((__m128i *)(input + 8 * 11));
in3 = _mm_load_si128((__m128i *)(input + 8 * 12));
in11 = _mm_load_si128((__m128i *)(input + 8 * 13));
in19 = _mm_load_si128((__m128i *)(input + 8 * 14));
in27 = _mm_load_si128((__m128i *)(input + 8 * 15));
in4 = _mm_load_si128((__m128i *)(input + 8 * 16));
in12 = _mm_load_si128((__m128i *)(input + 8 * 17));
in20 = _mm_load_si128((__m128i *)(input + 8 * 18));
in28 = _mm_load_si128((__m128i *)(input + 8 * 19));
in5 = _mm_load_si128((__m128i *)(input + 8 * 20));
in13 = _mm_load_si128((__m128i *)(input + 8 * 21));
in21 = _mm_load_si128((__m128i *)(input + 8 * 22));
in29 = _mm_load_si128((__m128i *)(input + 8 * 23));
in6 = _mm_load_si128((__m128i *)(input + 8 * 24));
in14 = _mm_load_si128((__m128i *)(input + 8 * 25));
in22 = _mm_load_si128((__m128i *)(input + 8 * 26));
in30 = _mm_load_si128((__m128i *)(input + 8 * 27));
in7 = _mm_load_si128((__m128i *)(input + 8 * 28));
in15 = _mm_load_si128((__m128i *)(input + 8 * 29));
in23 = _mm_load_si128((__m128i *)(input + 8 * 30));
in31 = _mm_load_si128((__m128i *)(input + 8 * 31));
input += 256;
// Transpose 32x8 block to 8x32 block
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
in10, in11, in12, in13, in14, in15);
TRANSPOSE_8X8(in16, in17, in18, in19, in20, in21, in22, in23, in16, in17,
in18, in19, in20, in21, in22, in23);
TRANSPOSE_8X8(in24, in25, in26, in27, in28, in29, in30, in31, in24, in25,
in26, in27, in28, in29, in30, in31);
} else {
// Second 1-D idct
j = i - 4;
// Transpose 32x8 block to 8x32 block
TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
col[j * 8 + 6], col[j * 8 + 7], in0, in1, in2, in3, in4,
in5, in6, in7);
j += 4;
TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
col[j * 8 + 6], col[j * 8 + 7], in8, in9, in10,
in11, in12, in13, in14, in15);
j += 4;
TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
col[j * 8 + 6], col[j * 8 + 7], in16, in17, in18,
in19, in20, in21, in22, in23);
j += 4;
TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
col[j * 8 + 6], col[j * 8 + 7], in24, in25, in26, in27,
in28, in29, in30, in31);
}
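// Stages 1-7 below form the 32-point butterfly network; each block
// mirrors the corresponding stage of the scalar reference implementation.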
// Stage1
{
const __m128i lo_1_31 = _mm_unpacklo_epi16(in1, in31);
const __m128i hi_1_31 = _mm_unpackhi_epi16(in1, in31);
const __m128i lo_17_15 = _mm_unpacklo_epi16(in17, in15);
const __m128i hi_17_15 = _mm_unpackhi_epi16(in17, in15);
const __m128i lo_9_23 = _mm_unpacklo_epi16(in9, in23);
const __m128i hi_9_23 = _mm_unpackhi_epi16(in9, in23);
const __m128i lo_25_7 = _mm_unpacklo_epi16(in25, in7);
const __m128i hi_25_7 = _mm_unpackhi_epi16(in25, in7);
const __m128i lo_5_27 = _mm_unpacklo_epi16(in5, in27);
const __m128i hi_5_27 = _mm_unpackhi_epi16(in5, in27);
const __m128i lo_21_11 = _mm_unpacklo_epi16(in21, in11);
const __m128i hi_21_11 = _mm_unpackhi_epi16(in21, in11);
const __m128i lo_13_19 = _mm_unpacklo_epi16(in13, in19);
const __m128i hi_13_19 = _mm_unpackhi_epi16(in13, in19);
const __m128i lo_29_3 = _mm_unpacklo_epi16(in29, in3);
const __m128i hi_29_3 = _mm_unpackhi_epi16(in29, in3);
MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,
stg1_1, stg1_2, stg1_3, stp1_16, stp1_31,
stp1_17, stp1_30)
MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4,
stg1_5, stg1_6, stg1_7, stp1_18, stp1_29,
stp1_19, stp1_28)
MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8,
stg1_9, stg1_10, stg1_11, stp1_20, stp1_27,
stp1_21, stp1_26)
MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12,
stg1_13, stg1_14, stg1_15, stp1_22, stp1_25,
stp1_23, stp1_24)
}
// Stage2
{
const __m128i lo_2_30 = _mm_unpacklo_epi16(in2, in30);
const __m128i hi_2_30 = _mm_unpackhi_epi16(in2, in30);
const __m128i lo_18_14 = _mm_unpacklo_epi16(in18, in14);
const __m128i hi_18_14 = _mm_unpackhi_epi16(in18, in14);
const __m128i lo_10_22 = _mm_unpacklo_epi16(in10, in22);
const __m128i hi_10_22 = _mm_unpackhi_epi16(in10, in22);
const __m128i lo_26_6 = _mm_unpacklo_epi16(in26, in6);
const __m128i hi_26_6 = _mm_unpackhi_epi16(in26, in6);
MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,
stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,
stp2_14)
MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4,
stg2_5, stg2_6, stg2_7, stp2_10, stp2_13,
stp2_11, stp2_12)
stp2_16 = _mm_add_epi16(stp1_16, stp1_17);
stp2_17 = _mm_sub_epi16(stp1_16, stp1_17);
stp2_18 = _mm_sub_epi16(stp1_19, stp1_18);
stp2_19 = _mm_add_epi16(stp1_19, stp1_18);
stp2_20 = _mm_add_epi16(stp1_20, stp1_21);
stp2_21 = _mm_sub_epi16(stp1_20, stp1_21);
stp2_22 = _mm_sub_epi16(stp1_23, stp1_22);
stp2_23 = _mm_add_epi16(stp1_23, stp1_22);
stp2_24 = _mm_add_epi16(stp1_24, stp1_25);
stp2_25 = _mm_sub_epi16(stp1_24, stp1_25);
stp2_26 = _mm_sub_epi16(stp1_27, stp1_26);
stp2_27 = _mm_add_epi16(stp1_27, stp1_26);
stp2_28 = _mm_add_epi16(stp1_28, stp1_29);
stp2_29 = _mm_sub_epi16(stp1_28, stp1_29);
stp2_30 = _mm_sub_epi16(stp1_31, stp1_30);
stp2_31 = _mm_add_epi16(stp1_31, stp1_30);
}
// Stage3
{
const __m128i lo_4_28 = _mm_unpacklo_epi16(in4, in28);
const __m128i hi_4_28 = _mm_unpackhi_epi16(in4, in28);
const __m128i lo_20_12 = _mm_unpacklo_epi16(in20, in12);
const __m128i hi_20_12 = _mm_unpackhi_epi16(in20, in12);
const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);
const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);
const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0,
stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5,
stp1_6)
stp1_8 = _mm_add_epi16(stp2_8, stp2_9);
stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
stp1_12 = _mm_add_epi16(stp2_12, stp2_13);
stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,
stg3_5, stg3_6, stg3_4, stp1_17, stp1_30,
stp1_18, stp1_29)
MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,
stg3_9, stg3_10, stg3_8, stp1_21, stp1_26,
stp1_22, stp1_25)
stp1_16 = stp2_16;
stp1_31 = stp2_31;
stp1_19 = stp2_19;
stp1_20 = stp2_20;
stp1_23 = stp2_23;
stp1_24 = stp2_24;
stp1_27 = stp2_27;
stp1_28 = stp2_28;
}
// Stage4
{
const __m128i lo_0_16 = _mm_unpacklo_epi16(in0, in16);
const __m128i hi_0_16 = _mm_unpackhi_epi16(in0, in16);
const __m128i lo_8_24 = _mm_unpacklo_epi16(in8, in24);
const __m128i hi_8_24 = _mm_unpackhi_epi16(in8, in24);
const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);
const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0,
stg4_1, stg4_2, stg4_3, stp2_0, stp2_1,
stp2_2, stp2_3)
stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,
stg4_5, stg4_6, stg4_4, stp2_9, stp2_14,
stp2_10, stp2_13)
stp2_8 = stp1_8;
stp2_15 = stp1_15;
stp2_11 = stp1_11;
stp2_12 = stp1_12;
stp2_16 = _mm_add_epi16(stp1_16, stp1_19);
stp2_17 = _mm_add_epi16(stp1_17, stp1_18);
stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);
stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);
stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);
stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);
stp2_22 = _mm_add_epi16(stp1_22, stp1_21);
stp2_23 = _mm_add_epi16(stp1_23, stp1_20);
stp2_24 = _mm_add_epi16(stp1_24, stp1_27);
stp2_25 = _mm_add_epi16(stp1_25, stp1_26);
stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);
stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);
stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);
stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);
stp2_30 = _mm_add_epi16(stp1_29, stp1_30);
stp2_31 = _mm_add_epi16(stp1_28, stp1_31);
}
// Stage5
{
const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);
const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);
const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);
const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
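// Only stp1_5 and stp1_6 are needed from this butterfly, so the
// multiply/round/shift sequence is inlined here instead of using
// MULTIPLICATION_AND_ADD, which computes four results.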
tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);
tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);
tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);
tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);
tmp0 = _mm_add_epi32(tmp0, rounding);
tmp1 = _mm_add_epi32(tmp1, rounding);
tmp2 = _mm_add_epi32(tmp2, rounding);
tmp3 = _mm_add_epi32(tmp3, rounding);
tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
stp1_5 = _mm_packs_epi32(tmp0, tmp1);
stp1_6 = _mm_packs_epi32(tmp2, tmp3);
stp1_4 = stp2_4;
stp1_7 = stp2_7;
stp1_8 = _mm_add_epi16(stp2_8, stp2_11);
stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);
stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);
stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
stp1_15 = _mm_add_epi16(stp2_15, stp2_12);
stp1_16 = stp2_16;
stp1_17 = stp2_17;
MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,
stg4_5, stg4_4, stg4_5, stp1_18, stp1_29,
stp1_19, stp1_28)
MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,
stg4_4, stg4_6, stg4_4, stp1_20, stp1_27,
stp1_21, stp1_26)
stp1_22 = stp2_22;
stp1_23 = stp2_23;
stp1_24 = stp2_24;
stp1_25 = stp2_25;
stp1_30 = stp2_30;
stp1_31 = stp2_31;
}
// Stage6
{
const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);
stp2_0 = _mm_add_epi16(stp1_0, stp1_7);
stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
stp2_3 = _mm_add_epi16(stp1_3, stp1_4);
stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);
stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);
stp2_8 = stp1_8;
stp2_9 = stp1_9;
stp2_14 = stp1_14;
stp2_15 = stp1_15;
MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12,
stg6_0, stg4_0, stg6_0, stg4_0, stp2_10,
stp2_13, stp2_11, stp2_12)
stp2_16 = _mm_add_epi16(stp1_16, stp1_23);
stp2_17 = _mm_add_epi16(stp1_17, stp1_22);
stp2_18 = _mm_add_epi16(stp1_18, stp1_21);
stp2_19 = _mm_add_epi16(stp1_19, stp1_20);
stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);
stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);
stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);
stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);
stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);
stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);
stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);
stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);
stp2_28 = _mm_add_epi16(stp1_27, stp1_28);
stp2_29 = _mm_add_epi16(stp1_26, stp1_29);
stp2_30 = _mm_add_epi16(stp1_25, stp1_30);
stp2_31 = _mm_add_epi16(stp1_24, stp1_31);
}
// Stage7
{
const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);
const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);
stp1_0 = _mm_add_epi16(stp2_0, stp2_15);
stp1_1 = _mm_add_epi16(stp2_1, stp2_14);
stp1_2 = _mm_add_epi16(stp2_2, stp2_13);
stp1_3 = _mm_add_epi16(stp2_3, stp2_12);
stp1_4 = _mm_add_epi16(stp2_4, stp2_11);
stp1_5 = _mm_add_epi16(stp2_5, stp2_10);
stp1_6 = _mm_add_epi16(stp2_6, stp2_9);
stp1_7 = _mm_add_epi16(stp2_7, stp2_8);
stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);
stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);
stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);
stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);
stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);
stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);
stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);
stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);
stp1_16 = stp2_16;
stp1_17 = stp2_17;
stp1_18 = stp2_18;
stp1_19 = stp2_19;
MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,
stg4_0, stg6_0, stg4_0, stp1_20, stp1_27,
stp1_21, stp1_26)
MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,
stg4_0, stg6_0, stg4_0, stp1_22, stp1_25,
stp1_23, stp1_24)
stp1_28 = stp2_28;
stp1_29 = stp2_29;
stp1_30 = stp2_30;
stp1_31 = stp2_31;
}
// final stage
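// Output row k combines stp1_k with its mirror stp1_(31-k):
// out[k] = stp1_k + stp1_(31-k), out[31-k] = stp1_k - stp1_(31-k).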
if (i < 4) {
// 1-D: Store 32 intermediate results for each 8x32 block.
col[i * 32 + 0] = _mm_add_epi16(stp1_0, stp1_31);
col[i * 32 + 1] = _mm_add_epi16(stp1_1, stp1_30);
col[i * 32 + 2] = _mm_add_epi16(stp1_2, stp1_29);
col[i * 32 + 3] = _mm_add_epi16(stp1_3, stp1_28);
col[i * 32 + 4] = _mm_add_epi16(stp1_4, stp1_27);
col[i * 32 + 5] = _mm_add_epi16(stp1_5, stp1_26);
col[i * 32 + 6] = _mm_add_epi16(stp1_6, stp1_25);
col[i * 32 + 7] = _mm_add_epi16(stp1_7, stp1_24);
col[i * 32 + 8] = _mm_add_epi16(stp1_8, stp1_23);
col[i * 32 + 9] = _mm_add_epi16(stp1_9, stp1_22);
col[i * 32 + 10] = _mm_add_epi16(stp1_10, stp1_21);
col[i * 32 + 11] = _mm_add_epi16(stp1_11, stp1_20);
col[i * 32 + 12] = _mm_add_epi16(stp1_12, stp1_19);
col[i * 32 + 13] = _mm_add_epi16(stp1_13, stp1_18);
col[i * 32 + 14] = _mm_add_epi16(stp1_14, stp1_17);
col[i * 32 + 15] = _mm_add_epi16(stp1_15, stp1_16);
col[i * 32 + 16] = _mm_sub_epi16(stp1_15, stp1_16);
col[i * 32 + 17] = _mm_sub_epi16(stp1_14, stp1_17);
col[i * 32 + 18] = _mm_sub_epi16(stp1_13, stp1_18);
col[i * 32 + 19] = _mm_sub_epi16(stp1_12, stp1_19);
col[i * 32 + 20] = _mm_sub_epi16(stp1_11, stp1_20);
col[i * 32 + 21] = _mm_sub_epi16(stp1_10, stp1_21);
col[i * 32 + 22] = _mm_sub_epi16(stp1_9, stp1_22);
col[i * 32 + 23] = _mm_sub_epi16(stp1_8, stp1_23);
col[i * 32 + 24] = _mm_sub_epi16(stp1_7, stp1_24);
col[i * 32 + 25] = _mm_sub_epi16(stp1_6, stp1_25);
col[i * 32 + 26] = _mm_sub_epi16(stp1_5, stp1_26);
col[i * 32 + 27] = _mm_sub_epi16(stp1_4, stp1_27);
col[i * 32 + 28] = _mm_sub_epi16(stp1_3, stp1_28);
col[i * 32 + 29] = _mm_sub_epi16(stp1_2, stp1_29);
col[i * 32 + 30] = _mm_sub_epi16(stp1_1, stp1_30);
col[i * 32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
} else {
// 2-D: Calculate the results and store them to the destination.
in0 = _mm_add_epi16(stp1_0, stp1_31);
in1 = _mm_add_epi16(stp1_1, stp1_30);
in2 = _mm_add_epi16(stp1_2, stp1_29);
in3 = _mm_add_epi16(stp1_3, stp1_28);
in4 = _mm_add_epi16(stp1_4, stp1_27);
in5 = _mm_add_epi16(stp1_5, stp1_26);
in6 = _mm_add_epi16(stp1_6, stp1_25);
in7 = _mm_add_epi16(stp1_7, stp1_24);
in8 = _mm_add_epi16(stp1_8, stp1_23);
in9 = _mm_add_epi16(stp1_9, stp1_22);
in10 = _mm_add_epi16(stp1_10, stp1_21);
in11 = _mm_add_epi16(stp1_11, stp1_20);
in12 = _mm_add_epi16(stp1_12, stp1_19);
in13 = _mm_add_epi16(stp1_13, stp1_18);
in14 = _mm_add_epi16(stp1_14, stp1_17);
in15 = _mm_add_epi16(stp1_15, stp1_16);
in16 = _mm_sub_epi16(stp1_15, stp1_16);
in17 = _mm_sub_epi16(stp1_14, stp1_17);
in18 = _mm_sub_epi16(stp1_13, stp1_18);
in19 = _mm_sub_epi16(stp1_12, stp1_19);
in20 = _mm_sub_epi16(stp1_11, stp1_20);
in21 = _mm_sub_epi16(stp1_10, stp1_21);
in22 = _mm_sub_epi16(stp1_9, stp1_22);
in23 = _mm_sub_epi16(stp1_8, stp1_23);
in24 = _mm_sub_epi16(stp1_7, stp1_24);
in25 = _mm_sub_epi16(stp1_6, stp1_25);
in26 = _mm_sub_epi16(stp1_5, stp1_26);
in27 = _mm_sub_epi16(stp1_4, stp1_27);
in28 = _mm_sub_epi16(stp1_3, stp1_28);
in29 = _mm_sub_epi16(stp1_2, stp1_29);
in30 = _mm_sub_epi16(stp1_1, stp1_30);
in31 = _mm_sub_epi16(stp1_0, stp1_31);
// Final rounding and shift
in0 = _mm_adds_epi16(in0, final_rounding);
in1 = _mm_adds_epi16(in1, final_rounding);
in2 = _mm_adds_epi16(in2, final_rounding);
in3 = _mm_adds_epi16(in3, final_rounding);
in4 = _mm_adds_epi16(in4, final_rounding);
in5 = _mm_adds_epi16(in5, final_rounding);
in6 = _mm_adds_epi16(in6, final_rounding);
in7 = _mm_adds_epi16(in7, final_rounding);
in8 = _mm_adds_epi16(in8, final_rounding);
in9 = _mm_adds_epi16(in9, final_rounding);
in10 = _mm_adds_epi16(in10, final_rounding);
in11 = _mm_adds_epi16(in11, final_rounding);
in12 = _mm_adds_epi16(in12, final_rounding);
in13 = _mm_adds_epi16(in13, final_rounding);
in14 = _mm_adds_epi16(in14, final_rounding);
in15 = _mm_adds_epi16(in15, final_rounding);
in16 = _mm_adds_epi16(in16, final_rounding);
in17 = _mm_adds_epi16(in17, final_rounding);
in18 = _mm_adds_epi16(in18, final_rounding);
in19 = _mm_adds_epi16(in19, final_rounding);
in20 = _mm_adds_epi16(in20, final_rounding);
in21 = _mm_adds_epi16(in21, final_rounding);
in22 = _mm_adds_epi16(in22, final_rounding);
in23 = _mm_adds_epi16(in23, final_rounding);
in24 = _mm_adds_epi16(in24, final_rounding);
in25 = _mm_adds_epi16(in25, final_rounding);
in26 = _mm_adds_epi16(in26, final_rounding);
in27 = _mm_adds_epi16(in27, final_rounding);
in28 = _mm_adds_epi16(in28, final_rounding);
in29 = _mm_adds_epi16(in29, final_rounding);
in30 = _mm_adds_epi16(in30, final_rounding);
in31 = _mm_adds_epi16(in31, final_rounding);
in0 = _mm_srai_epi16(in0, 6);
in1 = _mm_srai_epi16(in1, 6);
in2 = _mm_srai_epi16(in2, 6);
in3 = _mm_srai_epi16(in3, 6);
in4 = _mm_srai_epi16(in4, 6);
in5 = _mm_srai_epi16(in5, 6);
in6 = _mm_srai_epi16(in6, 6);
in7 = _mm_srai_epi16(in7, 6);
in8 = _mm_srai_epi16(in8, 6);
in9 = _mm_srai_epi16(in9, 6);
in10 = _mm_srai_epi16(in10, 6);
in11 = _mm_srai_epi16(in11, 6);
in12 = _mm_srai_epi16(in12, 6);
in13 = _mm_srai_epi16(in13, 6);
in14 = _mm_srai_epi16(in14, 6);
in15 = _mm_srai_epi16(in15, 6);
in16 = _mm_srai_epi16(in16, 6);
in17 = _mm_srai_epi16(in17, 6);
in18 = _mm_srai_epi16(in18, 6);
in19 = _mm_srai_epi16(in19, 6);
in20 = _mm_srai_epi16(in20, 6);
in21 = _mm_srai_epi16(in21, 6);
in22 = _mm_srai_epi16(in22, 6);
in23 = _mm_srai_epi16(in23, 6);
in24 = _mm_srai_epi16(in24, 6);
in25 = _mm_srai_epi16(in25, 6);
in26 = _mm_srai_epi16(in26, 6);
in27 = _mm_srai_epi16(in27, 6);
in28 = _mm_srai_epi16(in28, 6);
in29 = _mm_srai_epi16(in29, 6);
in30 = _mm_srai_epi16(in30, 6);
in31 = _mm_srai_epi16(in31, 6);
// Store results
_mm_store_si128((__m128i *)output, in0);
_mm_store_si128((__m128i *)(output + half_pitch * 1), in1);
_mm_store_si128((__m128i *)(output + half_pitch * 2), in2);
_mm_store_si128((__m128i *)(output + half_pitch * 3), in3);
_mm_store_si128((__m128i *)(output + half_pitch * 4), in4);
_mm_store_si128((__m128i *)(output + half_pitch * 5), in5);
_mm_store_si128((__m128i *)(output + half_pitch * 6), in6);
_mm_store_si128((__m128i *)(output + half_pitch * 7), in7);
_mm_store_si128((__m128i *)(output + half_pitch * 8), in8);
_mm_store_si128((__m128i *)(output + half_pitch * 9), in9);
_mm_store_si128((__m128i *)(output + half_pitch * 10), in10);
_mm_store_si128((__m128i *)(output + half_pitch * 11), in11);
_mm_store_si128((__m128i *)(output + half_pitch * 12), in12);
_mm_store_si128((__m128i *)(output + half_pitch * 13), in13);
_mm_store_si128((__m128i *)(output + half_pitch * 14), in14);
_mm_store_si128((__m128i *)(output + half_pitch * 15), in15);
_mm_store_si128((__m128i *)(output + half_pitch * 16), in16);
_mm_store_si128((__m128i *)(output + half_pitch * 17), in17);
_mm_store_si128((__m128i *)(output + half_pitch * 18), in18);
_mm_store_si128((__m128i *)(output + half_pitch * 19), in19);
_mm_store_si128((__m128i *)(output + half_pitch * 20), in20);
_mm_store_si128((__m128i *)(output + half_pitch * 21), in21);
_mm_store_si128((__m128i *)(output + half_pitch * 22), in22);
_mm_store_si128((__m128i *)(output + half_pitch * 23), in23);
_mm_store_si128((__m128i *)(output + half_pitch * 24), in24);
_mm_store_si128((__m128i *)(output + half_pitch * 25), in25);
_mm_store_si128((__m128i *)(output + half_pitch * 26), in26);
_mm_store_si128((__m128i *)(output + half_pitch * 27), in27);
_mm_store_si128((__m128i *)(output + half_pitch * 28), in28);
_mm_store_si128((__m128i *)(output + half_pitch * 29), in29);
_mm_store_si128((__m128i *)(output + half_pitch * 30), in30);
_mm_store_si128((__m128i *)(output + half_pitch * 31), in31);
output += 8;
}
}
}
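// Hedged usage sketch (illustration only, not part of this commit): the
// function expects a fully populated, 16-byte-aligned 32x32 block of
// coefficients (the _mm_load_si128 calls above require that alignment),
// and `pitch` is the output stride in bytes, so the int16_t row stride
// is pitch / 2 (half_pitch above):
//
//   DECLARE_ALIGNED(16, int16_t, coeffs[32 * 32]);
//   DECLARE_ALIGNED(16, int16_t, out[32 * 32]);
//   /* ... fill coeffs ... */
//   vp9_short_idct32x32_sse2(coeffs, out, 32 * sizeof(int16_t));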
#endif