/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_DSP_X86_SYNONYMS_H_
#define AOM_DSP_X86_SYNONYMS_H_

#include <immintrin.h>
#include <string.h>

#include "./aom_config.h"
#include "aom/aom_integer.h"

/**
 * Various reusable shorthands for x86 SIMD intrinsics.
 *
 * Intrinsics prefixed with xx_ operate on or return 128bit XMM registers.
 * Intrinsics prefixed with yy_ operate on or return 256bit YMM registers.
 */

// Loads and stores to do away with the tedium of casting the address
// to the right type. The 32-bit variants go through memcpy rather than a
// pointer dereference so that unaligned addresses remain well-defined C.
static INLINE __m128i xx_loadl_32(const void *a) {
  uint32_t val;
  memcpy(&val, a, sizeof(val));
  return _mm_cvtsi32_si128(val);
}

static INLINE __m128i xx_loadl_64(const void *a) {
  return _mm_loadl_epi64((const __m128i *)a);
}

// Requires a 16-byte aligned address.
static INLINE __m128i xx_load_128(const void *a) {
  return _mm_load_si128((const __m128i *)a);
}

// Accepts any address, aligned or not.
static INLINE __m128i xx_loadu_128(const void *a) {
  return _mm_loadu_si128((const __m128i *)a);
}

static INLINE void xx_storel_32(void *const a, const __m128i v) {
  const uint32_t val = (uint32_t)_mm_cvtsi128_si32(v);
  memcpy(a, &val, sizeof(val));
}

static INLINE void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v);
}

// Requires a 16-byte aligned address.
static INLINE void xx_store_128(void *const a, const __m128i v) {
  _mm_store_si128((__m128i *)a, v);
}

// Accepts any address, aligned or not.
static INLINE void xx_storeu_128(void *const a, const __m128i v) {
  _mm_storeu_si128((__m128i *)a, v);
}
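// A minimal usage sketch of the helpers above (illustrative only; src and
// dst are hypothetical byte buffers):
//
//   uint8_t src[16], dst[16];
//   xx_storeu_128(dst, xx_loadu_128(src));  // copy 16 bytes, any alignment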
// Rounds each unsigned 16-bit lane: (v + 1) >> 1.
static INLINE __m128i xx_round_epu16(__m128i v_val_w) {
  return _mm_avg_epu16(v_val_w, _mm_setzero_si128());
}

// This is equivalent to ROUND_POWER_OF_TWO(v_val_w, bits) on each unsigned
// 16-bit lane. Shifting by bits - 1 first and then averaging with zero
// avoids the 16-bit overflow that adding the bias directly could cause.
static INLINE __m128i xx_roundn_epu16(__m128i v_val_w, int bits) {
  const __m128i v_s_w = _mm_srli_epi16(v_val_w, bits - 1);
  return _mm_avg_epu16(v_s_w, _mm_setzero_si128());
}
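// Worked example of the overflow-safe rounding above: with bits == 4 and a
// lane holding 0xFFFF, the direct form (0xFFFF + 8) >> 4 would overflow 16
// bits, whereas 0xFFFF >> 3 == 0x1FFF and _mm_avg_epu16 then yields
// (0x1FFF + 1) >> 1 == 0x1000, matching ROUND_POWER_OF_TWO(0xFFFF, 4)
// evaluated in wider arithmetic.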
// This is equivalent to ROUND_POWER_OF_TWO(v_val_d, bits) on each unsigned
// 32-bit lane.
static INLINE __m128i xx_roundn_epu32(__m128i v_val_d, int bits) {
  const __m128i v_bias_d = _mm_set1_epi32((1 << bits) >> 1);
  const __m128i v_tmp_d = _mm_add_epi32(v_val_d, v_bias_d);
  return _mm_srli_epi32(v_tmp_d, bits);
}

// This is equivalent to ROUND_POWER_OF_TWO(v_val_d, bits)
static INLINE __m128i xx_roundn_epi32_unsigned(__m128i v_val_d, int bits) {
  const __m128i v_bias_d = _mm_set1_epi32((1 << bits) >> 1);
  const __m128i v_tmp_d = _mm_add_epi32(v_val_d, v_bias_d);
  return _mm_srai_epi32(v_tmp_d, bits);
}

// This is equivalent to ROUND_POWER_OF_TWO_SIGNED(v_val_d, bits)
static INLINE __m128i xx_roundn_epi32(__m128i v_val_d, int bits) {
  const __m128i v_bias_d = _mm_set1_epi32((1 << bits) >> 1);
  // -1 in negative lanes, 0 elsewhere; this reduces the bias by one for
  // negative values so their magnitudes round the same way as positive ones.
  const __m128i v_sign_d = _mm_srai_epi32(v_val_d, 31);
  const __m128i v_tmp_d =
      _mm_add_epi32(_mm_add_epi32(v_val_d, v_bias_d), v_sign_d);
  return _mm_srai_epi32(v_tmp_d, bits);
}
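// For reference, the scalar macros these helpers mirror (defined in
// aom_dsp/aom_dsp_common.h):
//   ROUND_POWER_OF_TWO(value, n)  ->  (((value) + (1 << ((n)-1))) >> (n))
//   ROUND_POWER_OF_TWO_SIGNED rounds negative inputs away from zero, so
//   with n == 4: 24 -> 2 and -24 -> -2.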
#ifdef __SSSE3__
// Horizontally adds the four signed 32-bit lanes of v_d; the 32-bit result
// wraps on overflow.
static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
  v_d = _mm_hadd_epi32(v_d, v_d);
  v_d = _mm_hadd_epi32(v_d, v_d);
  return _mm_cvtsi128_si32(v_d);
}

// Horizontally adds the two signed 64-bit lanes of v_q.
static INLINE int64_t xx_hsum_epi64_si64(__m128i v_q) {
  v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
#if ARCH_X86_64
  return _mm_cvtsi128_si64(v_q);
#else
  {
    int64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, v_q);
    return tmp;
  }
#endif
}

// Sign-extends the four 32-bit lanes to 64 bits before summing, so the
// result cannot overflow.
static INLINE int64_t xx_hsum_epi32_si64(__m128i v_d) {
  const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
  const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
  const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
  return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
}
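// Typical use of the horizontal sums (illustrative only; v_acc_d stands for
// a hypothetical accumulator of four 32-bit partial sums):
//
//   __m128i v_acc_d = _mm_setzero_si128();
//   // ... accumulate per-lane partial sums into v_acc_d ...
//   const int64_t total = xx_hsum_epi32_si64(v_acc_d);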
#endif  // __SSSE3__

#endif  // AOM_DSP_X86_SYNONYMS_H_