vpx/vpx_dsp/x86/convolve_avx2.h
Kyle Siefring ae35425ae6 Optimize convolve8 SSSE3 and AVX2 intrinsics
Changed the intrinsics to perform summation similar to the way the assembly does.

The new code diverges from the assembly by preferring unsaturated additions.

Results for haswell

SSSE3
Horiz/Vert  Size  Speedup
Horiz       x4    ~32%
Horiz       x8    ~6%
Vert        x8    ~4%

AVX2
Horiz/Vert  Size  Speedup
Horiz       x16   ~16%
Vert        x16   ~14%

BUG=webm:1471

Change-Id: I7ad98ea688c904b1ba324adf8eb977873c8b8668
2017-10-24 10:39:48 -04:00

106 lines
4.4 KiB
C

/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_DSP_X86_CONVOLVE_AVX2_H_
#define VPX_DSP_X86_CONVOLVE_AVX2_H_
#include <immintrin.h> // AVX2
#include "./vpx_config.h"
// Compatibility shim for the AVX2 128->256-bit broadcast intrinsic, whose
// name and signature varied across early compiler releases:
//  - clang <= 3.3 (and the Apple clang builds based on it, including the
//    5.0 macOS release) only provide _mm_broadcastsi128_si256, which takes
//    a pointer to the 128-bit source.
//  - gcc <= 4.6 likewise only has the pointer-taking
//    _mm_broadcastsi128_si256.
//  - gcc 4.7 has _mm_broadcastsi128_si256 taking the value directly.
//  - Newer compilers provide the standard _mm256_broadcastsi128_si256.
// The macro is #undef'd at the end of this header to avoid leaking it.
#if defined(__clang__)
#if (__clang_major__ > 0 && __clang_major__ < 3) || \
(__clang_major__ == 3 && __clang_minor__ <= 3) || \
(defined(__APPLE__) && defined(__apple_build_version__) && \
((__clang_major__ == 4 && __clang_minor__ <= 2) || \
(__clang_major__ == 5 && __clang_minor__ == 0)))
#define MM256_BROADCASTSI128_SI256(x) \
_mm_broadcastsi128_si256((__m128i const *)&(x))
#else // clang > 3.3, and not 5.0 on macosx.
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif // clang <= 3.3
#elif defined(__GNUC__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6)
#define MM256_BROADCASTSI128_SI256(x) \
_mm_broadcastsi128_si256((__m128i const *)&(x))
#elif __GNUC__ == 4 && __GNUC_MINOR__ == 7
#define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x)
#else // gcc > 4.7
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif // gcc <= 4.6
#else // !(gcc || clang)
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif // __clang__
// Load the 8 16-bit filter taps and expand them into the four register
// layout consumed by the convolve8_* kernels below: f[i] holds tap pair i
// (two adjacent low tap bytes) replicated across every 16-bit lane.
static INLINE void shuffle_filter_avx2(const int16_t *const filter,
                                       __m256i *const f) {
  // Mirror the 128-bit tap vector into both halves of a 256-bit register.
  const __m256i taps =
      MM256_BROADCASTSI128_SI256(_mm_load_si128((const __m128i *)filter));
  // Each shuffle mask repeats the byte indices of one adjacent tap pair,
  // producing operands suitable for _mm256_maddubs_epi16.
  f[0] = _mm256_shuffle_epi8(taps, _mm256_set1_epi16(0x0200u));
  f[1] = _mm256_shuffle_epi8(taps, _mm256_set1_epi16(0x0604u));
  f[2] = _mm256_shuffle_epi8(taps, _mm256_set1_epi16(0x0a08u));
  f[3] = _mm256_shuffle_epi8(taps, _mm256_set1_epi16(0x0e0cu));
}
// Apply an 8-tap filter to 16 output pixels at once.  s[0..3] hold the
// interleaved source byte pairs, f[0..3] the duplicated tap pairs from
// shuffle_filter_avx2.  Returns the rounded Q7 result per 16-bit lane.
static INLINE __m256i convolve8_16_avx2(const __m256i *const s,
                                        const __m256i *const f) {
  // Rounding term: 0.5 in Q7, folded in before the final shift.
  const __m256i round = _mm256_set1_epi16(1 << 6);
  // Multiply adjacent 8-bit sample pairs by the matching taps and sum each
  // pair into a 16-bit lane.
  const __m256i d0 = _mm256_maddubs_epi16(s[0], f[0]);
  const __m256i d1 = _mm256_maddubs_epi16(s[1], f[1]);
  const __m256i d2 = _mm256_maddubs_epi16(s[2], f[2]);
  const __m256i d3 = _mm256_maddubs_epi16(s[3], f[3]);
  // Accumulate with wrapping adds, saturating only on the final step.
  // Pairing (d0 + d2) and (d1 + d3) is the only grouping that avoids
  // 16-bit overflow for all legal filters.
  __m256i acc = _mm256_add_epi16(d0, d2);
  const __m256i rest = _mm256_add_epi16(d1, d3);
  // Add the rounding offset early so only one saturating add is required.
  acc = _mm256_add_epi16(acc, round);
  acc = _mm256_adds_epi16(acc, rest);
  // Round-shift each 16-bit lane by the filter's 7 bits of precision.
  acc = _mm256_srai_epi16(acc, 7);
  return acc;
}
// 8-pixel variant of convolve8_16_avx2: identical arithmetic, but operating
// only on the low 128-bit lane of each source/filter register.  Returns the
// rounded Q7 result per 16-bit lane.
static INLINE __m128i convolve8_8_avx2(const __m256i *const s,
                                       const __m256i *const f) {
  // Rounding term: 0.5 in Q7, folded in before the final shift.
  const __m128i round = _mm_set1_epi16(1 << 6);
  // Use only the low halves of the 256-bit inputs.
  const __m128i s0 = _mm256_castsi256_si128(s[0]);
  const __m128i s1 = _mm256_castsi256_si128(s[1]);
  const __m128i s2 = _mm256_castsi256_si128(s[2]);
  const __m128i s3 = _mm256_castsi256_si128(s[3]);
  // Multiply adjacent 8-bit sample pairs by the matching taps and sum each
  // pair into a 16-bit lane.
  const __m128i d0 = _mm_maddubs_epi16(s0, _mm256_castsi256_si128(f[0]));
  const __m128i d1 = _mm_maddubs_epi16(s1, _mm256_castsi256_si128(f[1]));
  const __m128i d2 = _mm_maddubs_epi16(s2, _mm256_castsi256_si128(f[2]));
  const __m128i d3 = _mm_maddubs_epi16(s3, _mm256_castsi256_si128(f[3]));
  // Accumulate with wrapping adds, saturating only on the final step.
  // Pairing (d0 + d2) and (d1 + d3) is the only grouping that avoids
  // 16-bit overflow for all legal filters; the rounding offset is added
  // early so only one saturating add is required.
  __m128i acc = _mm_add_epi16(d0, d2);
  const __m128i rest = _mm_add_epi16(d1, d3);
  acc = _mm_add_epi16(acc, round);
  acc = _mm_adds_epi16(acc, rest);
  // Round-shift each 16-bit lane by the filter's 7 bits of precision.
  acc = _mm_srai_epi16(acc, 7);
  return acc;
}
#undef MM256_BROADCASTSI128_SI256
#endif // VPX_DSP_X86_CONVOLVE_AVX2_H_