From ef29b15c9a360fcbd4c3fe48d3ce2574a8ff37f1 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Mon, 12 Jan 2015 10:59:30 +0300
Subject: [PATCH] reciprocal

---
 modules/core/perf/perf_arithm.cpp |  14 +++
 modules/core/src/arithm.cpp       | 200 +++++++++++++++++++++++++++++-
 2 files changed, 213 insertions(+), 1 deletion(-)

diff --git a/modules/core/perf/perf_arithm.cpp b/modules/core/perf/perf_arithm.cpp
index c6e4c40db..c6c2a1b29 100644
--- a/modules/core/perf/perf_arithm.cpp
+++ b/modules/core/perf/perf_arithm.cpp
@@ -256,3 +256,17 @@ PERF_TEST_P(Size_MatType, divide, TYPICAL_MATS_CORE_ARITHM)
 
     SANITY_CHECK_NOTHING();
 }
+
+PERF_TEST_P(Size_MatType, reciprocal, TYPICAL_MATS_CORE_ARITHM)
+{
+    Size sz = get<0>(GetParam());
+    int type = get<1>(GetParam());
+    cv::Mat b(sz, type), c(sz, type);
+    double scale = 0.5;
+
+    declare.in(b, WARMUP_RNG).out(c);
+
+    TEST_CYCLE() divide(scale, b, c);
+
+    SANITY_CHECK_NOTHING();
+}
diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp
index 5875d61cc..49a9cceae 100644
--- a/modules/core/src/arithm.cpp
+++ b/modules/core/src/arithm.cpp
@@ -2886,6 +2886,202 @@ div_( const T* src1, size_t step1, const T* src2, size_t step2,
     }
 }
 
+template <typename T>
+struct Recip_SIMD
+{
+    int operator() (const T *, T *, int, double) const
+    {
+        return 0;
+    }
+};
+
+#if CV_SSE2
+
+#if CV_SSE4_1
+
+template <>
+struct Recip_SIMD<uchar>
+{
+    int operator() (const uchar * src2, uchar * dst, int width, double scale) const
+    {
+        int x = 0;
+
+        __m128d v_scale = _mm_set1_pd(scale);
+        __m128i v_zero = _mm_setzero_si128();
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x));
+            __m128i v_src2 = _mm_unpacklo_epi8(_v_src2, v_zero);
+
+            __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero);
+            __m128d v_src2d = _mm_cvtepi32_pd(v_src2i);
+            __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            v_src2i = _mm_unpackhi_epi16(v_src2, v_zero);
+            v_src2d = _mm_cvtepi32_pd(v_src2i);
+            v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero);
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero)));
+        }
+
+        return x;
+    }
+};
+
+#endif // CV_SSE4_1
+
+template <>
+struct Recip_SIMD<schar>
+{
+    int operator() (const schar * src2, schar * dst, int width, double scale) const
+    {
+        int x = 0;
+
+        __m128d v_scale = _mm_set1_pd(scale);
+        __m128i v_zero = _mm_setzero_si128();
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x));
+            __m128i v_src2 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _v_src2), 8);
+
+            __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16);
+            __m128d v_src2d = _mm_cvtepi32_pd(v_src2i);
+            __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16);
+            v_src2d = _mm_cvtepi32_pd(v_src2i);
+            v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero);
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero)));
+        }
+
+        return x;
+    }
+};
+
+#if CV_SSE4_1
+
+template <>
+struct Recip_SIMD<ushort>
+{
+    int operator() (const ushort * src2, ushort * dst, int width, double scale) const
+    {
+        int x = 0;
+
+        __m128d v_scale = _mm_set1_pd(scale);
+        __m128i v_zero = _mm_setzero_si128();
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x));
+
+            __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero);
+            __m128d v_src2d = _mm_cvtepi32_pd(v_src2i);
+            __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            v_src2i = _mm_unpackhi_epi16(v_src2, v_zero);
+            v_src2d = _mm_cvtepi32_pd(v_src2i);
+            v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero);
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi32(v_dst_0, v_dst_1)));
+        }
+
+        return x;
+    }
+};
+
+#endif // CV_SSE4_1
+
+template <>
+struct Recip_SIMD<short>
+{
+    int operator() (const short * src2, short * dst, int width, double scale) const
+    {
+        int x = 0;
+
+        __m128d v_scale = _mm_set1_pd(scale);
+        __m128i v_zero = _mm_setzero_si128();
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x));
+
+            __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16);
+            __m128d v_src2d = _mm_cvtepi32_pd(v_src2i);
+            __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16);
+            v_src2d = _mm_cvtepi32_pd(v_src2i);
+            v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8));
+            v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+            __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+
+            __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero);
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi32(v_dst_0, v_dst_1)));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct Recip_SIMD<int>
+{
+    int operator() (const int * src2, int * dst, int width, double scale) const
+    {
+        int x = 0;
+
+        __m128d v_scale = _mm_set1_pd(scale);
+        __m128i v_zero = _mm_setzero_si128();
+
+        for ( ; x <= width - 4; x += 4)
+        {
+            __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x));
+
+            __m128d v_src2d = _mm_cvtepi32_pd(v_src2);
+            __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+
+            v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2, 8));
+            __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d));
+
+            __m128i v_dst = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1)));
+            __m128i v_mask = _mm_cmpeq_epi32(v_src2, v_zero);
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, v_dst));
+        }
+
+        return x;
+    }
+};
+
+#endif
+
 template <typename T> static void
 recip_( const T*, size_t, const T* src2, size_t step2,
         T* dst, size_t step, Size size, double scale )
@@ -2893,9 +3089,11 @@ recip_( const T*, size_t, const T* src2, size_t step2,
     step2 /= sizeof(src2[0]);
     step /= sizeof(dst[0]);
 
+    Recip_SIMD<T> vop;
+
     for( ; size.height--; src2 += step2, dst += step )
     {
-        int i = 0;
+        int i = vop(src2, dst, size.width, scale);
 #if CV_ENABLE_UNROLLED
         for( ; i <= size.width - 4; i += 4 )
        {
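
Note (editor's commentary appended after the patch, not part of it): every
specialization follows the same pattern: widen the integer lanes to 32 bit,
convert to double, divide the broadcast scale by the values with _mm_div_pd,
convert back with _mm_cvtpd_epi32 (round to nearest under the default MXCSR
rounding mode), repack with saturation, and zero every lane where src2 is 0
using a compare mask and _mm_andnot_si128. Below is a minimal scalar sketch of
the semantics those kernels implement; recip_reference is a hypothetical name
used only for illustration, while cv::saturate_cast is the real OpenCV helper
with matching round-and-clamp behaviour:

    #include <opencv2/core.hpp>

    // Sketch: dst[i] = saturate(scale / src2[i]), forced to 0 wherever
    // src2[i] == 0, which is exactly what the compare-and-andnot masking in
    // the SIMD kernels produces. cv::saturate_cast rounds to nearest and,
    // for the 8- and 16-bit types, clamps to T's range, mirroring
    // _mm_cvtpd_epi32 plus the saturating pack instructions.
    template <typename T>
    static void recip_reference(const T* src2, T* dst, int width, double scale)
    {
        for (int i = 0; i < width; i++)
            dst[i] = src2[i] != T(0) ? cv::saturate_cast<T>(scale / src2[i]) : T(0);
    }

Each operator() returns the number of leading elements it processed, so the
new int i = vop(src2, dst, size.width, scale) line in recip_ lets the existing
scalar loop finish the tail whenever size.width is not a multiple of the
vector step (8 for the 8- and 16-bit types, 4 for int).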