Merge pull request #3591 from ilya-lavrenov:sse_avx

Vadim Pisarevsky committed on 2015-01-21 10:46:23 +00:00
31 changed files with 7617 additions and 205 deletions

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -11,6 +11,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,

View File

@@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -593,14 +594,46 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegrees )
{
const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1];
double *angle = (double*)ptrs[2];
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)),
_mm_cvtpd_ps(_mm_loadu_pd(x + k + 2)));
__m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)),
_mm_cvtpd_ps(_mm_loadu_pd(y + k + 2)));
_mm_storeu_ps(buf[0] + k, v_dst0);
_mm_storeu_ps(buf[1] + k, v_dst1);
}
}
#endif
for( ; k < len; k++ )
{
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_src = _mm_loadu_ps(buf[0] + k);
_mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src));
_mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8))));
}
}
#endif
for( ; k < len; k++ )
angle[k] = buf[0][k];
}
ptrs[0] += len*esz1;
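For reference, a minimal standalone sketch (not part of the patch) of the SSE2 pack/unpack idiom the double branches above rely on: two 2-lane double-to-float conversions are merged into one 4-float register with _mm_movelh_ps, and the reverse split shifts the high pair down by 8 bytes before _mm_cvtps_pd.

#include <emmintrin.h>   // SSE2 (also pulls in the SSE _mm_movelh_ps)
#include <cstdio>

int main()
{
    double d[4] = { 1.5, -2.25, 3.0, 4.75 };
    float  f[4];
    double r[4];

    // double -> float: convert two pairs, merge their low halves into one register
    __m128 v_f = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(d)),
                               _mm_cvtpd_ps(_mm_loadu_pd(d + 2)));
    _mm_storeu_ps(f, v_f);

    // float -> double: convert the low pair, shift the high pair down, convert again
    _mm_storeu_pd(r,     _mm_cvtps_pd(v_f));
    _mm_storeu_pd(r + 2, _mm_cvtps_pd(_mm_castsi128_ps(
                             _mm_srli_si128(_mm_castps_si128(v_f), 8))));

    for (int i = 0; i < 4; i++)
        std::printf("%g -> %g -> %g\n", d[i], (double)f[i], r[i]);
    return 0;
}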
@@ -698,14 +731,46 @@ void cartToPolar( InputArray src1, InputArray src2,
double *angle = (double*)ptrs[3];
Magnitude_64f(x, y, (double*)ptrs[2], len);
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)),
_mm_cvtpd_ps(_mm_loadu_pd(x + k + 2)));
__m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)),
_mm_cvtpd_ps(_mm_loadu_pd(y + k + 2)));
_mm_storeu_ps(buf[0] + k, v_dst0);
_mm_storeu_ps(buf[1] + k, v_dst1);
}
}
#endif
for( ; k < len; k++ )
{
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_src = _mm_loadu_ps(buf[0] + k);
_mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src));
_mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8))));
}
}
#endif
for( ; k < len; k++ )
angle[k] = buf[0][k];
}
ptrs[0] += len*esz1;
@@ -771,14 +836,77 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval,
/*static const double cos_a2 = 1;*/
double k1;
int i;
int i = 0;
if( !angle_in_degrees )
k1 = N/(2*CV_PI);
else
k1 = N/360.;
for( i = 0; i < len; i++ )
#if CV_AVX2
if (USE_AVX2)
{
__m128d v_k1 = _mm_set1_pd(k1);
__m128d v_1 = _mm_set1_pd(1);
__m128i v_N1 = _mm_set1_epi32(N - 1);
__m128i v_N4 = _mm_set1_epi32(N >> 2);
__m128d v_sin_a0 = _mm_set1_pd(sin_a0);
__m128d v_sin_a2 = _mm_set1_pd(sin_a2);
__m128d v_cos_a0 = _mm_set1_pd(cos_a0);
for ( ; i <= len - 4; i += 4)
{
__m128 v_angle = _mm_loadu_ps(angle + i);
// 0-1
__m128d v_t = _mm_mul_pd(_mm_cvtps_pd(v_angle), v_k1);
__m128i v_it = _mm_cvtpd_epi32(v_t);
v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it));
__m128i v_sin_idx = _mm_and_si128(v_it, v_N1);
__m128i v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1);
__m128d v_t2 = _mm_mul_pd(v_t, v_t);
__m128d v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t);
__m128d v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1);
__m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8);
__m128d v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8);
__m128d v_sin_val_0 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b),
_mm_mul_pd(v_cos_a, v_sin_b));
__m128d v_cos_val_0 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b),
_mm_mul_pd(v_sin_a, v_sin_b));
// 2-3
v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_angle), 8))), v_k1);
v_it = _mm_cvtpd_epi32(v_t);
v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it));
v_sin_idx = _mm_and_si128(v_it, v_N1);
v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1);
v_t2 = _mm_mul_pd(v_t, v_t);
v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t);
v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1);
v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8);
v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8);
__m128d v_sin_val_1 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b),
_mm_mul_pd(v_cos_a, v_sin_b));
__m128d v_cos_val_1 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b),
_mm_mul_pd(v_sin_a, v_sin_b));
_mm_storeu_ps(sinval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_sin_val_0),
_mm_cvtpd_ps(v_sin_val_1)));
_mm_storeu_ps(cosval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_cos_val_0),
_mm_cvtpd_ps(v_cos_val_1)));
}
}
#endif
for( ; i < len; i++ )
{
double t = angle[i]*k1;
int it = cvRound(t);
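The hunk truncates the scalar body here. For reference, a self-contained scalar model (not the library code) of the table-plus-polynomial scheme that both the scalar loop and the AVX2 gather loop above implement; N, the table and the polynomial coefficients below are illustrative Taylor-based stand-ins for the constants defined earlier in the file, which may be tuned differently.

#include <cmath>
#include <cstdio>

int main()
{
    const double PI = 3.14159265358979323846;
    const int N = 64;                          // table size, power of two
    const double step = 2 * PI / N;
    double sin_table[N];
    for (int i = 0; i < N; i++)
        sin_table[i] = std::sin(i * step);

    // correction polynomials for the residual dt = t*step, |t| <= 0.5
    const double sin_a0 = -step * step * step / 6, sin_a2 = step; // sin(dt) ~ sin_a0*t^3 + sin_a2*t
    const double cos_a0 = -step * step / 2;                       // cos(dt) ~ cos_a0*t^2 + 1
    const double k1 = N / (2 * PI);            // radians -> table-index units

    double angle = 1.234;                      // radians
    double t = angle * k1;
    int it = (int)std::lround(t);              // the library uses cvRound here
    t -= it;

    int sin_idx = it & (N - 1);                // wrap modulo N
    int cos_idx = (N / 4 - sin_idx) & (N - 1); // cos(a) = sin(pi/2 - a)

    double t2 = t * t;
    double sin_b = (sin_a0 * t2 + sin_a2) * t;
    double cos_b = cos_a0 * t2 + 1;
    double sin_a = sin_table[sin_idx], cos_a = sin_table[cos_idx];

    std::printf("sin: %.7f vs %.7f\n", sin_a * cos_b + cos_a * sin_b, std::sin(angle));
    std::printf("cos: %.7f vs %.7f\n", cos_a * cos_b - sin_a * sin_b, std::cos(angle));
    return 0;
}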
@@ -914,6 +1042,16 @@ void polarToCart( InputArray src1, InputArray src2,
vst1q_f32(x + k, vmulq_f32(vld1q_f32(x + k), v_m));
vst1q_f32(y + k, vmulq_f32(vld1q_f32(y + k), v_m));
}
#elif CV_SSE2
if (USE_SSE2)
{
for( ; k <= len - 4; k += 4 )
{
__m128 v_m = _mm_loadu_ps(mag + k);
_mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m));
_mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m));
}
}
#endif
for( ; k < len; k++ )
@@ -939,10 +1077,10 @@ void polarToCart( InputArray src1, InputArray src2,
x[k] = buf[0][k]*m; y[k] = buf[1][k]*m;
}
else
for( k = 0; k < len; k++ )
{
x[k] = buf[0][k]; y[k] = buf[1][k];
}
{
std::memcpy(x, buf[0], sizeof(float) * len);
std::memcpy(y, buf[1], sizeof(float) * len);
}
}
if( ptrs[0] )

View File

@@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,

View File

@@ -192,6 +192,7 @@ struct NoVec
extern volatile bool USE_SSE2;
extern volatile bool USE_SSE4_2;
extern volatile bool USE_AVX;
extern volatile bool USE_AVX2;
enum { BLOCK_SIZE = 1024 };

View File

@@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -72,7 +73,114 @@ struct Sum_SIMD
}
};
#if CV_NEON
#if CV_SSE2
template <>
struct Sum_SIMD<schar, int>
{
int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
__m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
}
int CV_DECL_ALIGNED(16) ar[4];
_mm_store_si128((__m128i*)ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<int, double>
{
int operator () (const int * src0, const uchar * mask, double * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero;
for ( ; x <= len - 4; x += 4)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src0 + x));
v_sum0 = _mm_add_pd(v_sum0, _mm_cvtepi32_pd(v_src));
v_sum1 = _mm_add_pd(v_sum1, _mm_cvtepi32_pd(_mm_srli_si128(v_src, 8)));
}
double CV_DECL_ALIGNED(16) ar[4];
_mm_store_pd(ar, v_sum0);
_mm_store_pd(ar + 2, v_sum1);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<float, double>
{
int operator () (const float * src0, const uchar * mask, double * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero;
for ( ; x <= len - 4; x += 4)
{
__m128 v_src = _mm_loadu_ps(src0 + x);
v_sum0 = _mm_add_pd(v_sum0, _mm_cvtps_pd(v_src));
v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8));
v_sum1 = _mm_add_pd(v_sum1, _mm_cvtps_pd(v_src));
}
double CV_DECL_ALIGNED(16) ar[4];
_mm_store_pd(ar, v_sum0);
_mm_store_pd(ar + 2, v_sum1);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
#elif CV_NEON
template <>
struct Sum_SIMD<uchar, int>
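A note on the tail of each SSE2 specialization above: the four 32-bit lanes of the accumulator are spilled into an aligned array and folded into the per-channel destination by the nested i += cn loop, which is valid only because cn is restricted to 1, 2 or 4, so the channel pattern repeats exactly across the lanes; the return value x / cn is the number of pixels already accumulated, letting the scalar code resume from there. A minimal sketch (not from the patch) of that fold:

#include <cstdio>

int main()
{
    int ar[4] = { 10, 20, 30, 40 }; // per-lane partial sums spilled from the SSE register
    int cn = 2;                     // interleaved input c0 c1 c0 c1 ... -> lanes hold {c0, c1, c0, c1}
    int dst[2] = { 0, 0 };

    for (int i = 0; i < 4; i += cn)
        for (int j = 0; j < cn; ++j)
            dst[j] += ar[j + i];

    std::printf("c0 = %d, c1 = %d\n", dst[0], dst[1]); // prints c0 = 40, c1 = 60
    return 0;
}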
@@ -396,6 +504,38 @@ static int countNonZero_(const T* src, int len )
return nz;
}
#if CV_SSE2
static const uchar * initPopcountTable()
{
static uchar tab[256];
static volatile bool initialized = false;
if( !initialized )
{
// we compute inverse popcount table,
// since we pass (img[x] == 0) mask as index in the table.
unsigned int j = 0u;
#if CV_POPCNT
if (checkHardwareSupport(CV_CPU_POPCNT))
for( ; j < 256u; j++ )
tab[j] = (uchar)(8 - _mm_popcnt_u32(j));
#else
for( ; j < 256u; j++ )
{
int val = 0;
for( int mask = 1; mask < 256; mask += mask )
val += (j & mask) == 0;
tab[j] = (uchar)val;
}
#endif
initialized = true;
}
return tab;
}
#endif
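The table above holds tab[m] = 8 - popcount(m), and the SSE2 loops below index it with an _mm_movemask_epi8 mask in which a bit is set exactly when the corresponding element compared equal to zero, so tab[mask] is directly the number of non-zero elements in a batch of eight. A standalone scalar illustration (not from the patch) of that inversion:

#include <cstdio>

int main()
{
    // build the same inverse popcount table as initPopcountTable()
    unsigned char tab[256];
    for (int j = 0; j < 256; j++)
    {
        int val = 0;
        for (int mask = 1; mask < 256; mask += mask)
            val += (j & mask) == 0;              // count zero bits among the low 8
        tab[j] = (unsigned char)val;
    }

    short src[8] = { 0, 3, 0, 0, -7, 1, 0, 9 };  // 4 non-zero elements
    int maskbits = 0;
    for (int i = 0; i < 8; i++)                  // models cmpeq + pack + movemask
        maskbits |= (src[i] == 0) << i;

    std::printf("mask = 0x%02x, non-zero = %d\n", maskbits, tab[maskbits]); // 0x4d, 4
    return 0;
}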
static int countNonZero8u( const uchar* src, int len )
{
int i=0, nz = 0;
@@ -403,21 +543,7 @@ static int countNonZero8u( const uchar* src, int len )
if(USE_SSE2)//5x-6x
{
__m128i pattern = _mm_setzero_si128 ();
static uchar tab[256];
static volatile bool initialized = false;
if( !initialized )
{
// we compute inverse popcount table,
// since we pass (img[x] == 0) mask as index in the table.
for( int j = 0; j < 256; j++ )
{
int val = 0;
for( int mask = 1; mask < 256; mask += mask )
val += (j & mask) == 0;
tab[j] = (uchar)val;
}
initialized = true;
}
static const uchar * tab = initPopcountTable();
for (; i<=len-16; i+=16)
{
@@ -467,7 +593,22 @@ static int countNonZero8u( const uchar* src, int len )
static int countNonZero16u( const ushort* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero = _mm_setzero_si128 ();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_src = _mm_loadu_si128((const __m128i*)(src + i));
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_cmpeq_epi16(v_src, v_zero), v_zero));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1);
@@ -503,7 +644,27 @@ static int countNonZero16u( const ushort* src, int len )
static int countNonZero32s( const int* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero = _mm_setzero_si128 ();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_src = _mm_loadu_si128((const __m128i*)(src + i));
__m128i v_dst0 = _mm_cmpeq_epi32(v_src, v_zero);
v_src = _mm_loadu_si128((const __m128i*)(src + i + 4));
__m128i v_dst1 = _mm_cmpeq_epi32(v_src, v_zero);
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
int32x4_t v_zero = vdupq_n_s32(0.0f);
@@ -541,7 +702,25 @@ static int countNonZero32s( const int* src, int len )
static int countNonZero32f( const float* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero_i = _mm_setzero_si128();
__m128 v_zero_f = _mm_setzero_ps();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_dst0 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i), v_zero_f));
__m128i v_dst1 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i + 4), v_zero_f));
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
float32x4_t v_zero = vdupq_n_f32(0.0f);
@@ -577,7 +756,34 @@ static int countNonZero32f( const float* src, int len )
}
static int countNonZero64f( const double* src, int len )
{ return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero_i = _mm_setzero_si128();
__m128d v_zero_d = _mm_setzero_pd();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_dst0 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i), v_zero_d));
__m128i v_dst1 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 2), v_zero_d));
__m128i v_dst2 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 4), v_zero_d));
__m128i v_dst3 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 6), v_zero_d));
v_dst0 = _mm_packs_epi32(v_dst0, v_dst1);
v_dst1 = _mm_packs_epi32(v_dst2, v_dst3);
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i));
nz += tab[val];
}
src += i;
}
#endif
return nz + countNonZero_(src, len - i);
}
typedef int (*CountNonZeroFunc)(const uchar*, int);
@@ -594,6 +800,137 @@ static CountNonZeroFunc getCountNonZeroTab(int depth)
return countNonZeroTab[depth];
}
template <typename T, typename ST, typename SQT>
struct SumSqr_SIMD
{
int operator () (const T *, const uchar *, ST *, SQT *, int, int) const
{
return 0;
}
};
#if CV_SSE2
template <>
struct SumSqr_SIMD<uchar, int, int>
{
int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
__m128i v_half = _mm_unpacklo_epi8(v_src, v_zero);
__m128i v_mullo = _mm_mullo_epi16(v_half, v_half);
__m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
v_half = _mm_unpackhi_epi8(v_src, v_zero);
v_mullo = _mm_mullo_epi16(v_half, v_half);
v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero);
__m128i v_mullo = _mm_mullo_epi16(v_src, v_src);
__m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_src, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_src, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
int CV_DECL_ALIGNED(16) ar[8];
_mm_store_si128((__m128i*)ar, v_sum);
_mm_store_si128((__m128i*)(ar + 4), v_sqsum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
{
sum[j] += ar[j + i];
sqsum[j] += ar[4 + j + i];
}
return x / cn;
}
};
template <>
struct SumSqr_SIMD<schar, int, int>
{
int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
__m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
__m128i v_mullo = _mm_mullo_epi16(v_half, v_half);
__m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
v_mullo = _mm_mullo_epi16(v_half, v_half);
v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
__m128i v_mullo = _mm_mullo_epi16(v_src, v_src);
__m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
int CV_DECL_ALIGNED(16) ar[8];
_mm_store_si128((__m128i*)ar, v_sum);
_mm_store_si128((__m128i*)(ar + 4), v_sqsum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
{
sum[j] += ar[j + i];
sqsum[j] += ar[4 + j + i];
}
return x / cn;
}
};
#endif
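The sqsum accumulation above recombines _mm_mullo_epi16 (low 16 bits of each 16x16 product) with _mm_mulhi_epi16 (high 16 bits) via _mm_unpacklo_epi16 / _mm_unpackhi_epi16, which reconstructs the exact 32-bit square of any value that fits in a signed 16-bit lane; for the 8-bit inputs handled here the high half happens to be zero, but the interleave is what widens the products safely. A scalar check (not from the patch):

#include <cstdio>

int main()
{
    int v = 300;                       // any value representable in 16 bits
    int prod = v * v;                  // 90000, does not fit in 16 bits
    int lo = prod & 0xFFFF;            // what _mm_mullo_epi16 keeps
    int hi = prod >> 16;               // what _mm_mulhi_epi16 keeps
    int sq = (hi << 16) | lo;          // what _mm_unpacklo_epi16(mullo, mulhi) yields per lane
    std::printf("%d^2 = %d, recombined = %d\n", v, prod, sq);
    return 0;
}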
template<typename T, typename ST, typename SQT>
static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
@@ -601,14 +938,15 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
if( !mask )
{
int i;
int k = cn % 4;
SumSqr_SIMD<T, ST, SQT> vop;
int i = vop(src0, mask, sum, sqsum, len, cn), k = cn % 4;
src += i * cn;
if( k == 1 )
{
ST s0 = sum[0];
SQT sq0 = sqsum[0];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v = src[0];
s0 += v; sq0 += (SQT)v*v;
@@ -620,7 +958,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
ST s0 = sum[0], s1 = sum[1];
SQT sq0 = sqsum[0], sq1 = sqsum[1];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0 = src[0], v1 = src[1];
s0 += v0; sq0 += (SQT)v0*v0;
@@ -633,7 +971,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
ST s0 = sum[0], s1 = sum[1], s2 = sum[2];
SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0 = src[0], v1 = src[1], v2 = src[2];
s0 += v0; sq0 += (SQT)v0*v0;
@@ -649,7 +987,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
src = src0 + k;
ST s0 = sum[k], s1 = sum[k+1], s2 = sum[k+2], s3 = sum[k+3];
SQT sq0 = sqsum[k], sq1 = sqsum[k+1], sq2 = sqsum[k+2], sq3 = sqsum[k+3];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0, v1;
v0 = src[0], v1 = src[1];
@@ -924,7 +1262,6 @@ cv::Scalar cv::sum( InputArray _src )
}
}
#endif
SumFunc func = getSumFunc(depth);
CV_Assert( cn <= 4 && func != 0 );

View File

@@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -89,6 +90,22 @@
pop ebx
}
}
static void __cpuidex(int* cpuid_data, int, int)
{
__asm
{
push edi
mov edi, cpuid_data
mov eax, 7
mov ecx, 0
cpuid
mov [edi], eax
mov [edi + 4], ebx
mov [edi + 8], ecx
mov [edi + 12], edx
pop edi
}
}
#endif
#endif
@@ -208,7 +225,7 @@ struct HWFeatures
enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
HWFeatures(void)
{
{
memset( have, 0, sizeof(have) );
x86_family = 0;
}
@@ -252,10 +269,54 @@ struct HWFeatures
f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0;
f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0;
f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0;
f.have[CV_CPU_FMA3] = (cpuid_data[2] & (1<<12)) != 0;
f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0;
f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0;
f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0;
f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0)&&((cpuid_data[2] & (1<<27)) != 0));//OS uses XSAVE_XRSTORE and CPU support AVX
// make the second call to the cpuid command in order to get
// information about extended features like AVX2
#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
__cpuidex(cpuid_data, 7, 0);
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
#ifdef __x86_64__
asm __volatile__
(
"movl $7, %%eax\n\t"
"movl $0, %%ecx\n\t"
"cpuid\n\t"
:[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3])
:
: "cc"
);
#else
asm volatile
(
"pushl %%eax\n\t"
"pushl %%edx\n\t"
"movl $7,%%eax\n\t"
"movl $0,%%ecx\n\t"
"cpuid\n\t"
"popl %%edx\n\t"
"popl %%eax\n\t"
: "=b"(cpuid_data[1]), "=c"(cpuid_data[2])
:
: "cc"
);
#endif
#endif
f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0;
f.have[CV_CPU_AVX_512F] = (cpuid_data[1] & (1<<16)) != 0;
f.have[CV_CPU_AVX_512DQ] = (cpuid_data[1] & (1<<17)) != 0;
f.have[CV_CPU_AVX_512IFMA512] = (cpuid_data[1] & (1<<21)) != 0;
f.have[CV_CPU_AVX_512PF] = (cpuid_data[1] & (1<<26)) != 0;
f.have[CV_CPU_AVX_512ER] = (cpuid_data[1] & (1<<27)) != 0;
f.have[CV_CPU_AVX_512CD] = (cpuid_data[1] & (1<<28)) != 0;
f.have[CV_CPU_AVX_512BW] = (cpuid_data[1] & (1<<30)) != 0;
f.have[CV_CPU_AVX_512VL] = (cpuid_data[1] & (1<<31)) != 0;
f.have[CV_CPU_AVX_512VBMI] = (cpuid_data[2] & (1<<1)) != 0;
}
#if defined ANDROID || defined __linux__
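As a cross-check of the leaf-7 query above (EBX bit 5 = AVX2), a one-liner that is not part of the patch and assumes GCC or Clang, where the compiler exposes the same CPUID information through a builtin:

#include <cstdio>

int main()
{
    // __builtin_cpu_supports consults CPUID itself; it should agree with
    // the CV_CPU_AVX2 flag computed by the detection code above.
    std::printf("compiler says AVX2: %d\n", __builtin_cpu_supports("avx2") ? 1 : 0);
    return 0;
}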
@@ -318,6 +379,7 @@ IPPInitializer ippInitializer;
volatile bool USE_SSE2 = featuresEnabled.have[CV_CPU_SSE2];
volatile bool USE_SSE4_2 = featuresEnabled.have[CV_CPU_SSE4_2];
volatile bool USE_AVX = featuresEnabled.have[CV_CPU_AVX];
volatile bool USE_AVX2 = featuresEnabled.have[CV_CPU_AVX2];
void setUseOptimized( bool flag )
{
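For context, a minimal sketch (not part of the patch) of how these switches surface in user code; cv::checkHardwareSupport, cv::setUseOptimized and cv::useOptimized are the existing public entry points, and CV_CPU_AVX2 is the constant filled in by the detection code above.

#include <opencv2/core/utility.hpp>
#include <cstdio>

int main()
{
    std::printf("SSE2: %d\n", (int)cv::checkHardwareSupport(CV_CPU_SSE2));
    std::printf("AVX2: %d\n", (int)cv::checkHardwareSupport(CV_CPU_AVX2));

    // Turning optimizations off clears the USE_SSE2/USE_AVX2/... switches,
    // which forces the scalar fallbacks in the dispatch code above.
    cv::setUseOptimized(false);
    std::printf("optimized: %d\n", (int)cv::useOptimized());
    return 0;
}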

View File

@@ -10,8 +10,7 @@
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,