use static assertion where suitable

marina.kolpakova 2013-03-20 01:52:40 +04:00
parent 5c1c13e745
commit 4096b54560
7 changed files with 123 additions and 126 deletions

View File

@@ -1148,7 +1148,6 @@ void CV_EssentialMatTest::get_test_array_types_and_sizes( int /*test_case_idx*/,
     dims = 2;
     method = CV_LMEDS << (cvtest::randInt(rng) % 2);
     types[INPUT][0] = CV_MAKETYPE(pt_depth, 1);
     if( 0 && cvtest::randInt(rng) % 2 )

View File

@@ -764,7 +764,7 @@ inline void SVD::solveZ( InputArray m, OutputArray _dst )
 template<typename _Tp, int m, int n, int nm> inline void
 SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt )
 {
-    assert( nm == MIN(m, n));
+    CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
     Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false);
     SVD::compute(_a, _w, _u, _vt);
     CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);
@@ -773,7 +773,7 @@ template<typename _Tp, int m, int n, int nm> inline void
 template<typename _Tp, int m, int n, int nm> inline void
 SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )
 {
-    assert( nm == MIN(m, n));
+    CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
     Mat _a(a, false), _w(w, false);
     SVD::compute(_a, _w);
     CV_Assert(_w.data == (uchar*)&w.val[0]);
@@ -784,7 +784,7 @@ SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u,
                 const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,
                 Matx<_Tp, n, nb>& dst )
 {
-    assert( nm == MIN(m, n));
+    CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
     Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false);
     SVD::backSubst(_w, _u, _vt, _rhs, _dst);
     CV_Assert(_dst.data == (uchar*)&dst.val[0]);
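
With the compile-time check, a size mismatch in these fixed-size SVD overloads now fails at build time instead of tripping a run-time assert(). A minimal sketch of the effect (the function and variable names below are illustrative, not from the commit):

#include <opencv2/core/core.hpp>

void svd_size_check()
{
    cv::Matx33f a;                  // 3x3 input, so MIN(m, n) == 3
    cv::Matx31f w_ok;
    cv::SVD::compute(a, w_ok);      // fine: nm == 3

    // cv::Matx21f w_bad;
    // cv::SVD::compute(a, w_bad);  // nm == 2 != MIN(3, 3): formerly an assert()
    //                              // at run time, now a CV_StaticAssert compile error
}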

View File

@@ -82,7 +82,7 @@
 # endif
 # ifndef CV_StaticAssert
 #   if defined(__GNUC__) && (__GNUC__ > 3) && (__GNUC_MINOR__ > 2)
-#     define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()), 0; })
+#     define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()); })
 #   else
 namespace cv {
     template <bool x> struct CV_StaticAssert_failed;
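
The GCC branch of this macro relies on two GNU extensions: a statement expression and __attribute__((error)). It declares a function that must never be referenced; when the condition constant-folds to true the call is eliminated, and when it folds to false the surviving reference triggers the attached compile-time error. The change above just drops the redundant ", 0" after the conditional. A standalone sketch of the same mechanism, with hypothetical names (GCC-specific, for illustration only):

#define MY_STATIC_ASSERT(cond, msg) \
    ({ extern int __attribute__((error("static assert: " msg))) my_assert_failed(); \
       ((cond) ? 0 : my_assert_failed()); })

void demo()
{
    MY_STATIC_ASSERT(sizeof(int) >= 4, "int is too small");  // folds away, compiles
    // MY_STATIC_ASSERT(sizeof(int) >= 64, "cannot hold");   // would fail to compile
}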
@@ -201,28 +201,28 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0)
 
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1)
 {
-    assert(channels >= 2);
+    CV_StaticAssert(channels >= 2, "Matx should have at least 2 elements.");
     val[0] = v0; val[1] = v1;
     for(int i = 2; i < channels; i++) val[i] = _Tp(0);
 }
 
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2)
 {
-    assert(channels >= 3);
+    CV_StaticAssert(channels >= 3, "Matx should have at least 3 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2;
     for(int i = 3; i < channels; i++) val[i] = _Tp(0);
 }
 
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3)
 {
-    assert(channels >= 4);
+    CV_StaticAssert(channels >= 4, "Matx should have at least 4 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     for(int i = 4; i < channels; i++) val[i] = _Tp(0);
 }
 
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)
 {
-    assert(channels >= 5);
+    CV_StaticAssert(channels >= 5, "Matx should have at least 5 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4;
     for(int i = 5; i < channels; i++) val[i] = _Tp(0);
 }
@@ -230,7 +230,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                                                                    _Tp v4, _Tp v5)
 {
-    assert(channels >= 6);
+    CV_StaticAssert(channels >= 6, "Matx should have at least 6 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5;
     for(int i = 6; i < channels; i++) val[i] = _Tp(0);
@@ -239,7 +239,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                                                                    _Tp v4, _Tp v5, _Tp v6)
 {
-    assert(channels >= 7);
+    CV_StaticAssert(channels >= 7, "Matx should have at least 7 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6;
     for(int i = 7; i < channels; i++) val[i] = _Tp(0);
@@ -248,7 +248,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
 template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                                                                    _Tp v4, _Tp v5, _Tp v6, _Tp v7)
 {
-    assert(channels >= 8);
+    CV_StaticAssert(channels >= 8, "Matx should have at least 8 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
     for(int i = 8; i < channels; i++) val[i] = _Tp(0);
@@ -258,7 +258,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
                                                                    _Tp v4, _Tp v5, _Tp v6, _Tp v7,
                                                                    _Tp v8)
 {
-    assert(channels >= 9);
+    CV_StaticAssert(channels >= 9, "Matx should have at least 9 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
     val[8] = v8;
@@ -269,7 +269,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
                                                                    _Tp v4, _Tp v5, _Tp v6, _Tp v7,
                                                                    _Tp v8, _Tp v9)
 {
-    assert(channels >= 10);
+    CV_StaticAssert(channels >= 10, "Matx should have at least 10 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
     val[8] = v8; val[9] = v9;
@@ -282,7 +282,7 @@ inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                            _Tp v4, _Tp v5, _Tp v6, _Tp v7,
                            _Tp v8, _Tp v9, _Tp v10, _Tp v11)
 {
-    assert(channels == 12);
+    CV_StaticAssert(channels == 12, "Matx should have at least 12 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
     val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
@@ -294,7 +294,7 @@ inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                            _Tp v8, _Tp v9, _Tp v10, _Tp v11,
                            _Tp v12, _Tp v13, _Tp v14, _Tp v15)
 {
-    assert(channels == 16);
+    CV_StaticAssert(channels == 16, "Matx should have at least 16 elements.");
     val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
     val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
     val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
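
For Matx, the element-count checks on these constructors are now enforced by the compiler rather than at run time. A small illustrative sketch (hypothetical snippet, not from the commit):

#include <opencv2/core/core.hpp>

void matx_ctor_check()
{
    cv::Matx23f a(1, 2, 3, 4, 5, 6); // 2x3 matrix: channels == 6, ok
    cv::Matx22f b(1, 2, 3);          // channels == 4 >= 3: val[3] is zero-filled

    // cv::Matx22f c(1, 2, 3, 4, 5); // the 5-argument constructor needs channels >= 5,
    //                               // but a 2x2 Matx has only 4: formerly a run-time
    //                               // assert(), now a compile-time error
}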

View File

@@ -100,61 +100,61 @@ void FAST_t(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
(whitespace-only hunk: the SSE2 block was reindented; the old and new sides contain the same code, reproduced once below)
 #if CV_SSE2
     if( patternSize == 16 )
     {
         for(; j < img.cols - 16 - 3; j += 16, ptr += 16)
         {
             __m128i m0, m1;
             __m128i v0 = _mm_loadu_si128((const __m128i*)ptr);
             __m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta);
             v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta);
             __m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta);
             __m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[quarterPatternSize])), delta);
             __m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[2*quarterPatternSize])), delta);
             __m128i x3 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[3*quarterPatternSize])), delta);
             m0 = _mm_and_si128(_mm_cmpgt_epi8(x0, v0), _mm_cmpgt_epi8(x1, v0));
             m1 = _mm_and_si128(_mm_cmpgt_epi8(v1, x0), _mm_cmpgt_epi8(v1, x1));
             m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x1, v0), _mm_cmpgt_epi8(x2, v0)));
             m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x1), _mm_cmpgt_epi8(v1, x2)));
             m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x2, v0), _mm_cmpgt_epi8(x3, v0)));
             m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x2), _mm_cmpgt_epi8(v1, x3)));
             m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x3, v0), _mm_cmpgt_epi8(x0, v0)));
             m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x3), _mm_cmpgt_epi8(v1, x0)));
             m0 = _mm_or_si128(m0, m1);
             int mask = _mm_movemask_epi8(m0);
             if( mask == 0 )
                 continue;
             if( (mask & 255) == 0 )
             {
                 j -= 8;
                 ptr -= 8;
                 continue;
             }

             __m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0;
             for( k = 0; k < N; k++ )
             {
                 __m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta);
                 m0 = _mm_cmpgt_epi8(x, v0);
                 m1 = _mm_cmpgt_epi8(v1, x);

                 c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0);
                 c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1);

                 max0 = _mm_max_epu8(max0, c0);
                 max1 = _mm_max_epu8(max1, c1);
             }

             max0 = _mm_max_epu8(max0, max1);
             int m = _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16));

             for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
                 if(m & 1)
                 {
                     cornerpos[ncorners++] = j+k;
                     if(nonmax_suppression)
                         curr[j+k] = (uchar)cornerScore<patternSize>(ptr+k, pixel, threshold);
                 }
         }
     }
 #endif
     for( ; j < img.cols - 3; j++, ptr++ )

View File

@@ -208,7 +208,6 @@ private:
 #define SANITY_CHECK_KEYPOINTS(array, ...) ::perf::Regression::addKeypoints(this, #array, array , ## __VA_ARGS__)
 #define SANITY_CHECK_MATCHES(array, ...) ::perf::Regression::addMatches(this, #array, array , ## __VA_ARGS__)
 
-#ifdef HAVE_CUDA
 class CV_EXPORTS GpuPerf
 {
 public:
@@ -216,9 +215,6 @@ public:
 };
 
 # define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice()
-#else
-# define PERF_RUN_GPU() false
-#endif
 
 /*****************************************************************************************\

View File

@@ -1324,12 +1324,14 @@ void perf::sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors
 /*****************************************************************************************\
 *                                   ::perf::GpuPerf
 \*****************************************************************************************/
-#ifdef HAVE_CUDA
 bool perf::GpuPerf::targetDevice()
 {
+#ifdef HAVE_CUDA
     return !param_run_cpu;
-}
+#else
+    return false;
 #endif
+}
 
 /*****************************************************************************************\
 *                                   ::perf::PrintTo
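
Net effect of the two ts changes: GpuPerf and the PERF_RUN_GPU() macro are now declared unconditionally, and only the body of targetDevice() depends on HAVE_CUDA, returning false in CPU-only builds. A sketch of what a call site can now assume (illustrative function, assumes the ts perf framework headers; not from the commit):

void run_case()
{
    if (PERF_RUN_GPU())
    {
        // CUDA build with GPU runs requested: exercise the GPU path
    }
    else
    {
        // CPU-only build, or GPU runs disabled: take the CPU path
    }
}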