diff --git a/apps/traincascade/traincascade.cpp b/apps/traincascade/traincascade.cpp index e6e16ba89..52bacc808 100644 --- a/apps/traincascade/traincascade.cpp +++ b/apps/traincascade/traincascade.cpp @@ -13,6 +13,7 @@ int main( int argc, char* argv[] ) int numPos = 2000; int numNeg = 1000; int numStages = 20; + int numThreads = getNumThreads(); int precalcValBufSize = 256, precalcIdxBufSize = 256; bool baseFormatSave = false; @@ -36,6 +37,7 @@ int main( int argc, char* argv[] ) cout << " [-precalcValBufSize ]" << endl; cout << " [-precalcIdxBufSize ]" << endl; cout << " [-baseFormatSave]" << endl; + cout << " [-numThreads ]" << endl; cascadeParams.printDefaults(); stageParams.printDefaults(); for( int fi = 0; fi < fc; fi++ ) @@ -82,6 +84,10 @@ int main( int argc, char* argv[] ) { baseFormatSave = true; } + else if( !strcmp( argv[i], "-numThreads" ) ) + { + numThreads = atoi(argv[++i]); + } else if ( cascadeParams.scanAttr( argv[i], argv[i+1] ) ) { i++; } else if ( stageParams.scanAttr( argv[i], argv[i+1] ) ) { i++; } else if ( !set ) @@ -98,6 +104,7 @@ int main( int argc, char* argv[] ) } } + setNumThreads( numThreads ); classifier.train( cascadeDirName, vecName, bgName, diff --git a/cmake/OpenCVFindOpenNI2.cmake b/cmake/OpenCVFindOpenNI2.cmake index 8a5f47ca7..83acf1bc5 100644 --- a/cmake/OpenCVFindOpenNI2.cmake +++ b/cmake/OpenCVFindOpenNI2.cmake @@ -21,13 +21,13 @@ if(WIN32) find_library(OPENNI2_LIBRARY "OpenNI2" PATHS $ENV{OPENNI2_LIB64} DOC "OpenNI2 library") endif() elseif(UNIX OR APPLE) - find_file(OPENNI_INCLUDES "OpenNI.h" PATHS "/usr/include/ni2" "/usr/include/openni2" DOC "OpenNI2 c++ interface header") - find_library(OPENNI_LIBRARY "OpenNI2" PATHS "/usr/lib" DOC "OpenNI2 library") + find_file(OPENNI2_INCLUDES "OpenNI.h" PATHS "/usr/include/ni2" "/usr/include/openni2" DOC "OpenNI2 c++ interface header") + find_library(OPENNI2_LIBRARY "OpenNI2" PATHS "/usr/lib" DOC "OpenNI2 library") endif() if(OPENNI2_LIBRARY AND OPENNI2_INCLUDES) set(HAVE_OPENNI2 TRUE) 
-endif() #if(OPENNI_LIBRARY AND OPENNI_INCLUDES) +endif() #if(OPENNI2_LIBRARY AND OPENNI2_INCLUDES) get_filename_component(OPENNI2_LIB_DIR "${OPENNI2_LIBRARY}" PATH) get_filename_component(OPENNI2_INCLUDE_DIR ${OPENNI2_INCLUDES} PATH) diff --git a/doc/tutorials/features2d/akaze_matching/akaze_matching.rst b/doc/tutorials/features2d/akaze_matching/akaze_matching.rst new file mode 100644 index 000000000..3fe5df4f6 --- /dev/null +++ b/doc/tutorials/features2d/akaze_matching/akaze_matching.rst @@ -0,0 +1,161 @@ +.. _akazeMatching: + + +AKAZE local features matching +****************************** + +Introduction +------------------ + +In this tutorial we will learn how to use [AKAZE]_ local features to detect and match keypoints on two images. + +We will find keypoints on a pair of images with given homography matrix, +match them and count the number of inliers (i. e. matches that fit in the given homography). + +You can find expanded version of this example here: https://github.com/pablofdezalc/test_kaze_akaze_opencv + +.. [AKAZE] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. + +Data +------------------ +We are going to use images 1 and 3 from *Graffity* sequence of Oxford dataset. + +.. image:: images/graf.png + :height: 200pt + :width: 320pt + :alt: Graffity + :align: center + +Homography is given by a 3 by 3 matrix: + +.. code-block:: none + + 7.6285898e-01 -2.9922929e-01 2.2567123e+02 + 3.3443473e-01 1.0143901e+00 -7.6999973e+01 + 3.4663091e-04 -1.4364524e-05 1.0000000e+00 + +You can find the images (*graf1.png*, *graf3.png*) and homography (*H1to3p.xml*) in *opencv/samples/cpp*. + +Source Code +=========== +.. literalinclude:: ../../../../samples/cpp/tutorial_code/features2D/AKAZE_match.cpp + :language: cpp + :linenos: + :tab-width: 4 + +Explanation +=========== + +1. **Load images and homography** + + .. 
code-block:: cpp + + Mat img1 = imread("graf1.png", IMREAD_GRAYSCALE); + Mat img2 = imread("graf3.png", IMREAD_GRAYSCALE); + + Mat homography; + FileStorage fs("H1to3p.xml", FileStorage::READ); + fs.getFirstTopLevelNode() >> homography; + + We are loading grayscale images here. Homography is stored in the xml created with FileStorage. + +2. **Detect keypoints and compute descriptors using AKAZE** + + .. code-block:: cpp + + vector kpts1, kpts2; + Mat desc1, desc2; + + AKAZE akaze; + akaze(img1, noArray(), kpts1, desc1); + akaze(img2, noArray(), kpts2, desc2); + + We create AKAZE object and use it's *operator()* functionality. Since we don't need the *mask* parameter, *noArray()* is used. + +3. **Use brute-force matcher to find 2-nn matches** + + .. code-block:: cpp + + BFMatcher matcher(NORM_HAMMING); + vector< vector > nn_matches; + matcher.knnMatch(desc1, desc2, nn_matches, 2); + + We use Hamming distance, because AKAZE uses binary descriptor by default. + +4. **Use 2-nn matches to find correct keypoint matches** + + .. code-block:: cpp + + for(size_t i = 0; i < nn_matches.size(); i++) { + DMatch first = nn_matches[i][0]; + float dist1 = nn_matches[i][0].distance; + float dist2 = nn_matches[i][1].distance; + + if(dist1 < nn_match_ratio * dist2) { + matched1.push_back(kpts1[first.queryIdx]); + matched2.push_back(kpts2[first.trainIdx]); + } + } + + If the closest match is *ratio* closer than the second closest one, then the match is correct. + +5. **Check if our matches fit in the homography model** + + .. 
code-block:: cpp + + for(int i = 0; i < matched1.size(); i++) { + Mat col = Mat::ones(3, 1, CV_64F); + col.at(0) = matched1[i].pt.x; + col.at(1) = matched1[i].pt.y; + + col = homography * col; + col /= col.at(2); + float dist = sqrt( pow(col.at(0) - matched2[i].pt.x, 2) + + pow(col.at(1) - matched2[i].pt.y, 2)); + + if(dist < inlier_threshold) { + int new_i = inliers1.size(); + inliers1.push_back(matched1[i]); + inliers2.push_back(matched2[i]); + good_matches.push_back(DMatch(new_i, new_i, 0)); + } + } + + If the distance from first keypoint's projection to the second keypoint is less than threshold, then it it fits in the homography. + + We create a new set of matches for the inliers, because it is required by the drawing function. + +6. **Output results** + + .. code-block:: cpp + + Mat res; + drawMatches(img1, inliers1, img2, inliers2, good_matches, res); + imwrite("res.png", res); + ... + + Here we save the resulting image and print some statistics. + +Results +======= + +Found matches +-------------- + +.. 
image:: images/res.png + :height: 200pt + :width: 320pt + :alt: Matches + :align: center + +A-KAZE Matching Results +-------------------------- +Keypoints 1: 2943 + +Keypoints 2: 3511 + +Matches: 447 + +Inliers: 308 + +Inliers Ratio: 0.689038 diff --git a/doc/tutorials/features2d/akaze_matching/images/graf.png b/doc/tutorials/features2d/akaze_matching/images/graf.png new file mode 100644 index 000000000..d9213bcdf Binary files /dev/null and b/doc/tutorials/features2d/akaze_matching/images/graf.png differ diff --git a/doc/tutorials/features2d/akaze_matching/images/res.png b/doc/tutorials/features2d/akaze_matching/images/res.png new file mode 100644 index 000000000..23fd5bd48 Binary files /dev/null and b/doc/tutorials/features2d/akaze_matching/images/res.png differ diff --git a/doc/tutorials/features2d/table_of_content_features2d/images/AKAZE_Match_Tutorial_Cover.png b/doc/tutorials/features2d/table_of_content_features2d/images/AKAZE_Match_Tutorial_Cover.png new file mode 100755 index 000000000..fdf2007ba Binary files /dev/null and b/doc/tutorials/features2d/table_of_content_features2d/images/AKAZE_Match_Tutorial_Cover.png differ diff --git a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst index f4107804b..bb79ca32f 100644 --- a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst +++ b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst @@ -183,6 +183,25 @@ Learn about how to use the feature points detectors, descriptors and matching f :height: 90pt :width: 90pt ++ + .. tabularcolumns:: m{100pt} m{300pt} + .. 
cssclass:: toctableopencv + + ===================== ============================================== + |AkazeMatch| **Title:** :ref:`akazeMatching` + + *Compatibility:* > OpenCV 3.0 + + *Author:* Fedor Morozov + + Use *AKAZE* local features to find correspondence between two images. + + ===================== ============================================== + + .. |AkazeMatch| image:: images/AKAZE_Match_Tutorial_Cover.png + :height: 90pt + :width: 90pt + .. raw:: latex \pagebreak @@ -201,3 +220,4 @@ Learn about how to use the feature points detectors, descriptors and matching f ../feature_flann_matcher/feature_flann_matcher ../feature_homography/feature_homography ../detection_of_planar_objects/detection_of_planar_objects + ../akaze_matching/akaze_matching diff --git a/doc/user_guide/ug_traincascade.rst b/doc/user_guide/ug_traincascade.rst index 601f50438..20c1b1683 100644 --- a/doc/user_guide/ug_traincascade.rst +++ b/doc/user_guide/ug_traincascade.rst @@ -200,6 +200,12 @@ Command line arguments of ``opencv_traincascade`` application grouped by purpose This argument is actual in case of Haar-like features. If it is specified, the cascade will be saved in the old format. + * ``-numThreads `` + + Maximum number of threads to use during training. Notice that + the actual number of used threads may be lower, depending on + your machine and compilation options. + #. 
Cascade parameters: diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 8108a61e6..1f64cd2ac 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -244,6 +244,7 @@ typedef signed char schar; /* fundamental constants */ #define CV_PI 3.1415926535897932384626433832795 +#define CV_2PI 6.283185307179586476925286766559 #define CV_LOG2 0.69314718055994530941723212145818 /****************************************************************************************\ diff --git a/modules/core/perf/opencl/perf_dxt.cpp b/modules/core/perf/opencl/perf_dxt.cpp index d0219913b..c0e41485e 100644 --- a/modules/core/perf/opencl/perf_dxt.cpp +++ b/modules/core/perf/opencl/perf_dxt.cpp @@ -54,23 +54,42 @@ namespace ocl { ///////////// dft //////////////////////// -typedef tuple DftParams; +enum OCL_FFT_TYPE +{ + R2R = 0, + C2R = 1, + R2C = 2, + C2C = 3 +}; + +typedef tuple DftParams; typedef TestBaseWithParam DftFixture; -OCL_PERF_TEST_P(DftFixture, Dft, ::testing::Combine(Values(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3), - Values((int)DFT_ROWS, (int)DFT_SCALE, (int)DFT_INVERSE, - (int)DFT_INVERSE | DFT_SCALE, (int)DFT_ROWS | DFT_INVERSE))) +OCL_PERF_TEST_P(DftFixture, Dft, ::testing::Combine(Values(C2C, R2R, C2R, R2C), + Values(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3, Size(512, 512), Size(1024, 1024), Size(2048, 2048)), + Values((int) 0, (int)DFT_ROWS, (int)DFT_SCALE, (int)DFT_INVERSE, + (int)DFT_INVERSE | DFT_SCALE, (int)DFT_ROWS | DFT_INVERSE))) { const DftParams params = GetParam(); - const Size srcSize = get<0>(params); - const int flags = get<1>(params); + const int dft_type = get<0>(params); + const Size srcSize = get<1>(params); + int flags = get<2>(params); - UMat src(srcSize, CV_32FC2), dst(srcSize, CV_32FC2); + int in_cn, out_cn; + switch (dft_type) + { + case R2R: flags |= cv::DFT_REAL_OUTPUT; in_cn = 1; out_cn = 1; break; + case C2R: flags |= cv::DFT_REAL_OUTPUT; in_cn = 2; out_cn = 
2; break; + case R2C: flags |= cv::DFT_COMPLEX_OUTPUT; in_cn = 1; out_cn = 2; break; + case C2C: flags |= cv::DFT_COMPLEX_OUTPUT; in_cn = 2; out_cn = 2; break; + } + + UMat src(srcSize, CV_MAKE_TYPE(CV_32F, in_cn)), dst(srcSize, CV_MAKE_TYPE(CV_32F, out_cn)); declare.in(src, WARMUP_RNG).out(dst); - OCL_TEST_CYCLE() cv::dft(src, dst, flags | DFT_COMPLEX_OUTPUT); + OCL_TEST_CYCLE() cv::dft(src, dst, flags); - SANITY_CHECK(dst, 1e-3); + SANITY_CHECK(dst, 1e-5, ERROR_RELATIVE); } ///////////// MulSpectrums //////////////////////// diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index b98bf830e..29501a071 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -54,21 +54,23 @@ namespace cv struct NOP {}; -#if CV_SSE2 +#if CV_SSE2 || CV_NEON #define FUNCTOR_TEMPLATE(name) \ template struct name {} FUNCTOR_TEMPLATE(VLoadStore128); +#if CV_SSE2 FUNCTOR_TEMPLATE(VLoadStore64); FUNCTOR_TEMPLATE(VLoadStore128Aligned); +#endif #endif template void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size sz) { -#if CV_SSE2 +#if CV_SSE2 || CV_NEON VOp vop; #endif Op op; @@ -79,9 +81,11 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si { int x = 0; +#if CV_NEON || CV_SSE2 #if CV_SSE2 if( USE_SSE2 ) { +#endif for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) ) { typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -91,8 +95,10 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si VLoadStore128::store(dst + x , r0); VLoadStore128::store(dst + x + 16/sizeof(T), r1); } +#if CV_SSE2 } #endif +#endif #if CV_SSE2 if( USE_SSE2 ) { @@ -125,7 +131,7 @@ template void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size sz) { -#if CV_SSE2 +#if CV_SSE2 || CV_NEON Op32 op32; #endif Op op; @@ -153,9 +159,11 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, } } 
#endif +#if CV_NEON || CV_SSE2 #if CV_SSE2 if( USE_SSE2 ) { +#endif for( ; x <= sz.width - 8; x += 8 ) { typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -165,8 +173,10 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, VLoadStore128::store(dst + x , r0); VLoadStore128::store(dst + x + 4, r1); } +#if CV_SSE2 } #endif +#endif #if CV_ENABLE_UNROLLED for( ; x <= sz.width - 4; x += 4 ) { @@ -383,7 +393,98 @@ FUNCTOR_TEMPLATE(VNot); FUNCTOR_CLOSURE_1arg(VNot, uchar, return _mm_xor_si128(_mm_set1_epi32(-1), a)); #endif -#if CV_SSE2 +#if CV_NEON + +#define FUNCTOR_LOADSTORE(name, template_arg, register_type, load_body, store_body)\ + template <> \ + struct name{ \ + typedef register_type reg_type; \ + static reg_type load(const template_arg * p) { return load_body (p);}; \ + static void store(template_arg * p, reg_type v) { store_body (p, v);}; \ + } + +#define FUNCTOR_CLOSURE_2arg(name, template_arg, body)\ + template<> \ + struct name \ + { \ + VLoadStore128::reg_type operator()( \ + VLoadStore128::reg_type a, \ + VLoadStore128::reg_type b) const \ + { \ + return body; \ + }; \ + } + +#define FUNCTOR_CLOSURE_1arg(name, template_arg, body)\ + template<> \ + struct name \ + { \ + VLoadStore128::reg_type operator()( \ + VLoadStore128::reg_type a, \ + VLoadStore128::reg_type ) const \ + { \ + return body; \ + }; \ + } + +FUNCTOR_LOADSTORE(VLoadStore128, uchar, uint8x16_t, vld1q_u8 , vst1q_u8 ); +FUNCTOR_LOADSTORE(VLoadStore128, schar, int8x16_t, vld1q_s8 , vst1q_s8 ); +FUNCTOR_LOADSTORE(VLoadStore128, ushort, uint16x8_t, vld1q_u16, vst1q_u16); +FUNCTOR_LOADSTORE(VLoadStore128, short, int16x8_t, vld1q_s16, vst1q_s16); +FUNCTOR_LOADSTORE(VLoadStore128, int, int32x4_t, vld1q_s32, vst1q_s32); +FUNCTOR_LOADSTORE(VLoadStore128, float, float32x4_t, vld1q_f32, vst1q_f32); + +FUNCTOR_TEMPLATE(VAdd); +FUNCTOR_CLOSURE_2arg(VAdd, uchar, vqaddq_u8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, schar, vqaddq_s8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, 
ushort, vqaddq_u16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, short, vqaddq_s16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, int, vaddq_s32 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, float, vaddq_f32 (a, b)); + +FUNCTOR_TEMPLATE(VSub); +FUNCTOR_CLOSURE_2arg(VSub, uchar, vqsubq_u8 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, schar, vqsubq_s8 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, ushort, vqsubq_u16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, short, vqsubq_s16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, int, vsubq_s32 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, float, vsubq_f32 (a, b)); + +FUNCTOR_TEMPLATE(VMin); +FUNCTOR_CLOSURE_2arg(VMin, uchar, vminq_u8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, schar, vminq_s8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, ushort, vminq_u16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, short, vminq_s16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, int, vminq_s32(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, float, vminq_f32(a, b)); + +FUNCTOR_TEMPLATE(VMax); +FUNCTOR_CLOSURE_2arg(VMax, uchar, vmaxq_u8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, schar, vmaxq_s8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, ushort, vmaxq_u16(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, short, vmaxq_s16(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, int, vmaxq_s32(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, float, vmaxq_f32(a, b)); + +FUNCTOR_TEMPLATE(VAbsDiff); +FUNCTOR_CLOSURE_2arg(VAbsDiff, uchar, vabdq_u8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAbsDiff, schar, vqabsq_s8 (vqsubq_s8(a, b))); +FUNCTOR_CLOSURE_2arg(VAbsDiff, ushort, vabdq_u16 (a, b)); +FUNCTOR_CLOSURE_2arg(VAbsDiff, short, vqabsq_s16(vqsubq_s16(a, b))); +FUNCTOR_CLOSURE_2arg(VAbsDiff, int, vabdq_s32 (a, b)); +FUNCTOR_CLOSURE_2arg(VAbsDiff, float, vabdq_f32 (a, b)); + +FUNCTOR_TEMPLATE(VAnd); +FUNCTOR_CLOSURE_2arg(VAnd, uchar, vandq_u8(a, b)); +FUNCTOR_TEMPLATE(VOr); +FUNCTOR_CLOSURE_2arg(VOr , uchar, vorrq_u8(a, b)); +FUNCTOR_TEMPLATE(VXor); +FUNCTOR_CLOSURE_2arg(VXor, uchar, veorq_u8(a, b)); +FUNCTOR_TEMPLATE(VNot); +FUNCTOR_CLOSURE_1arg(VNot, uchar, vmvnq_u8(a )); +#endif + +#if CV_SSE2 || CV_NEON #define IF_SIMD(op) op #else 
#define IF_SIMD(op) NOP diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index d3327940d..bbe0f7400 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -43,6 +43,7 @@ #include "opencv2/core/opencl/runtime/opencl_clamdfft.hpp" #include "opencv2/core/opencl/runtime/opencl_core.hpp" #include "opencl_kernels.hpp" +#include namespace cv { @@ -1781,6 +1782,375 @@ static bool ippi_DFT_R_32F(const Mat& src, Mat& dst, bool inv, int norm_flag) #endif } +#ifdef HAVE_OPENCL + +namespace cv +{ + +enum FftType +{ + R2R = 0, // real to CCS in case forward transform, CCS to real otherwise + C2R = 1, // complex to real in case inverse transform + R2C = 2, // real to complex in case forward transform + C2C = 3 // complex to complex +}; + +struct OCL_FftPlan +{ +private: + UMat twiddles; + String buildOptions; + int thread_count; + bool status; + int dft_size; + +public: + OCL_FftPlan(int _size): dft_size(_size), status(true) + { + int min_radix; + std::vector radixes, blocks; + ocl_getRadixes(dft_size, radixes, blocks, min_radix); + thread_count = dft_size / min_radix; + + if (thread_count > (int) ocl::Device::getDefault().maxWorkGroupSize()) + { + status = false; + return; + } + + // generate string with radix calls + String radix_processing; + int n = 1, twiddle_size = 0; + for (size_t i=0; i 1) + radix_processing += format("fft_radix%d_B%d(smem,twiddles+%d,ind,%d,%d);", radix, block, twiddle_size, n, dft_size/radix); + else + radix_processing += format("fft_radix%d(smem,twiddles+%d,ind,%d,%d);", radix, twiddle_size, n, dft_size/radix); + twiddle_size += (radix-1)*n; + n *= radix; + } + + Mat tw(1, twiddle_size, CV_32FC2); + float* ptr = tw.ptr(); + int ptr_index = 0; + + n = 1; + for (size_t i=0; i& radixes, std::vector& blocks, int& min_radix) + { + int factors[34]; + int nf = DFTFactorize(cols, factors); + + int n = 1; + int factor_index = 0; + min_radix = INT_MAX; + + // 2^n transforms + if ((factors[factor_index] & 1) == 0) + { + for( ; n < 
factors[factor_index];) + { + int radix = 2, block = 1; + if (8*n <= factors[0]) + radix = 8; + else if (4*n <= factors[0]) + { + radix = 4; + if (cols % 12 == 0) + block = 3; + else if (cols % 8 == 0) + block = 2; + } + else + { + if (cols % 10 == 0) + block = 5; + else if (cols % 8 == 0) + block = 4; + else if (cols % 6 == 0) + block = 3; + else if (cols % 4 == 0) + block = 2; + } + + radixes.push_back(radix); + blocks.push_back(block); + min_radix = min(min_radix, block*radix); + n *= radix; + } + factor_index++; + } + + // all the other transforms + for( ; factor_index < nf; factor_index++) + { + int radix = factors[factor_index], block = 1; + if (radix == 3) + { + if (cols % 12 == 0) + block = 4; + else if (cols % 9 == 0) + block = 3; + else if (cols % 6 == 0) + block = 2; + } + else if (radix == 5) + { + if (cols % 10 == 0) + block = 2; + } + radixes.push_back(radix); + blocks.push_back(block); + min_radix = min(min_radix, block*radix); + } + } +}; + +class OCL_FftPlanCache +{ +public: + static OCL_FftPlanCache & getInstance() + { + static OCL_FftPlanCache planCache; + return planCache; + } + + Ptr getFftPlan(int dft_size) + { + std::map >::iterator f = planStorage.find(dft_size); + if (f != planStorage.end()) + { + return f->second; + } + else + { + Ptr newPlan = Ptr(new OCL_FftPlan(dft_size)); + planStorage[dft_size] = newPlan; + return newPlan; + } + } + + ~OCL_FftPlanCache() + { + planStorage.clear(); + } + +protected: + OCL_FftPlanCache() : + planStorage() + { + } + std::map > planStorage; +}; + +static bool ocl_dft_rows(InputArray _src, OutputArray _dst, int nonzero_rows, int flags, int fftType) +{ + Ptr plan = OCL_FftPlanCache::getInstance().getFftPlan(_src.cols()); + return plan->enqueueTransform(_src, _dst, nonzero_rows, flags, fftType, true); +} + +static bool ocl_dft_cols(InputArray _src, OutputArray _dst, int nonzero_cols, int flags, int fftType) +{ + Ptr plan = OCL_FftPlanCache::getInstance().getFftPlan(_src.rows()); + return 
plan->enqueueTransform(_src, _dst, nonzero_cols, flags, fftType, false); +} + +static bool ocl_dft(InputArray _src, OutputArray _dst, int flags, int nonzero_rows) +{ + int type = _src.type(), cn = CV_MAT_CN(type); + Size ssize = _src.size(); + if ( !(type == CV_32FC1 || type == CV_32FC2) ) + return false; + + // if is not a multiplication of prime numbers { 2, 3, 5 } + if (ssize.area() != getOptimalDFTSize(ssize.area())) + return false; + + UMat src = _src.getUMat(); + int complex_input = cn == 2 ? 1 : 0; + int complex_output = (flags & DFT_COMPLEX_OUTPUT) != 0; + int real_input = cn == 1 ? 1 : 0; + int real_output = (flags & DFT_REAL_OUTPUT) != 0; + bool inv = (flags & DFT_INVERSE) != 0 ? 1 : 0; + + if( nonzero_rows <= 0 || nonzero_rows > _src.rows() ) + nonzero_rows = _src.rows(); + bool is1d = (flags & DFT_ROWS) != 0 || nonzero_rows == 1; + + // if output format is not specified + if (complex_output + real_output == 0) + { + if (real_input) + real_output = 1; + else + complex_output = 1; + } + + FftType fftType = (FftType)(complex_input << 0 | complex_output << 1); + + // Forward Complex to CCS not supported + if (fftType == C2R && !inv) + fftType = C2C; + + // Inverse CCS to Complex not supported + if (fftType == R2C && inv) + fftType = R2R; + + UMat output; + if (fftType == C2C || fftType == R2C) + { + // complex output + _dst.create(src.size(), CV_32FC2); + output = _dst.getUMat(); + } + else + { + // real output + if (is1d) + { + _dst.create(src.size(), CV_32FC1); + output = _dst.getUMat(); + } + else + { + _dst.create(src.size(), CV_32FC1); + output.create(src.size(), CV_32FC2); + } + } + + if (!inv) + { + if (!ocl_dft_rows(src, output, nonzero_rows, flags, fftType)) + return false; + + if (!is1d) + { + int nonzero_cols = fftType == R2R ? 
output.cols/2 + 1 : output.cols; + if (!ocl_dft_cols(output, _dst, nonzero_cols, flags, fftType)) + return false; + } + } + else + { + if (fftType == C2C) + { + // complex output + if (!ocl_dft_rows(src, output, nonzero_rows, flags, fftType)) + return false; + + if (!is1d) + { + if (!ocl_dft_cols(output, output, output.cols, flags, fftType)) + return false; + } + } + else + { + if (is1d) + { + if (!ocl_dft_rows(src, output, nonzero_rows, flags, fftType)) + return false; + } + else + { + int nonzero_cols = src.cols/2 + 1; + if (!ocl_dft_cols(src, output, nonzero_cols, flags, fftType)) + return false; + + if (!ocl_dft_rows(output, _dst, nonzero_rows, flags, fftType)) + return false; + } + } + } + return true; +} + +} // namespace cv; + +#endif + #ifdef HAVE_CLAMDFFT namespace cv { @@ -1791,14 +2161,6 @@ namespace cv { CV_Assert(s == CLFFT_SUCCESS); \ } -enum FftType -{ - R2R = 0, // real to real - C2R = 1, // opencl HERMITIAN_INTERLEAVED to real - R2C = 2, // real to opencl HERMITIAN_INTERLEAVED - C2C = 3 // complex to complex -}; - class PlanCache { struct FftPlan @@ -1923,7 +2285,7 @@ public: } // no baked plan is found, so let's create a new one - FftPlan * newPlan = new FftPlan(dft_size, src_step, dst_step, doubleFP, inplace, flags, fftType); + Ptr newPlan = Ptr(new FftPlan(dft_size, src_step, dst_step, doubleFP, inplace, flags, fftType)); planStorage.push_back(newPlan); return newPlan->plHandle; @@ -1931,8 +2293,6 @@ public: ~PlanCache() { - for (std::vector::iterator i = planStorage.begin(), end = planStorage.end(); i != end; ++i) - delete (*i); planStorage.clear(); } @@ -1942,7 +2302,7 @@ protected: { } - std::vector planStorage; + std::vector > planStorage; }; extern "C" { @@ -1960,7 +2320,7 @@ static void CL_CALLBACK oclCleanupCallback(cl_event e, cl_int, void *p) } -static bool ocl_dft(InputArray _src, OutputArray _dst, int flags) +static bool ocl_dft_amdfft(InputArray _src, OutputArray _dst, int flags) { int type = _src.type(), depth = CV_MAT_DEPTH(type), 
cn = CV_MAT_CN(type); Size ssize = _src.size(); @@ -2019,7 +2379,6 @@ static bool ocl_dft(InputArray _src, OutputArray _dst, int flags) tmpBuffer.addref(); clSetEventCallback(e, CL_COMPLETE, oclCleanupCallback, tmpBuffer.u); - return true; } @@ -2034,7 +2393,12 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) #ifdef HAVE_CLAMDFFT CV_OCL_RUN(ocl::haveAmdFft() && ocl::Device::getDefault().type() != ocl::Device::TYPE_CPU && _dst.isUMat() && _src0.dims() <= 2 && nonzero_rows == 0, - ocl_dft(_src0, _dst, flags)) + ocl_dft_amdfft(_src0, _dst, flags)) +#endif + +#ifdef HAVE_OPENCL + CV_OCL_RUN(_dst.isUMat() && _src0.dims() <= 2, + ocl_dft(_src0, _dst, flags, nonzero_rows)) #endif static DFTFunc dft_tbl[6] = @@ -2046,10 +2410,8 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) (DFTFunc)RealDFT_64f, (DFTFunc)CCSIDFT_64f }; - AutoBuffer buf; void *spec = 0; - Mat src0 = _src0.getMat(), src = src0; int prev_len = 0, stage = 0; bool inv = (flags & DFT_INVERSE) != 0; diff --git a/modules/core/src/opencl/fft.cl b/modules/core/src/opencl/fft.cl new file mode 100644 index 000000000..1268c4d6e --- /dev/null +++ b/modules/core/src/opencl/fft.cl @@ -0,0 +1,864 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +// Copyright (C) 2014, Itseez, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+ +#define SQRT_2 0.707106781188f +#define sin_120 0.866025403784f +#define fft5_2 0.559016994374f +#define fft5_3 -0.951056516295f +#define fft5_4 -1.538841768587f +#define fft5_5 0.363271264002f + +__attribute__((always_inline)) +float2 mul_float2(float2 a, float2 b) { + return (float2)(fma(a.x, b.x, -a.y * b.y), fma(a.x, b.y, a.y * b.x)); +} + +__attribute__((always_inline)) +float2 twiddle(float2 a) { + return (float2)(a.y, -a.x); +} + +__attribute__((always_inline)) +void butterfly2(float2 a0, float2 a1, __local float2* smem, __global const float2* twiddles, + const int x, const int block_size) +{ + const int k = x & (block_size - 1); + a1 = mul_float2(twiddles[k], a1); + const int dst_ind = (x << 1) - k; + + smem[dst_ind] = a0 + a1; + smem[dst_ind+block_size] = a0 - a1; +} + +__attribute__((always_inline)) +void butterfly4(float2 a0, float2 a1, float2 a2, float2 a3, __local float2* smem, __global const float2* twiddles, + const int x, const int block_size) +{ + const int k = x & (block_size - 1); + a1 = mul_float2(twiddles[k], a1); + a2 = mul_float2(twiddles[k + block_size], a2); + a3 = mul_float2(twiddles[k + 2*block_size], a3); + + const int dst_ind = ((x - k) << 2) + k; + + float2 b0 = a0 + a2; + a2 = a0 - a2; + float2 b1 = a1 + a3; + a3 = twiddle(a1 - a3); + + smem[dst_ind] = b0 + b1; + smem[dst_ind + block_size] = a2 + a3; + smem[dst_ind + 2*block_size] = b0 - b1; + smem[dst_ind + 3*block_size] = a2 - a3; +} + +__attribute__((always_inline)) +void butterfly3(float2 a0, float2 a1, float2 a2, __local float2* smem, __global const float2* twiddles, + const int x, const int block_size) +{ + const int k = x % block_size; + a1 = mul_float2(twiddles[k], a1); + a2 = mul_float2(twiddles[k+block_size], a2); + const int dst_ind = ((x - k) * 3) + k; + + float2 b1 = a1 + a2; + a2 = twiddle(sin_120*(a1 - a2)); + float2 b0 = a0 - (float2)(0.5f)*b1; + + smem[dst_ind] = a0 + b1; + smem[dst_ind + block_size] = b0 + a2; + smem[dst_ind + 2*block_size] = b0 - a2; +} + 
+__attribute__((always_inline)) +void butterfly5(float2 a0, float2 a1, float2 a2, float2 a3, float2 a4, __local float2* smem, __global const float2* twiddles, + const int x, const int block_size) +{ + const int k = x % block_size; + a1 = mul_float2(twiddles[k], a1); + a2 = mul_float2(twiddles[k + block_size], a2); + a3 = mul_float2(twiddles[k+2*block_size], a3); + a4 = mul_float2(twiddles[k+3*block_size], a4); + + const int dst_ind = ((x - k) * 5) + k; + __local float2* dst = smem + dst_ind; + + float2 b0, b1, b5; + + b1 = a1 + a4; + a1 -= a4; + + a4 = a3 + a2; + a3 -= a2; + + a2 = b1 + a4; + b0 = a0 - (float2)0.25f * a2; + + b1 = fft5_2 * (b1 - a4); + a4 = fft5_3 * (float2)(-a1.y - a3.y, a1.x + a3.x); + b5 = (float2)(a4.x - fft5_5 * a1.y, a4.y + fft5_5 * a1.x); + + a4.x += fft5_4 * a3.y; + a4.y -= fft5_4 * a3.x; + + a1 = b0 + b1; + b0 -= b1; + + dst[0] = a0 + a2; + dst[block_size] = a1 + a4; + dst[2 * block_size] = b0 + b5; + dst[3 * block_size] = b0 - b5; + dst[4 * block_size] = a1 - a4; +} + +__attribute__((always_inline)) +void fft_radix2(__local float2* smem, __global const float2* twiddles, const int x, const int block_size, const int t) +{ + float2 a0, a1; + + if (x < t) + { + a0 = smem[x]; + a1 = smem[x+t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x < t) + butterfly2(a0, a1, smem, twiddles, x, block_size); + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix2_B2(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/2; + float2 a0, a1, a2, a3; + + if (x1 < t/2) + { + a0 = smem[x1]; a1 = smem[x1+t]; + a2 = smem[x2]; a3 = smem[x2+t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/2) + { + butterfly2(a0, a1, smem, twiddles, x1, block_size); + butterfly2(a2, a3, smem, twiddles, x2, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix2_B3(__local float2* smem, __global const float2* twiddles, 
const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/3; + const int x3 = x1 + 2*t/3; + float2 a0, a1, a2, a3, a4, a5; + + if (x1 < t/3) + { + a0 = smem[x1]; a1 = smem[x1+t]; + a2 = smem[x2]; a3 = smem[x2+t]; + a4 = smem[x3]; a5 = smem[x3+t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/3) + { + butterfly2(a0, a1, smem, twiddles, x1, block_size); + butterfly2(a2, a3, smem, twiddles, x2, block_size); + butterfly2(a4, a5, smem, twiddles, x3, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix2_B4(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int thread_block = t/4; + const int x2 = x1 + thread_block; + const int x3 = x1 + 2*thread_block; + const int x4 = x1 + 3*thread_block; + float2 a0, a1, a2, a3, a4, a5, a6, a7; + + if (x1 < t/4) + { + a0 = smem[x1]; a1 = smem[x1+t]; + a2 = smem[x2]; a3 = smem[x2+t]; + a4 = smem[x3]; a5 = smem[x3+t]; + a6 = smem[x4]; a7 = smem[x4+t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/4) + { + butterfly2(a0, a1, smem, twiddles, x1, block_size); + butterfly2(a2, a3, smem, twiddles, x2, block_size); + butterfly2(a4, a5, smem, twiddles, x3, block_size); + butterfly2(a6, a7, smem, twiddles, x4, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix2_B5(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int thread_block = t/5; + const int x2 = x1 + thread_block; + const int x3 = x1 + 2*thread_block; + const int x4 = x1 + 3*thread_block; + const int x5 = x1 + 4*thread_block; + float2 a0, a1, a2, a3, a4, a5, a6, a7, a8, a9; + + if (x1 < t/5) + { + a0 = smem[x1]; a1 = smem[x1+t]; + a2 = smem[x2]; a3 = smem[x2+t]; + a4 = smem[x3]; a5 = smem[x3+t]; + a6 = smem[x4]; a7 = smem[x4+t]; + a8 = smem[x5]; a9 = smem[x5+t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/5) + { + 
butterfly2(a0, a1, smem, twiddles, x1, block_size); + butterfly2(a2, a3, smem, twiddles, x2, block_size); + butterfly2(a4, a5, smem, twiddles, x3, block_size); + butterfly2(a6, a7, smem, twiddles, x4, block_size); + butterfly2(a8, a9, smem, twiddles, x5, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix4(__local float2* smem, __global const float2* twiddles, const int x, const int block_size, const int t) +{ + float2 a0, a1, a2, a3; + + if (x < t) + { + a0 = smem[x]; a1 = smem[x+t]; a2 = smem[x+2*t]; a3 = smem[x+3*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x < t) + butterfly4(a0, a1, a2, a3, smem, twiddles, x, block_size); + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix4_B2(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/2; + float2 a0, a1, a2, a3, a4, a5, a6, a7; + + if (x1 < t/2) + { + a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t]; + a4 = smem[x2]; a5 = smem[x2+t]; a6 = smem[x2+2*t]; a7 = smem[x2+3*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/2) + { + butterfly4(a0, a1, a2, a3, smem, twiddles, x1, block_size); + butterfly4(a4, a5, a6, a7, smem, twiddles, x2, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix4_B3(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/3; + const int x3 = x2 + t/3; + float2 a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11; + + if (x1 < t/3) + { + a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t]; + a4 = smem[x2]; a5 = smem[x2+t]; a6 = smem[x2+2*t]; a7 = smem[x2+3*t]; + a8 = smem[x3]; a9 = smem[x3+t]; a10 = smem[x3+2*t]; a11 = smem[x3+3*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/3) + { + butterfly4(a0, a1, a2, a3, smem, twiddles, x1, block_size); + 
butterfly4(a4, a5, a6, a7, smem, twiddles, x2, block_size); + butterfly4(a8, a9, a10, a11, smem, twiddles, x3, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix8(__local float2* smem, __global const float2* twiddles, const int x, const int block_size, const int t) +{ + const int k = x % block_size; + float2 a0, a1, a2, a3, a4, a5, a6, a7; + + if (x < t) + { + int tw_ind = block_size / 8; + + a0 = smem[x]; + a1 = mul_float2(twiddles[k], smem[x + t]); + a2 = mul_float2(twiddles[k + block_size],smem[x+2*t]); + a3 = mul_float2(twiddles[k+2*block_size],smem[x+3*t]); + a4 = mul_float2(twiddles[k+3*block_size],smem[x+4*t]); + a5 = mul_float2(twiddles[k+4*block_size],smem[x+5*t]); + a6 = mul_float2(twiddles[k+5*block_size],smem[x+6*t]); + a7 = mul_float2(twiddles[k+6*block_size],smem[x+7*t]); + + float2 b0, b1, b6, b7; + + b0 = a0 + a4; + a4 = a0 - a4; + b1 = a1 + a5; + a5 = a1 - a5; + a5 = (float2)(SQRT_2) * (float2)(a5.x + a5.y, -a5.x + a5.y); + b6 = twiddle(a2 - a6); + a2 = a2 + a6; + b7 = a3 - a7; + b7 = (float2)(SQRT_2) * (float2)(-b7.x + b7.y, -b7.x - b7.y); + a3 = a3 + a7; + + a0 = b0 + a2; + a2 = b0 - a2; + a1 = b1 + a3; + a3 = twiddle(b1 - a3); + a6 = a4 - b6; + a4 = a4 + b6; + a7 = twiddle(a5 - b7); + a5 = a5 + b7; + + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x < t) + { + const int dst_ind = ((x - k) << 3) + k; + __local float2* dst = smem + dst_ind; + + dst[0] = a0 + a1; + dst[block_size] = a4 + a5; + dst[2 * block_size] = a2 + a3; + dst[3 * block_size] = a6 + a7; + dst[4 * block_size] = a0 - a1; + dst[5 * block_size] = a4 - a5; + dst[6 * block_size] = a2 - a3; + dst[7 * block_size] = a6 - a7; + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix3(__local float2* smem, __global const float2* twiddles, const int x, const int block_size, const int t) +{ + float2 a0, a1, a2; + + if (x < t) + { + a0 = smem[x]; a1 = smem[x+t]; a2 = smem[x+2*t]; + } + + 
barrier(CLK_LOCAL_MEM_FENCE); + + if (x < t) + butterfly3(a0, a1, a2, smem, twiddles, x, block_size); + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix3_B2(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/2; + float2 a0, a1, a2, a3, a4, a5; + + if (x1 < t/2) + { + a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; + a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/2) + { + butterfly3(a0, a1, a2, smem, twiddles, x1, block_size); + butterfly3(a3, a4, a5, smem, twiddles, x2, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix3_B3(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1 + t/3; + const int x3 = x2 + t/3; + float2 a0, a1, a2, a3, a4, a5, a6, a7, a8; + + if (x1 < t/3) + { + a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; + a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t]; + a6 = smem[x3]; a7 = smem[x3+t]; a8 = smem[x3+2*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/3) + { + butterfly3(a0, a1, a2, smem, twiddles, x1, block_size); + butterfly3(a3, a4, a5, smem, twiddles, x2, block_size); + butterfly3(a6, a7, a8, smem, twiddles, x3, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix3_B4(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int thread_block = t/4; + const int x2 = x1 + thread_block; + const int x3 = x1 + 2*thread_block; + const int x4 = x1 + 3*thread_block; + float2 a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11; + + if (x1 < t/4) + { + a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; + a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t]; + a6 = smem[x3]; a7 = smem[x3+t]; a8 = smem[x3+2*t]; + a9 = smem[x4]; a10 = 
smem[x4+t]; a11 = smem[x4+2*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/4) + { + butterfly3(a0, a1, a2, smem, twiddles, x1, block_size); + butterfly3(a3, a4, a5, smem, twiddles, x2, block_size); + butterfly3(a6, a7, a8, smem, twiddles, x3, block_size); + butterfly3(a9, a10, a11, smem, twiddles, x4, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix5(__local float2* smem, __global const float2* twiddles, const int x, const int block_size, const int t) +{ + const int k = x % block_size; + float2 a0, a1, a2, a3, a4; + + if (x < t) + { + a0 = smem[x]; a1 = smem[x + t]; a2 = smem[x+2*t]; a3 = smem[x+3*t]; a4 = smem[x+4*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x < t) + butterfly5(a0, a1, a2, a3, a4, smem, twiddles, x, block_size); + + barrier(CLK_LOCAL_MEM_FENCE); +} + +__attribute__((always_inline)) +void fft_radix5_B2(__local float2* smem, __global const float2* twiddles, const int x1, const int block_size, const int t) +{ + const int x2 = x1+t/2; + float2 a0, a1, a2, a3, a4, a5, a6, a7, a8, a9; + + if (x1 < t/2) + { + a0 = smem[x1]; a1 = smem[x1 + t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t]; a4 = smem[x1+4*t]; + a5 = smem[x2]; a6 = smem[x2 + t]; a7 = smem[x2+2*t]; a8 = smem[x2+3*t]; a9 = smem[x2+4*t]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (x1 < t/2) + { + butterfly5(a0, a1, a2, a3, a4, smem, twiddles, x1, block_size); + butterfly5(a5, a6, a7, a8, a9, smem, twiddles, x2, block_size); + } + + barrier(CLK_LOCAL_MEM_FENCE); +} + +#ifdef DFT_SCALE +#define SCALE_VAL(x, scale) x*scale +#else +#define SCALE_VAL(x, scale) x +#endif + +__kernel void fft_multi_radix_rows(__global const uchar* src_ptr, int src_step, int src_offset, int src_rows, int src_cols, + __global uchar* dst_ptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, + __global float2* twiddles_ptr, const int t, const int nz) +{ + const int x = get_global_id(0); + const int y = get_group_id(1); + const int block_size = 
LOCAL_SIZE/kercn; + if (y < nz) + { + __local float2 smem[LOCAL_SIZE]; + __global const float2* twiddles = (__global float2*) twiddles_ptr; + const int ind = x; +#ifdef IS_1D + float scale = 1.f/dst_cols; +#else + float scale = 1.f/(dst_cols*dst_rows); +#endif + +#ifdef COMPLEX_INPUT + __global const float2* src = (__global const float2*)(src_ptr + mad24(y, src_step, mad24(x, (int)(sizeof(float)*2), src_offset))); + #pragma unroll + for (int i=0; i 4 ) return false; @@ -513,7 +514,7 @@ static bool ocl_sum( InputArray _src, Scalar & res, int sum_op, InputArray _mask haveMask && _mask.isContinuous() ? " -D HAVE_MASK_CONT" : "", kercn, haveSrc2 ? " -D HAVE_SRC2" : "", calc2 ? " -D OP_CALC2" : "", haveSrc2 && _src2.isContinuous() ? " -D HAVE_SRC2_CONT" : "", - depth <= CV_32S && ddepth == CV_32S ? ocl::convertTypeStr(CV_8U, ddepth, mcn, cvt[1]) : "noconvert"); + depth <= CV_32S && ddepth == CV_32S ? ocl::convertTypeStr(CV_8U, ddepth, convert_cn, cvt[1]) : "noconvert"); ocl::Kernel k("reduce", ocl::core::reduce_oclsrc, opts); if (k.empty()) @@ -1451,6 +1452,9 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* CV_Assert(!haveSrc2 || _src2.type() == type); + if (depth == CV_32S || depth == CV_32F) + return false; + if ((depth == CV_64F || ddepth == CV_64F) && !doubleSupport) return false; @@ -2184,6 +2188,9 @@ static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & (!doubleSupport && depth == CV_64F)) return false; + if( depth == CV_32F && (!_mask.empty() || normType == NORM_INF) ) + return false; + UMat src = _src.getUMat(); if (normType == NORM_INF) @@ -2539,7 +2546,7 @@ static bool ocl_norm( InputArray _src1, InputArray _src2, int normType, InputArr normType &= ~NORM_RELATIVE; bool normsum = normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR; - if ( !(normType == NORM_INF || normsum) ) + if ( !normsum || !_mask.empty() ) return false; if (normsum) diff --git a/modules/core/test/ocl/test_dft.cpp 
b/modules/core/test/ocl/test_dft.cpp index 1f0e43b20..cd0c1f07d 100644 --- a/modules/core/test/ocl/test_dft.cpp +++ b/modules/core/test/ocl/test_dft.cpp @@ -48,17 +48,26 @@ #ifdef HAVE_OPENCL +enum OCL_FFT_TYPE +{ + R2R = 0, + C2R = 1, + R2C = 2, + C2C = 3 +}; + namespace cvtest { namespace ocl { //////////////////////////////////////////////////////////////////////////// // Dft -PARAM_TEST_CASE(Dft, cv::Size, MatDepth, bool, bool, bool, bool) +PARAM_TEST_CASE(Dft, cv::Size, OCL_FFT_TYPE, bool, bool, bool, bool) { cv::Size dft_size; - int dft_flags, depth; - bool inplace; + int dft_flags, depth, cn, dft_type; + bool hint; + bool is1d; TEST_DECLARE_INPUT_PARAMETER(src); TEST_DECLARE_OUTPUT_PARAMETER(dst); @@ -66,34 +75,50 @@ PARAM_TEST_CASE(Dft, cv::Size, MatDepth, bool, bool, bool, bool) virtual void SetUp() { dft_size = GET_PARAM(0); - depth = GET_PARAM(1); - inplace = GET_PARAM(2); + dft_type = GET_PARAM(1); + depth = CV_32F; dft_flags = 0; + switch (dft_type) + { + case R2R: dft_flags |= cv::DFT_REAL_OUTPUT; cn = 1; break; + case C2R: dft_flags |= cv::DFT_REAL_OUTPUT; cn = 2; break; + case R2C: dft_flags |= cv::DFT_COMPLEX_OUTPUT; cn = 1; break; + case C2C: dft_flags |= cv::DFT_COMPLEX_OUTPUT; cn = 2; break; + } + + if (GET_PARAM(2)) + dft_flags |= cv::DFT_INVERSE; if (GET_PARAM(3)) dft_flags |= cv::DFT_ROWS; if (GET_PARAM(4)) dft_flags |= cv::DFT_SCALE; - if (GET_PARAM(5)) - dft_flags |= cv::DFT_INVERSE; + hint = GET_PARAM(5); + is1d = (dft_flags & DFT_ROWS) != 0 || dft_size.height == 1; } - void generateTestData(int cn = 2) + void generateTestData() { src = randomMat(dft_size, CV_MAKE_TYPE(depth, cn), 0.0, 100.0); usrc = src.getUMat(ACCESS_READ); - - if (inplace) - dst = src, udst = usrc; } }; -OCL_TEST_P(Dft, C2C) +OCL_TEST_P(Dft, Mat) { generateTestData(); - OCL_OFF(cv::dft(src, dst, dft_flags | cv::DFT_COMPLEX_OUTPUT)); - OCL_ON(cv::dft(usrc, udst, dft_flags | cv::DFT_COMPLEX_OUTPUT)); + int nonzero_rows = hint ? 
src.cols - randomInt(1, src.rows-1) : 0; + OCL_OFF(cv::dft(src, dst, dft_flags, nonzero_rows)); + OCL_ON(cv::dft(usrc, udst, dft_flags, nonzero_rows)); + + // In case forward R2C 1d tranform dst contains only half of output + // without complex conjugate + if (dft_type == R2C && is1d && (dft_flags & cv::DFT_INVERSE) == 0) + { + dst = dst(cv::Range(0, dst.rows), cv::Range(0, dst.cols/2 + 1)); + udst = udst(cv::Range(0, udst.rows), cv::Range(0, udst.cols/2 + 1)); + } double eps = src.size().area() * 1e-4; EXPECT_MAT_NEAR(dst, udst, eps); @@ -150,15 +175,15 @@ OCL_TEST_P(MulSpectrums, Mat) OCL_INSTANTIATE_TEST_CASE_P(OCL_ImgProc, MulSpectrums, testing::Combine(Bool(), Bool())); -OCL_INSTANTIATE_TEST_CASE_P(Core, Dft, Combine(Values(cv::Size(2, 3), cv::Size(5, 4), cv::Size(25, 20), - cv::Size(512, 1), cv::Size(1024, 768)), - Values(CV_32F, CV_64F), - Bool(), // inplace +OCL_INSTANTIATE_TEST_CASE_P(Core, Dft, Combine(Values(cv::Size(10, 10), cv::Size(36, 36), cv::Size(512, 1), cv::Size(1280, 768)), + Values((OCL_FFT_TYPE) R2C, (OCL_FFT_TYPE) C2C, (OCL_FFT_TYPE) R2R, (OCL_FFT_TYPE) C2R), + Bool(), // DFT_INVERSE Bool(), // DFT_ROWS Bool(), // DFT_SCALE - Bool()) // DFT_INVERSE + Bool() // hint + ) ); } } // namespace cvtest::ocl -#endif // HAVE_OPENCL +#endif // HAVE_OPENCL \ No newline at end of file diff --git a/modules/cudabgsegm/perf/perf_bgsegm.cpp b/modules/cudabgsegm/perf/perf_bgsegm.cpp index 9d3da2927..02fc9a8ee 100644 --- a/modules/cudabgsegm/perf/perf_bgsegm.cpp +++ b/modules/cudabgsegm/perf/perf_bgsegm.cpp @@ -42,8 +42,8 @@ #include "perf_precomp.hpp" -#ifdef HAVE_OPENCV_LEGACY -# include "opencv2/legacy.hpp" +#ifdef HAVE_OPENCV_CUDALEGACY +# include "opencv2/cudalegacy.hpp" #endif #ifdef HAVE_OPENCV_CUDAIMGPROC @@ -72,7 +72,7 @@ using namespace perf; #if BUILD_WITH_VIDEO_INPUT_SUPPORT -#ifdef HAVE_OPENCV_LEGACY +#ifdef HAVE_OPENCV_CUDALEGACY namespace cv { @@ -150,7 +150,7 @@ PERF_TEST_P(Video, FGDStatModel, } else { -#ifdef HAVE_OPENCV_LEGACY +#ifdef 
HAVE_OPENCV_CUDALEGACY IplImage ipl_frame = frame; cv::Ptr model(cvCreateFGDStatModel(&ipl_frame)); diff --git a/modules/cudabgsegm/test/test_bgsegm.cpp b/modules/cudabgsegm/test/test_bgsegm.cpp index 75d6d73a3..34f3dcc9a 100644 --- a/modules/cudabgsegm/test/test_bgsegm.cpp +++ b/modules/cudabgsegm/test/test_bgsegm.cpp @@ -42,8 +42,8 @@ #include "test_precomp.hpp" -#ifdef HAVE_OPENCV_LEGACY -# include "opencv2/legacy.hpp" +#ifdef HAVE_OPENCV_CUDALEGACY +# include "opencv2/cudalegacy.hpp" #endif #ifdef HAVE_CUDA @@ -66,7 +66,7 @@ using namespace cvtest; ////////////////////////////////////////////////////// // FGDStatModel -#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined(HAVE_OPENCV_LEGACY) +#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined(HAVE_OPENCV_CUDALEGACY) namespace cv { diff --git a/modules/cudacodec/CMakeLists.txt b/modules/cudacodec/CMakeLists.txt index ca6299550..5d8f7327c 100644 --- a/modules/cudacodec/CMakeLists.txt +++ b/modules/cudacodec/CMakeLists.txt @@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Video Encoding/Decoding") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef) -ocv_add_module(cudacodec OPTIONAL opencv_cudev) +ocv_add_module(cudacodec opencv_core opencv_videoio OPTIONAL opencv_cudev) ocv_module_include_directories() ocv_glob_module_sources() diff --git a/modules/cudastereo/src/cuda/disparity_bilateral_filter.cu b/modules/cudastereo/src/cuda/disparity_bilateral_filter.cu index b5de989ae..a9f2d2650 100644 --- a/modules/cudastereo/src/cuda/disparity_bilateral_filter.cu +++ b/modules/cudastereo/src/cuda/disparity_bilateral_filter.cu @@ -45,34 +45,12 @@ #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/limits.hpp" +#include "cuda/disparity_bilateral_filter.hpp" + namespace cv { namespace cuda { namespace device { namespace disp_bilateral_filter { - __constant__ float* ctable_color; - __constant__ float* ctable_space; - __constant__ size_t ctable_space_step; - - __constant__ int cndisp; - __constant__ int 
cradius; - - __constant__ short cedge_disc; - __constant__ short cmax_disc; - - void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc) - { - cudaSafeCall( cudaMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) ); - cudaSafeCall( cudaMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) ); - size_t table_space_step = table_space.step / sizeof(float); - cudaSafeCall( cudaMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) ); - cudaSafeCall( cudaMemcpyToSymbol(cradius, &radius, sizeof(int)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) ); - } - template struct DistRgbMax { @@ -95,7 +73,11 @@ namespace cv { namespace cuda { namespace device }; template - __global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w) + __global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, + const uchar* img, size_t img_step, int h, int w, + const float* ctable_color, const float * ctable_space, size_t ctable_space_step, + int cradius, + short cedge_disc, short cmax_disc) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1); @@ -178,7 +160,7 @@ namespace cv { namespace cuda { namespace device } template - void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream) + void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, const float *table_color, const float* table_space, size_t table_step, int radius, short edge_disc, short max_disc, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -190,20 +172,20 @@ namespace cv { namespace cuda { namespace device 
case 1: for (int i = 0; i < iters; ++i) { - disp_bilateral_filter<1><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + disp_bilateral_filter<1><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols, table_color, table_space, table_step, radius, edge_disc, max_disc); cudaSafeCall( cudaGetLastError() ); - disp_bilateral_filter<1><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + disp_bilateral_filter<1><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols, table_color, table_space, table_step, radius, edge_disc, max_disc); cudaSafeCall( cudaGetLastError() ); } break; case 3: for (int i = 0; i < iters; ++i) { - disp_bilateral_filter<3><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + disp_bilateral_filter<3><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols, table_color, table_space, table_step, radius, edge_disc, max_disc); cudaSafeCall( cudaGetLastError() ); - disp_bilateral_filter<3><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + disp_bilateral_filter<3><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols, table_color, table_space, table_step, radius, edge_disc, max_disc); cudaSafeCall( cudaGetLastError() ); } break; @@ -215,8 +197,8 @@ namespace cv { namespace cuda { namespace device cudaSafeCall( cudaDeviceSynchronize() ); } - template void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream); - template void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream); + template void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, const float *table_color, const float *table_space, size_t table_step, int radius, short, short, cudaStream_t stream); + template void disp_bilateral_filter(PtrStepSz disp, 
PtrStepSzb img, int channels, int iters, const float *table_color, const float *table_space, size_t table_step, int radius, short, short, cudaStream_t stream); } // namespace bilateral_filter }}} // namespace cv { namespace cuda { namespace cudev diff --git a/modules/cudastereo/src/cuda/disparity_bilateral_filter.hpp b/modules/cudastereo/src/cuda/disparity_bilateral_filter.hpp new file mode 100644 index 000000000..95be83457 --- /dev/null +++ b/modules/cudastereo/src/cuda/disparity_bilateral_filter.hpp @@ -0,0 +1,8 @@ +namespace cv { namespace cuda { namespace device +{ + namespace disp_bilateral_filter + { + template + void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, const float *, const float *, size_t, int radius, short edge_disc, short max_disc, cudaStream_t stream); + } +}}} diff --git a/modules/cudastereo/src/cuda/stereocsbp.cu b/modules/cudastereo/src/cuda/stereocsbp.cu index b1426607d..dd535e8b2 100644 --- a/modules/cudastereo/src/cuda/stereocsbp.cu +++ b/modules/cudastereo/src/cuda/stereocsbp.cu @@ -48,109 +48,61 @@ #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/functional.hpp" +#include "cuda/stereocsbp.hpp" + namespace cv { namespace cuda { namespace device { namespace stereocsbp { - /////////////////////////////////////////////////////////////// - /////////////////////// load constants //////////////////////// - /////////////////////////////////////////////////////////////// - - __constant__ int cndisp; - - __constant__ float cmax_data_term; - __constant__ float cdata_weight; - __constant__ float cmax_disc_term; - __constant__ float cdisc_single_jump; - - __constant__ int cth; - - __constant__ size_t cimg_step; - __constant__ size_t cmsg_step; - __constant__ size_t cdisp_step1; - __constant__ size_t cdisp_step2; - - __constant__ uchar* cleft; - __constant__ uchar* cright; - __constant__ uchar* ctemp; - - - void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, 
float disc_single_jump, int min_disp_th, - const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& temp) - { - cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) ); - cudaSafeCall( cudaMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) ); - cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cth, &min_disp_th, sizeof(int)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cimg_step, &left.step, sizeof(size_t)) ); - - cudaSafeCall( cudaMemcpyToSymbol(cleft, &left.data, sizeof(left.data)) ); - cudaSafeCall( cudaMemcpyToSymbol(cright, &right.data, sizeof(right.data)) ); - cudaSafeCall( cudaMemcpyToSymbol(ctemp, &temp.data, sizeof(temp.data)) ); - } - /////////////////////////////////////////////////////////////// /////////////////////// init data cost //////////////////////// /////////////////////////////////////////////////////////////// - template struct DataCostPerPixel; - template <> struct DataCostPerPixel<1> + template static float __device__ pixeldiff(const uchar* left, const uchar* right, float max_data_term); + template<> __device__ __forceinline__ static float pixeldiff<1>(const uchar* left, const uchar* right, float max_data_term) { - static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) - { - return fmin(cdata_weight * ::abs((int)*left - *right), cdata_weight * cmax_data_term); - } - }; - template <> struct DataCostPerPixel<3> + return fmin( ::abs((int)*left - *right), max_data_term); + } + template<> __device__ __forceinline__ static float pixeldiff<3>(const uchar* left, const uchar* right, float max_data_term) { - static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) - { - float tb = 0.114f * ::abs((int)left[0] - right[0]); - 
float tg = 0.587f * ::abs((int)left[1] - right[1]); - float tr = 0.299f * ::abs((int)left[2] - right[2]); + float tb = 0.114f * ::abs((int)left[0] - right[0]); + float tg = 0.587f * ::abs((int)left[1] - right[1]); + float tr = 0.299f * ::abs((int)left[2] - right[2]); - return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); - } - }; - template <> struct DataCostPerPixel<4> + return fmin(tr + tg + tb, max_data_term); + } + template<> __device__ __forceinline__ static float pixeldiff<4>(const uchar* left, const uchar* right, float max_data_term) { - static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) - { - uchar4 l = *((const uchar4*)left); - uchar4 r = *((const uchar4*)right); + uchar4 l = *((const uchar4*)left); + uchar4 r = *((const uchar4*)right); - float tb = 0.114f * ::abs((int)l.x - r.x); - float tg = 0.587f * ::abs((int)l.y - r.y); - float tr = 0.299f * ::abs((int)l.z - r.z); + float tb = 0.114f * ::abs((int)l.x - r.x); + float tg = 0.587f * ::abs((int)l.y - r.y); + float tr = 0.299f * ::abs((int)l.z - r.z); - return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); - } - }; + return fmin(tr + tg + tb, max_data_term); + } template - __global__ void get_first_k_initial_global(T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane) + __global__ void get_first_k_initial_global(uchar *ctemp, T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane, int ndisp, + size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { - T* selected_disparity = selected_disp_pyr + y * cmsg_step + x; - T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x; - T* data_cost = (T*)ctemp + y * cmsg_step + x; + T* selected_disparity = selected_disp_pyr + y * msg_step + x; + T* data_cost_selected = data_cost_selected_ + y * msg_step + x; + T* data_cost = (T*)ctemp + y * msg_step + 
x; for(int i = 0; i < nr_plane; i++) { T minimum = device::numeric_limits::max(); int id = 0; - for(int d = 0; d < cndisp; d++) + for(int d = 0; d < ndisp; d++) { - T cur = data_cost[d * cdisp_step1]; + T cur = data_cost[d * disp_step]; if(cur < minimum) { minimum = cur; @@ -158,46 +110,47 @@ namespace cv { namespace cuda { namespace device } } - data_cost_selected[i * cdisp_step1] = minimum; - selected_disparity[i * cdisp_step1] = id; - data_cost [id * cdisp_step1] = numeric_limits::max(); + data_cost_selected[i * disp_step] = minimum; + selected_disparity[i * disp_step] = id; + data_cost [id * disp_step] = numeric_limits::max(); } } } template - __global__ void get_first_k_initial_local(T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane) + __global__ void get_first_k_initial_local(uchar *ctemp, T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane, int ndisp, + size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { - T* selected_disparity = selected_disp_pyr + y * cmsg_step + x; - T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x; - T* data_cost = (T*)ctemp + y * cmsg_step + x; + T* selected_disparity = selected_disp_pyr + y * msg_step + x; + T* data_cost_selected = data_cost_selected_ + y * msg_step + x; + T* data_cost = (T*)ctemp + y * msg_step + x; int nr_local_minimum = 0; - T prev = data_cost[0 * cdisp_step1]; - T cur = data_cost[1 * cdisp_step1]; - T next = data_cost[2 * cdisp_step1]; + T prev = data_cost[0 * disp_step]; + T cur = data_cost[1 * disp_step]; + T next = data_cost[2 * disp_step]; - for (int d = 1; d < cndisp - 1 && nr_local_minimum < nr_plane; d++) + for (int d = 1; d < ndisp - 1 && nr_local_minimum < nr_plane; d++) { if (cur < prev && cur < next) { - data_cost_selected[nr_local_minimum * cdisp_step1] = cur; - selected_disparity[nr_local_minimum * cdisp_step1] = d; + 
data_cost_selected[nr_local_minimum * disp_step] = cur; + selected_disparity[nr_local_minimum * disp_step] = d; - data_cost[d * cdisp_step1] = numeric_limits::max(); + data_cost[d * disp_step] = numeric_limits::max(); nr_local_minimum++; } prev = cur; cur = next; - next = data_cost[(d + 1) * cdisp_step1]; + next = data_cost[(d + 1) * disp_step]; } for (int i = nr_local_minimum; i < nr_plane; i++) @@ -205,25 +158,27 @@ namespace cv { namespace cuda { namespace device T minimum = numeric_limits::max(); int id = 0; - for (int d = 0; d < cndisp; d++) + for (int d = 0; d < ndisp; d++) { - cur = data_cost[d * cdisp_step1]; + cur = data_cost[d * disp_step]; if (cur < minimum) { minimum = cur; id = d; } } - data_cost_selected[i * cdisp_step1] = minimum; - selected_disparity[i * cdisp_step1] = id; + data_cost_selected[i * disp_step] = minimum; + selected_disparity[i * disp_step] = id; - data_cost[id * cdisp_step1] = numeric_limits::max(); + data_cost[id * disp_step] = numeric_limits::max(); } } } template - __global__ void init_data_cost(int h, int w, int level) + __global__ void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, + int h, int w, int level, int ndisp, float data_weight, float max_data_term, + int min_disp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; @@ -236,9 +191,9 @@ namespace cv { namespace cuda { namespace device int x0 = x << level; int xt = (x + 1) << level; - T* data_cost = (T*)ctemp + y * cmsg_step + x; + T* data_cost = (T*)ctemp + y * msg_step + x; - for(int d = 0; d < cndisp; ++d) + for(int d = 0; d < ndisp; ++d) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) @@ -246,24 +201,26 @@ namespace cv { namespace cuda { namespace device for(int xi = x0; xi < xt; xi++) { int xr = xi - d; - if(d < cth || xr < 0) - val += cdata_weight * cmax_data_term; + if(d < min_disp || xr < 0) + val += data_weight * max_data_term; else { const 
uchar* lle = cleft + yi * cimg_step + xi * channels; const uchar* lri = cright + yi * cimg_step + xr * channels; - val += DataCostPerPixel::compute(lle, lri); + val += data_weight * pixeldiff(lle, lri, max_data_term); } } } - data_cost[cdisp_step1 * d] = saturate_cast(val); + data_cost[disp_step * d] = saturate_cast(val); } } } template - __global__ void init_data_cost_reduce(int level, int rows, int cols, int h) + __global__ void init_data_cost_reduce(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, + int level, int rows, int cols, int h, int ndisp, float data_weight, float max_data_term, + int min_disp, size_t msg_step, size_t disp_step) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; @@ -271,7 +228,7 @@ namespace cv { namespace cuda { namespace device int tid = threadIdx.x; - if (d < cndisp) + if (d < ndisp) { int x0 = x_out << level; int y0 = y_out << level; @@ -281,8 +238,8 @@ namespace cv { namespace cuda { namespace device float val = 0.0f; if (x0 + tid < cols) { - if (x0 + tid - d < 0 || d < cth) - val = cdata_weight * cmax_data_term * len; + if (x0 + tid - d < 0 || d < min_disp) + val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); @@ -290,7 +247,7 @@ namespace cv { namespace cuda { namespace device for(int y = 0; y < len; ++y) { - val += DataCostPerPixel::compute(lle, lri); + val += data_weight * pixeldiff(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; @@ -302,16 +259,16 @@ namespace cv { namespace cuda { namespace device reduce(smem + winsz * threadIdx.z, val, tid, plus()); - T* data_cost = (T*)ctemp + y_out * cmsg_step + x_out; + T* data_cost = (T*)ctemp + y_out * msg_step + x_out; if (tid == 0) - data_cost[cdisp_step1 * d] = saturate_cast(val); + data_cost[disp_step * d] = saturate_cast(val); } } template - void init_data_cost_caller_(int /*rows*/, int /*cols*/, int h, int w, int level, int /*ndisp*/, int channels, cudaStream_t stream) + void 
init_data_cost_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int /*rows*/, int /*cols*/, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -321,15 +278,15 @@ namespace cv { namespace cuda { namespace device switch (channels) { - case 1: init_data_cost<<>>(h, w, level); break; - case 3: init_data_cost<<>>(h, w, level); break; - case 4: init_data_cost<<>>(h, w, level); break; + case 1: init_data_cost<<>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; + case 3: init_data_cost<<>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; + case 4: init_data_cost<<>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template - void init_data_cost_reduce_caller_(int rows, int cols, int h, int w, int level, int ndisp, int channels, cudaStream_t stream) + void init_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); @@ -340,19 +297,19 @@ namespace cv { namespace cuda { namespace device switch (channels) { - case 1: init_data_cost_reduce<<>>(level, rows, cols, h); break; - case 3: init_data_cost_reduce<<>>(level, rows, cols, h); break; - case 4: init_data_cost_reduce<<>>(level, rows, cols, h); break; + case 1: init_data_cost_reduce<<>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, 
data_weight, max_data_term, min_disp, msg_step, disp_step); break; + case 3: init_data_cost_reduce<<>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; + case 4: init_data_cost_reduce<<>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template - void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, - int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream) + void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, + int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream) { - typedef void (*InitDataCostCaller)(int cols, int rows, int w, int h, int level, int ndisp, int channels, cudaStream_t stream); + typedef void (*InitDataCostCaller)(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int cols, int rows, int w, int h, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream); static const InitDataCostCaller init_data_cost_callers[] = { @@ -362,10 +319,8 @@ namespace cv { namespace cuda { namespace device }; size_t disp_step = msg_step * h; - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) ); - init_data_cost_callers[level](rows, cols, h, w, level, ndisp, channels, stream); + init_data_cost_callers[level](cleft, cright, ctemp, cimg_step, rows, cols, h, w, level, ndisp, channels, 
data_weight, max_data_term, min_disp, msg_step, disp_step, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) @@ -378,9 +333,9 @@ namespace cv { namespace cuda { namespace device grid.y = divUp(h, threads.y); if (use_local_init_data_cost == true) - get_first_k_initial_local<<>> (data_cost_selected, disp_selected_pyr, h, w, nr_plane); + get_first_k_initial_local<<>> (ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); else - get_first_k_initial_global<<>>(data_cost_selected, disp_selected_pyr, h, w, nr_plane); + get_first_k_initial_global<<>>(ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); @@ -388,18 +343,18 @@ namespace cv { namespace cuda { namespace device cudaSafeCall( cudaDeviceSynchronize() ); } - template void init_data_cost(int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, - int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream); + template void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, + int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream); - template void init_data_cost(int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, - int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream); + template void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, + int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, 
bool use_local_init_data_cost, cudaStream_t stream); /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// template - __global__ void compute_data_cost(const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane) + __global__ void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; @@ -412,8 +367,8 @@ namespace cv { namespace cuda { namespace device int x0 = x << level; int xt = (x + 1) << level; - const T* selected_disparity = selected_disp_pyr + y/2 * cmsg_step + x/2; - T* data_cost = data_cost_ + y * cmsg_step + x; + const T* selected_disparity = selected_disp_pyr + y/2 * msg_step + x/2; + T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane; d++) { @@ -422,27 +377,27 @@ namespace cv { namespace cuda { namespace device { for(int xi = x0; xi < xt; xi++) { - int sel_disp = selected_disparity[d * cdisp_step2]; + int sel_disp = selected_disparity[d * disp_step2]; int xr = xi - sel_disp; - if (xr < 0 || sel_disp < cth) - val += cdata_weight * cmax_data_term; + if (xr < 0 || sel_disp < min_disp) + val += data_weight * max_data_term; else { const uchar* left_x = cleft + yi * cimg_step + xi * channels; const uchar* right_x = cright + yi * cimg_step + xr * channels; - val += DataCostPerPixel::compute(left_x, right_x); + val += data_weight * pixeldiff(left_x, right_x, max_data_term); } } } - data_cost[cdisp_step1 * d] = saturate_cast(val); + data_cost[disp_step1 * d] = saturate_cast(val); } } } template - __global__ void compute_data_cost_reduce(const T* selected_disp_pyr, T* data_cost_, 
int level, int rows, int cols, int h, int nr_plane) + __global__ void compute_data_cost_reduce(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; @@ -450,12 +405,12 @@ namespace cv { namespace cuda { namespace device int tid = threadIdx.x; - const T* selected_disparity = selected_disp_pyr + y_out/2 * cmsg_step + x_out/2; - T* data_cost = data_cost_ + y_out * cmsg_step + x_out; + const T* selected_disparity = selected_disp_pyr + y_out/2 * msg_step + x_out/2; + T* data_cost = data_cost_ + y_out * msg_step + x_out; if (d < nr_plane) { - int sel_disp = selected_disparity[d * cdisp_step2]; + int sel_disp = selected_disparity[d * disp_step2]; int x0 = x_out << level; int y0 = y_out << level; @@ -465,8 +420,8 @@ namespace cv { namespace cuda { namespace device float val = 0.0f; if (x0 + tid < cols) { - if (x0 + tid - sel_disp < 0 || sel_disp < cth) - val = cdata_weight * cmax_data_term * len; + if (x0 + tid - sel_disp < 0 || sel_disp < min_disp) + val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); @@ -474,7 +429,7 @@ namespace cv { namespace cuda { namespace device for(int y = 0; y < len; ++y) { - val += DataCostPerPixel::compute(lle, lri); + val += data_weight * pixeldiff(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; @@ -487,13 +442,13 @@ namespace cv { namespace cuda { namespace device reduce(smem + winsz * threadIdx.z, val, tid, plus()); if (tid == 0) - data_cost[cdisp_step1 * d] = saturate_cast(val); + data_cost[disp_step1 * d] = saturate_cast(val); } } template - void compute_data_cost_caller_(const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, - int h, int w, int level, int nr_plane, int channels, 
cudaStream_t stream) + void compute_data_cost_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, + int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -503,16 +458,16 @@ namespace cv { namespace cuda { namespace device switch(channels) { - case 1: compute_data_cost<<>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; - case 3: compute_data_cost<<>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; - case 4: compute_data_cost<<>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; + case 1: compute_data_cost<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; + case 3: compute_data_cost<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; + case 4: compute_data_cost<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template - void compute_data_cost_reduce_caller_(const T* disp_selected_pyr, T* data_cost, int rows, int cols, - int h, int w, int level, int nr_plane, int channels, cudaStream_t stream) + void compute_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols, + int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream) { const int threadsNum = 256; 
const size_t smem_size = threadsNum * sizeof(float); @@ -523,19 +478,20 @@ namespace cv { namespace cuda { namespace device switch (channels) { - case 1: compute_data_cost_reduce<<>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; - case 3: compute_data_cost_reduce<<>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; - case 4: compute_data_cost_reduce<<>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; + case 1: compute_data_cost_reduce<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; + case 3: compute_data_cost_reduce<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; + case 4: compute_data_cost_reduce<<>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template - void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step, - int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream) + void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step, + int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, + int min_disp, cudaStream_t stream) { - typedef void (*ComputeDataCostCaller)(const T* disp_selected_pyr, T* data_cost, int rows, int cols, - int h, int w, int level, int nr_plane, int channels, cudaStream_t stream); + typedef void (*ComputeDataCostCaller)(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int 
rows, int cols, + int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream); static const ComputeDataCostCaller callers[] = { @@ -546,22 +502,19 @@ namespace cv { namespace cuda { namespace device size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) ); - callers[level](disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, stream); + callers[level](cleft, cright, cimg_step, disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } - template void compute_data_cost(const short* disp_selected_pyr, short* data_cost, size_t msg_step, - int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream); + template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const short* disp_selected_pyr, short* data_cost, size_t msg_step, + int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream); - template void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step, - int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream); + template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const float* disp_selected_pyr, float* data_cost, size_t msg_step, + int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, 
float data_weight, float max_data_term, int min_disp, cudaStream_t stream); /////////////////////////////////////////////////////////////// @@ -574,7 +527,7 @@ namespace cv { namespace cuda { namespace device const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* data_cost_selected, T* disparity_selected_new, T* data_cost_new, const T* data_cost_cur, const T* disparity_selected_cur, - int nr_plane, int nr_plane2) + int nr_plane, int nr_plane2, size_t disp_step1, size_t disp_step2) { for(int i = 0; i < nr_plane; i++) { @@ -582,7 +535,7 @@ namespace cv { namespace cuda { namespace device int id = 0; for(int j = 0; j < nr_plane2; j++) { - T cur = data_cost_new[j * cdisp_step1]; + T cur = data_cost_new[j * disp_step1]; if(cur < minimum) { minimum = cur; @@ -590,70 +543,72 @@ namespace cv { namespace cuda { namespace device } } - data_cost_selected[i * cdisp_step1] = data_cost_cur[id * cdisp_step1]; - disparity_selected_new[i * cdisp_step1] = disparity_selected_cur[id * cdisp_step2]; + data_cost_selected[i * disp_step1] = data_cost_cur[id * disp_step1]; + disparity_selected_new[i * disp_step1] = disparity_selected_cur[id * disp_step2]; - u_new[i * cdisp_step1] = u_cur[id * cdisp_step2]; - d_new[i * cdisp_step1] = d_cur[id * cdisp_step2]; - l_new[i * cdisp_step1] = l_cur[id * cdisp_step2]; - r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; + u_new[i * disp_step1] = u_cur[id * disp_step2]; + d_new[i * disp_step1] = d_cur[id * disp_step2]; + l_new[i * disp_step1] = l_cur[id * disp_step2]; + r_new[i * disp_step1] = r_cur[id * disp_step2]; - data_cost_new[id * cdisp_step1] = numeric_limits::max(); + data_cost_new[id * disp_step1] = numeric_limits::max(); } } template - __global__ void init_message(T* u_new_, T* d_new_, T* l_new_, T* r_new_, + __global__ void init_message(uchar *ctemp, T* u_new_, T* d_new_, T* l_new_, T* r_new_, const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* 
data_cost_selected_, const T* data_cost_, - int h, int w, int nr_plane, int h2, int w2, int nr_plane2) + int h, int w, int nr_plane, int h2, int w2, int nr_plane2, + size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { - const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * cmsg_step + x/2; - const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * cmsg_step + x/2; - const T* l_cur = l_cur_ + (y/2) * cmsg_step + ::min(w2-1, x/2 + 1); - const T* r_cur = r_cur_ + (y/2) * cmsg_step + ::max(0, x/2 - 1); + const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * msg_step + x/2; + const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * msg_step + x/2; + const T* l_cur = l_cur_ + (y/2) * msg_step + ::min(w2-1, x/2 + 1); + const T* r_cur = r_cur_ + (y/2) * msg_step + ::max(0, x/2 - 1); - T* data_cost_new = (T*)ctemp + y * cmsg_step + x; + T* data_cost_new = (T*)ctemp + y * msg_step + x; - const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * cmsg_step + x/2; - const T* data_cost = data_cost_ + y * cmsg_step + x; + const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * msg_step + x/2; + const T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane2; d++) { - int idx2 = d * cdisp_step2; + int idx2 = d * disp_step2; - T val = data_cost[d * cdisp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; - data_cost_new[d * cdisp_step1] = val; + T val = data_cost[d * disp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; + data_cost_new[d * disp_step1] = val; } - T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x; - T* disparity_selected_new = selected_disp_pyr_new + y * cmsg_step + x; + T* data_cost_selected = data_cost_selected_ + y * msg_step + x; + T* disparity_selected_new = selected_disp_pyr_new + y * msg_step + x; - T* u_new = u_new_ + y * cmsg_step + x; - T* d_new = d_new_ + y * cmsg_step + x; - T* l_new = l_new_ + y 
* cmsg_step + x; - T* r_new = r_new_ + y * cmsg_step + x; + T* u_new = u_new_ + y * msg_step + x; + T* d_new = d_new_ + y * msg_step + x; + T* l_new = l_new_ + y * msg_step + x; + T* r_new = r_new_ + y * msg_step + x; - u_cur = u_cur_ + y/2 * cmsg_step + x/2; - d_cur = d_cur_ + y/2 * cmsg_step + x/2; - l_cur = l_cur_ + y/2 * cmsg_step + x/2; - r_cur = r_cur_ + y/2 * cmsg_step + x/2; + u_cur = u_cur_ + y/2 * msg_step + x/2; + d_cur = d_cur_ + y/2 * msg_step + x/2; + l_cur = l_cur_ + y/2 * msg_step + x/2; + r_cur = r_cur_ + y/2 * msg_step + x/2; get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, data_cost_selected, disparity_selected_new, data_cost_new, - data_cost, disparity_selected_cur, nr_plane, nr_plane2); + data_cost, disparity_selected_cur, nr_plane, nr_plane2, + disp_step1, disp_step2); } } template - void init_message(T* u_new, T* d_new, T* l_new, T* r_new, + void init_message(uchar *ctemp, T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected, const T* data_cost, size_t msg_step, @@ -662,9 +617,6 @@ namespace cv { namespace cuda { namespace device size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -672,11 +624,12 @@ namespace cv { namespace cuda { namespace device grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); - init_message<<>>(u_new, d_new, l_new, r_new, + init_message<<>>(ctemp, u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, selected_disp_pyr_new, selected_disp_pyr_cur, data_cost_selected, data_cost, - h, w, nr_plane, h2, w2, nr_plane2); + h, w, nr_plane, h2, w2, nr_plane2, + 
msg_step, disp_step1, disp_step2); cudaSafeCall( cudaGetLastError() ); if (stream == 0) @@ -684,13 +637,13 @@ namespace cv { namespace cuda { namespace device } - template void init_message(short* u_new, short* d_new, short* l_new, short* r_new, + template void init_message(uchar *ctemp, short* u_new, short* d_new, short* l_new, short* r_new, const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur, short* selected_disp_pyr_new, const short* selected_disp_pyr_cur, short* data_cost_selected, const short* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); - template void init_message(float* u_new, float* d_new, float* l_new, float* r_new, + template void init_message(uchar *ctemp, float* u_new, float* d_new, float* l_new, float* r_new, const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur, float* selected_disp_pyr_new, const float* selected_disp_pyr_cur, float* data_cost_selected, const float* data_cost, size_t msg_step, @@ -702,13 +655,14 @@ namespace cv { namespace cuda { namespace device template __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3, - const T* dst_disp, const T* src_disp, int nr_plane, volatile T* temp) + const T* dst_disp, const T* src_disp, int nr_plane, int max_disc_term, float disc_single_jump, volatile T* temp, + size_t disp_step) { T minimum = numeric_limits::max(); for(int d = 0; d < nr_plane; d++) { - int idx = d * cdisp_step1; + int idx = d * disp_step; T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; if(val < minimum) @@ -720,55 +674,53 @@ namespace cv { namespace cuda { namespace device float sum = 0; for(int d = 0; d < nr_plane; d++) { - float cost_min = minimum + cmax_disc_term; - T src_disp_reg = src_disp[d * cdisp_step1]; + float cost_min = minimum + max_disc_term; + T src_disp_reg = src_disp[d * disp_step]; for(int d2 = 0; d2 < nr_plane; d2++) - cost_min = fmin(cost_min, 
msg_dst[d2 * cdisp_step1] + cdisc_single_jump * ::abs(dst_disp[d2 * cdisp_step1] - src_disp_reg)); + cost_min = fmin(cost_min, msg_dst[d2 * disp_step] + disc_single_jump * ::abs(dst_disp[d2 * disp_step] - src_disp_reg)); - temp[d * cdisp_step1] = saturate_cast(cost_min); + temp[d * disp_step] = saturate_cast(cost_min); sum += cost_min; } sum /= nr_plane; for(int d = 0; d < nr_plane; d++) - msg_dst[d * cdisp_step1] = saturate_cast(temp[d * cdisp_step1] - sum); + msg_dst[d * disp_step] = saturate_cast(temp[d * disp_step] - sum); } template - __global__ void compute_message(T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i) + __global__ void compute_message(uchar *ctemp, T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i, int max_disc_term, float disc_single_jump, size_t msg_step, size_t disp_step) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1); if (y > 0 && y < h - 1 && x > 0 && x < w - 1) { - const T* data = data_cost_selected + y * cmsg_step + x; + const T* data = data_cost_selected + y * msg_step + x; - T* u = u_ + y * cmsg_step + x; - T* d = d_ + y * cmsg_step + x; - T* l = l_ + y * cmsg_step + x; - T* r = r_ + y * cmsg_step + x; + T* u = u_ + y * msg_step + x; + T* d = d_ + y * msg_step + x; + T* l = l_ + y * msg_step + x; + T* r = r_ + y * msg_step + x; - const T* disp = selected_disp_pyr_cur + y * cmsg_step + x; + const T* disp = selected_disp_pyr_cur + y * msg_step + x; - T* temp = (T*)ctemp + y * cmsg_step + x; + T* temp = (T*)ctemp + y * msg_step + x; - message_per_pixel(data, u, r - 1, u + cmsg_step, l + 1, disp, disp - cmsg_step, nr_plane, temp); - message_per_pixel(data, d, d - cmsg_step, r - 1, l + 1, disp, disp + cmsg_step, nr_plane, temp); - message_per_pixel(data, l, u + cmsg_step, d - cmsg_step, l + 1, disp, disp - 1, nr_plane, temp); 
- message_per_pixel(data, r, u + cmsg_step, d - cmsg_step, r - 1, disp, disp + 1, nr_plane, temp); + message_per_pixel(data, u, r - 1, u + msg_step, l + 1, disp, disp - msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); + message_per_pixel(data, d, d - msg_step, r - 1, l + 1, disp, disp + msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); + message_per_pixel(data, l, u + msg_step, d - msg_step, l + 1, disp, disp - 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); + message_per_pixel(data, r, u + msg_step, d - msg_step, r - 1, disp, disp + 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); } } template - void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected, - const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream) + void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected, + const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream) { size_t disp_step = msg_step * h; - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -778,18 +730,18 @@ namespace cv { namespace cuda { namespace device for(int t = 0; t < iters; ++t) { - compute_message<<>>(u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1); + compute_message<<>>(ctemp, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1, max_disc_term, disc_single_jump, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); } if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); }; - template void calc_all_iterations(short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, - int h, int w, int 
nr_plane, int iters, cudaStream_t stream); + template void calc_all_iterations(uchar *ctemp, short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, + int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream); - template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, - int h, int w, int nr_plane, int iters, cudaStream_t stream); + template void calc_all_iterations(uchar *ctemp, float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, + int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream); /////////////////////////////////////////////////////////////// @@ -800,26 +752,26 @@ namespace cv { namespace cuda { namespace device template __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_, const T* data_cost_selected, const T* disp_selected_pyr, - PtrStepSz disp, int nr_plane) + PtrStepSz disp, int nr_plane, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1) { - const T* data = data_cost_selected + y * cmsg_step + x; - const T* disp_selected = disp_selected_pyr + y * cmsg_step + x; + const T* data = data_cost_selected + y * msg_step + x; + const T* disp_selected = disp_selected_pyr + y * msg_step + x; - const T* u = u_ + (y+1) * cmsg_step + (x+0); - const T* d = d_ + (y-1) * cmsg_step + (x+0); - const T* l = l_ + (y+0) * cmsg_step + (x+1); - const T* r = r_ + (y+0) * cmsg_step + (x-1); + const T* u = u_ + (y+1) * msg_step + (x+0); + const T* d = d_ + (y-1) * msg_step + (x+0); + const T* l = l_ + (y+0) * msg_step + (x+1); + const T* r = r_ + (y+0) * msg_step + (x-1); int best 
= 0; T best_val = numeric_limits::max(); for (int i = 0; i < nr_plane; ++i) { - int idx = i * cdisp_step1; + int idx = i * disp_step; T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx]; if (val < best_val) @@ -837,8 +789,6 @@ namespace cv { namespace cuda { namespace device const PtrStepSz& disp, int nr_plane, cudaStream_t stream) { size_t disp_step = disp.rows * msg_step; - cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); - cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); @@ -846,7 +796,7 @@ namespace cv { namespace cuda { namespace device grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); - compute_disp<<>>(u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane); + compute_disp<<>>(u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); if (stream == 0) diff --git a/modules/cudastereo/src/cuda/stereocsbp.hpp b/modules/cudastereo/src/cuda/stereocsbp.hpp new file mode 100644 index 000000000..305497292 --- /dev/null +++ b/modules/cudastereo/src/cuda/stereocsbp.hpp @@ -0,0 +1,29 @@ +namespace cv { namespace cuda { namespace device +{ + namespace stereocsbp + { + template + void init_data_cost(const uchar *left, const uchar *right, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, + int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream); + + template + void compute_data_cost(const uchar *left, const uchar *right, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step, + int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, + int min_disp, cudaStream_t stream); + + template + void init_message(uchar *ctemp, T* 
u_new, T* d_new, T* l_new, T* r_new, + const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, + T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, + T* data_cost_selected, const T* data_cost, size_t msg_step, + int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); + + template + void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected, + const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream); + + template + void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, + const PtrStepSz& disp, int nr_plane, cudaStream_t stream); + } +}}} diff --git a/modules/cudastereo/src/disparity_bilateral_filter.cpp b/modules/cudastereo/src/disparity_bilateral_filter.cpp index 75cbce48a..c59e3b2cb 100644 --- a/modules/cudastereo/src/disparity_bilateral_filter.cpp +++ b/modules/cudastereo/src/disparity_bilateral_filter.cpp @@ -51,16 +51,7 @@ Ptr cv::cuda::createDisparityBilateralFilter(int #else /* !defined (HAVE_CUDA) */ -namespace cv { namespace cuda { namespace device -{ - namespace disp_bilateral_filter - { - void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc); - - template - void disp_bilateral_filter(PtrStepSz disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream); - } -}}} +#include "cuda/disparity_bilateral_filter.hpp" namespace { @@ -165,7 +156,7 @@ namespace const short edge_disc = std::max(short(1), short(ndisp * edge_threshold + 0.5)); const short max_disc = short(ndisp * max_disc_threshold + 0.5); - disp_load_constants(table_color.ptr(), table_space, ndisp, radius, edge_disc, max_disc); + size_t table_space_step = table_space.step / sizeof(float); _dst.create(disp.size(), disp.type()); GpuMat dst = _dst.getGpuMat(); @@ -173,7 +164,7 @@ 
namespace if (dst.data != disp.data) disp.copyTo(dst, stream); - disp_bilateral_filter(dst, img, img.channels(), iters, StreamAccessor::getStream(stream)); + disp_bilateral_filter(dst, img, img.channels(), iters, table_color.ptr(), (float *)table_space.data, table_space_step, radius, edge_disc, max_disc, StreamAccessor::getStream(stream)); } void DispBilateralFilterImpl::apply(InputArray _disp, InputArray _image, OutputArray dst, Stream& stream) diff --git a/modules/cudastereo/src/stereocsbp.cpp b/modules/cudastereo/src/stereocsbp.cpp index 474562baf..ded5fa20e 100644 --- a/modules/cudastereo/src/stereocsbp.cpp +++ b/modules/cudastereo/src/stereocsbp.cpp @@ -53,37 +53,7 @@ Ptr cv::cuda::createStereoConstantSpaceBP(int, int, #else /* !defined (HAVE_CUDA) */ -namespace cv { namespace cuda { namespace device -{ - namespace stereocsbp - { - void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th, - const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& temp); - - template - void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, - int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream); - - template - void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step, - int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream); - - template - void init_message(T* u_new, T* d_new, T* l_new, T* r_new, - const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, - T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, - T* data_cost_selected, const T* data_cost, size_t msg_step, - int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); - - template - void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected, - const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, 
int nr_plane, int iters, cudaStream_t stream); - - template - void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, - const PtrStepSz& disp, int nr_plane, cudaStream_t stream); - } -}}} +#include "cuda/stereocsbp.hpp" namespace { @@ -252,8 +222,6 @@ namespace //////////////////////////////////////////////////////////////////////////// // Compute - load_constants(ndisp_, max_data_term_, data_weight_, max_disc_term_, disc_single_jump_, min_disp_th_, left, right, temp_); - l[0].setTo(0, _stream); d[0].setTo(0, _stream); r[0].setTo(0, _stream); @@ -275,17 +243,18 @@ namespace { if (i == levels_ - 1) { - init_data_cost(left.rows, left.cols, disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), - elem_step, rows_pyr[i], cols_pyr[i], i, nr_plane_pyr[i], ndisp_, left.channels(), use_local_init_data_cost_, stream); + init_data_cost(left.ptr(), right.ptr(), temp_.ptr(), left.step, left.rows, left.cols, disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), + elem_step, rows_pyr[i], cols_pyr[i], i, nr_plane_pyr[i], ndisp_, left.channels(), data_weight_, max_data_term_, min_disp_th_, use_local_init_data_cost_, stream); } else { - compute_data_cost(disp_selected_pyr[cur_idx].ptr(), data_cost.ptr(), elem_step, - left.rows, left.cols, rows_pyr[i], cols_pyr[i], rows_pyr[i+1], i, nr_plane_pyr[i+1], left.channels(), stream); + compute_data_cost(left.ptr(), right.ptr(), left.step, disp_selected_pyr[cur_idx].ptr(), data_cost.ptr(), elem_step, + left.rows, left.cols, rows_pyr[i], cols_pyr[i], rows_pyr[i+1], i, nr_plane_pyr[i+1], left.channels(), data_weight_, max_data_term_, min_disp_th_, stream); int new_idx = (cur_idx + 1) & 1; - init_message(u[new_idx].ptr(), d[new_idx].ptr(), l[new_idx].ptr(), r[new_idx].ptr(), + init_message(temp_.ptr(), + u[new_idx].ptr(), d[new_idx].ptr(), l[new_idx].ptr(), r[new_idx].ptr(), u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), 
disp_selected_pyr[new_idx].ptr(), disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), data_cost.ptr(), elem_step, rows_pyr[i], @@ -294,9 +263,9 @@ namespace cur_idx = new_idx; } - calc_all_iterations(u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), + calc_all_iterations(temp_.ptr(), u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), data_cost_selected.ptr(), disp_selected_pyr[cur_idx].ptr(), elem_step, - rows_pyr[i], cols_pyr[i], nr_plane_pyr[i], iters_, stream); + rows_pyr[i], cols_pyr[i], nr_plane_pyr[i], iters_, max_disc_term_, disc_single_jump_, stream); } } else @@ -305,17 +274,18 @@ namespace { if (i == levels_ - 1) { - init_data_cost(left.rows, left.cols, disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), - elem_step, rows_pyr[i], cols_pyr[i], i, nr_plane_pyr[i], ndisp_, left.channels(), use_local_init_data_cost_, stream); + init_data_cost(left.ptr(), right.ptr(), temp_.ptr(), left.step, left.rows, left.cols, disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), + elem_step, rows_pyr[i], cols_pyr[i], i, nr_plane_pyr[i], ndisp_, left.channels(), data_weight_, max_data_term_, min_disp_th_, use_local_init_data_cost_, stream); } else { - compute_data_cost(disp_selected_pyr[cur_idx].ptr(), data_cost.ptr(), elem_step, - left.rows, left.cols, rows_pyr[i], cols_pyr[i], rows_pyr[i+1], i, nr_plane_pyr[i+1], left.channels(), stream); + compute_data_cost(left.ptr(), right.ptr(), left.step, disp_selected_pyr[cur_idx].ptr(), data_cost.ptr(), elem_step, + left.rows, left.cols, rows_pyr[i], cols_pyr[i], rows_pyr[i+1], i, nr_plane_pyr[i+1], left.channels(), data_weight_, max_data_term_, min_disp_th_, stream); int new_idx = (cur_idx + 1) & 1; - init_message(u[new_idx].ptr(), d[new_idx].ptr(), l[new_idx].ptr(), r[new_idx].ptr(), + init_message(temp_.ptr(), + u[new_idx].ptr(), d[new_idx].ptr(), l[new_idx].ptr(), r[new_idx].ptr(), u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), 
disp_selected_pyr[new_idx].ptr(), disp_selected_pyr[cur_idx].ptr(), data_cost_selected.ptr(), data_cost.ptr(), elem_step, rows_pyr[i], @@ -324,9 +294,9 @@ namespace cur_idx = new_idx; } - calc_all_iterations(u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), + calc_all_iterations(temp_.ptr(), u[cur_idx].ptr(), d[cur_idx].ptr(), l[cur_idx].ptr(), r[cur_idx].ptr(), data_cost_selected.ptr(), disp_selected_pyr[cur_idx].ptr(), elem_step, - rows_pyr[i], cols_pyr[i], nr_plane_pyr[i], iters_, stream); + rows_pyr[i], cols_pyr[i], nr_plane_pyr[i], iters_, max_disc_term_, disc_single_jump_, stream); } } diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index 409fe54b7..4aa7dd349 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -31,7 +31,7 @@ Detects corners using the FAST algorithm Detects corners using the FAST algorithm by [Rosten06]_. -..note:: In Python API, types are given as ``cv2.FAST_FEATURE_DETECTOR_TYPE_5_8``, ``cv2.FAST_FEATURE_DETECTOR_TYPE_7_12`` and ``cv2.FAST_FEATURE_DETECTOR_TYPE_9_16``. For corner detection, use ``cv2.FAST.detect()`` method. +.. note:: In Python API, types are given as ``cv2.FAST_FEATURE_DETECTOR_TYPE_5_8``, ``cv2.FAST_FEATURE_DETECTOR_TYPE_7_12`` and ``cv2.FAST_FEATURE_DETECTOR_TYPE_9_16``. For corner detection, use ``cv2.FAST.detect()`` method. .. [Rosten06] E. Rosten. Machine Learning for High-speed Corner Detection, 2006. @@ -254,7 +254,17 @@ KAZE ---- .. ocv:class:: KAZE : public Feature2D -Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. +Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. 
:: + + class CV_EXPORTS_W KAZE : public Feature2D + { + public: + CV_WRAP KAZE(); + CV_WRAP explicit KAZE(bool extended, bool upright, float threshold = 0.001f, + int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); + }; + +.. note:: AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision (ECCV), Fiorenze, Italy, October 2012. @@ -262,12 +272,14 @@ KAZE::KAZE ---------- The KAZE constructor -.. ocv:function:: KAZE::KAZE(bool extended, bool upright) +.. ocv:function:: KAZE::KAZE(bool extended, bool upright, float threshold, int octaves, int sublevels, int diffusivity) :param extended: Set to enable extraction of extended (128-byte) descriptor. :param upright: Set to enable use of upright descriptors (non rotation-invariant). - - + :param threshold: Detector response threshold to accept point + :param octaves: Maximum octave evolution of the image + :param sublevels: Default number of sublevels per scale level + :param diffusivity: Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or DIFF_CHARBONNIER AKAZE ----- @@ -278,25 +290,25 @@ Class implementing the AKAZE keypoint detector and descriptor extractor, describ class CV_EXPORTS_W AKAZE : public Feature2D { public: - /// AKAZE Descriptor Type - enum DESCRIPTOR_TYPE { - DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_KAZE = 3, - DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_MLDB = 5 - }; CV_WRAP AKAZE(); - explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); + CV_WRAP explicit AKAZE(int descriptor_type, int descriptor_size = 0, int descriptor_channels = 3, + float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); }; +.. 
note:: AKAZE descriptor can only be used with KAZE or AKAZE keypoints + .. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. AKAZE::AKAZE ------------ The AKAZE constructor -.. ocv:function:: AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3) +.. ocv:function:: AKAZE::AKAZE(int descriptor_type, int descriptor_size, int descriptor_channels, float threshold, int octaves, int sublevels, int diffusivity) - :param descriptor_type: Type of the extracted descriptor. + :param descriptor_type: Type of the extracted descriptor: DESCRIPTOR_KAZE, DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT. :param descriptor_size: Size of the descriptor in bits. 0 -> Full size - :param descriptor_channels: Number of channels in the descriptor (1, 2, 3). + :param descriptor_channels: Number of channels in the descriptor (1, 2, 3) + :param threshold: Detector response threshold to accept point + :param octaves: Maximum octave evolution of the image + :param sublevels: Default number of sublevels per scale level + :param diffusivity: Diffusivity type. 
DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or DIFF_CHARBONNIER diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 9f46ee23e..5aeab1c39 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -895,6 +895,22 @@ protected: PixelTestFn test_fn_; }; +// KAZE/AKAZE diffusivity +enum { + DIFF_PM_G1 = 0, + DIFF_PM_G2 = 1, + DIFF_WEICKERT = 2, + DIFF_CHARBONNIER = 3 +}; + +// AKAZE descriptor type +enum { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 +}; + /*! KAZE implementation */ @@ -902,7 +918,8 @@ class CV_EXPORTS_W KAZE : public Feature2D { public: CV_WRAP KAZE(); - CV_WRAP explicit KAZE(bool extended, bool upright); + CV_WRAP explicit KAZE(bool extended, bool upright, float threshold = 0.001f, + int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); virtual ~KAZE(); @@ -928,6 +945,10 @@ protected: CV_PROP bool extended; CV_PROP bool upright; + CV_PROP float threshold; + CV_PROP int octaves; + CV_PROP int sublevels; + CV_PROP int diffusivity; }; /*! 
@@ -936,16 +957,9 @@ AKAZE implementation class CV_EXPORTS_W AKAZE : public Feature2D { public: - /// AKAZE Descriptor Type - enum DESCRIPTOR_TYPE { - DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_KAZE = 3, - DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_MLDB = 5 - }; - CV_WRAP AKAZE(); - explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); + CV_WRAP explicit AKAZE(int descriptor_type, int descriptor_size = 0, int descriptor_channels = 3, + float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); virtual ~AKAZE(); @@ -973,7 +987,10 @@ protected: CV_PROP int descriptor; CV_PROP int descriptor_channels; CV_PROP int descriptor_size; - + CV_PROP float threshold; + CV_PROP int octaves; + CV_PROP int sublevels; + CV_PROP int diffusivity; }; /****************************************************************************************\ * Distance * diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 4b1eb196a..1d09d0615 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -49,7 +49,10 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd */ #include "precomp.hpp" -#include "akaze/AKAZEFeatures.h" +#include "kaze/AKAZEFeatures.h" + +#include +using namespace std; namespace cv { @@ -57,13 +60,22 @@ namespace cv : descriptor(DESCRIPTOR_MLDB) , descriptor_channels(3) , descriptor_size(0) + , threshold(0.001f) + , octaves(4) + , sublevels(4) + , diffusivity(DIFF_PM_G2) { } - AKAZE::AKAZE(DESCRIPTOR_TYPE _descriptor_type, int _descriptor_size, int _descriptor_channels) + AKAZE::AKAZE(int _descriptor_type, int _descriptor_size, int _descriptor_channels, + float _threshold, int _octaves, int _sublevels, int _diffusivity) : descriptor(_descriptor_type) , descriptor_channels(_descriptor_channels) , 
descriptor_size(_descriptor_size) + , threshold(_threshold) + , octaves(_octaves) + , sublevels(_sublevels) + , diffusivity(_diffusivity) { } @@ -78,12 +90,12 @@ namespace cv { switch (descriptor) { - case cv::AKAZE::DESCRIPTOR_KAZE: - case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: + case cv::DESCRIPTOR_KAZE: + case cv::DESCRIPTOR_KAZE_UPRIGHT: return 64; - case cv::AKAZE::DESCRIPTOR_MLDB: - case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: + case cv::DESCRIPTOR_MLDB: + case cv::DESCRIPTOR_MLDB_UPRIGHT: // We use the full length binary descriptor -> 486 bits if (descriptor_size == 0) { @@ -106,12 +118,12 @@ namespace cv { switch (descriptor) { - case cv::AKAZE::DESCRIPTOR_KAZE: - case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: + case cv::DESCRIPTOR_KAZE: + case cv::DESCRIPTOR_KAZE_UPRIGHT: return CV_32F; - case cv::AKAZE::DESCRIPTOR_MLDB: - case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: + case cv::DESCRIPTOR_MLDB: + case cv::DESCRIPTOR_MLDB_UPRIGHT: return CV_8U; default: @@ -124,12 +136,12 @@ namespace cv { switch (descriptor) { - case cv::AKAZE::DESCRIPTOR_KAZE: - case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: + case cv::DESCRIPTOR_KAZE: + case cv::DESCRIPTOR_KAZE_UPRIGHT: return cv::NORM_L2; - case cv::AKAZE::DESCRIPTOR_MLDB: - case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: + case cv::DESCRIPTOR_MLDB: + case cv::DESCRIPTOR_MLDB_UPRIGHT: return cv::NORM_HAMMING; default: @@ -153,11 +165,15 @@ namespace cv cv::Mat& desc = descriptors.getMatRef(); AKAZEOptions options; - options.descriptor = static_cast(descriptor); + options.descriptor = descriptor; options.descriptor_channels = descriptor_channels; options.descriptor_size = descriptor_size; options.img_width = img.cols; options.img_height = img.rows; + options.dthreshold = threshold; + options.omax = octaves; + options.nsublevels = sublevels; + options.diffusivity = diffusivity; AKAZEFeatures impl(options); impl.Create_Nonlinear_Scale_Space(img1_32); @@ -188,7 +204,7 @@ namespace cv img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); AKAZEOptions options; 
- options.descriptor = static_cast(descriptor); + options.descriptor = descriptor; options.descriptor_channels = descriptor_channels; options.descriptor_size = descriptor_size; options.img_width = img.cols; @@ -216,7 +232,7 @@ namespace cv cv::Mat& desc = descriptors.getMatRef(); AKAZEOptions options; - options.descriptor = static_cast(descriptor); + options.descriptor = descriptor; options.descriptor_channels = descriptor_channels; options.descriptor_size = descriptor_size; options.img_width = img.cols; @@ -229,4 +245,4 @@ namespace cv CV_Assert((!desc.rows || desc.cols == descriptorSize())); CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } -} \ No newline at end of file +} diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp deleted file mode 100644 index e5955b21c..000000000 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ /dev/null @@ -1,1941 +0,0 @@ -/** - * @file AKAZEFeatures.cpp - * @brief Main class for detecting and describing binary features in an - * accelerated nonlinear scale space - * @date Sep 15, 2013 - * @author Pablo F. 
Alcantarilla, Jesus Nuevo - */ - -#include "AKAZEFeatures.h" -#include "../kaze/fed.h" -#include "../kaze/nldiffusion_functions.h" - -using namespace std; -using namespace cv; -using namespace cv::details::kaze; - -/* ************************************************************************* */ -/** - * @brief AKAZEFeatures constructor with input options - * @param options AKAZEFeatures configuration options - * @note This constructor allocates memory for the nonlinear scale space - */ -AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { - - ncycles_ = 0; - reordering_ = true; - - if (options_.descriptor_size > 0 && options_.descriptor >= cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT) - { - generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, - options_.descriptor_pattern_size, options_.descriptor_channels); - } - - Allocate_Memory_Evolution(); -} - -/* ************************************************************************* */ -/** - * @brief This method allocates the memory for the nonlinear diffusion evolution - */ -void AKAZEFeatures::Allocate_Memory_Evolution(void) { - - float rfactor = 0.0f; - int level_height = 0, level_width = 0; - - // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= options_.omax - 1; i++) { - rfactor = 1.0f / pow(2.f, i); - level_height = (int)(options_.img_height*rfactor); - level_width = (int)(options_.img_width*rfactor); - - // Smallest possible octave and allow one scale if the image is small - if ((level_width < 80 || level_height < 40) && i != 0) { - options_.omax = i; - break; - } - - for (int j = 0; j < options_.nsublevels; j++) { - TEvolution step; - step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lyy = cv::Mat::zeros(level_height, 
level_width, CV_32F); - step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lflow = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); - step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); - step.sigma_size = fRound(step.esigma); - step.etime = 0.5f*(step.esigma*step.esigma); - step.octave = i; - step.sublevel = j; - evolution_.push_back(step); - } - } - - // Allocate memory for the number of cycles and time steps - for (size_t i = 1; i < evolution_.size(); i++) { - int naux = 0; - vector tau; - float ttime = 0.0f; - ttime = evolution_[i].etime - evolution_[i - 1].etime; - naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); - nsteps_.push_back(naux); - tsteps_.push_back(tau); - ncycles_++; - } -} - -/* ************************************************************************* */ -/** - * @brief This method creates the nonlinear scale space for a given image - * @param img Input image for which the nonlinear scale space needs to be created - * @return 0 if the nonlinear scale space was created successfully, -1 otherwise - */ -int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) -{ - CV_Assert(evolution_.size() > 0); - - // Copy the original image to the first level of the evolution - img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); - evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); - - // First compute the kcontrast factor - options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0); - - // Now generate the rest of evolution levels - for (size_t i = 1; i < evolution_.size(); i++) { - - if (evolution_[i].octave > evolution_[i - 1].octave) { - halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt); - options_.kcontrast = 
options_.kcontrast*0.75f; - } - else { - evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); - } - - gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0f); - - // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); - image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1); - - // Compute the conductivity equation - switch (options_.diffusivity) { - case AKAZEOptions::PM_G1: - pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case AKAZEOptions::PM_G2: - pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case AKAZEOptions::WEICKERT: - weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case AKAZEOptions::CHARBONNIER: - charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - default: - CV_Error(options_.diffusivity, "Diffusivity is not supported"); - break; - } - - // Perform FED n inner steps - for (int j = 0; j < nsteps_[i - 1]; j++) { - cv::details::kaze::nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); - } - } - - return 0; -} - -/* ************************************************************************* */ -/** - * @brief This method selects interesting keypoints through the nonlinear scale space - * @param kpts Vector of detected keypoints - */ -void AKAZEFeatures::Feature_Detection(std::vector& kpts) -{ - kpts.clear(); - - Compute_Determinant_Hessian_Response(); - Find_Scale_Space_Extrema(kpts); - Do_Subpixel_Refinement(kpts); -} - -/* ************************************************************************* */ - -class MultiscaleDerivativesInvoker : public cv::ParallelLoopBody -{ -public: - explicit MultiscaleDerivativesInvoker(std::vector& ev, const AKAZEOptions& opt) - : evolution_(&ev) - , options_(opt) - { - } 
- - - void operator()(const cv::Range& range) const - { - std::vector& evolution = *evolution_; - - for (int i = range.start; i < range.end; i++) - { - float ratio = pow(2.f, (float)evolution[i].octave); - int sigma_size_ = fRound(evolution[i].esigma * options_.derivative_factor / ratio); - - compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, sigma_size_); - - evolution[i].Lx = evolution[i].Lx*((sigma_size_)); - evolution[i].Ly = evolution[i].Ly*((sigma_size_)); - evolution[i].Lxx = evolution[i].Lxx*((sigma_size_)*(sigma_size_)); - evolution[i].Lxy = evolution[i].Lxy*((sigma_size_)*(sigma_size_)); - evolution[i].Lyy = evolution[i].Lyy*((sigma_size_)*(sigma_size_)); - } - } - -private: - std::vector* evolution_; - AKAZEOptions options_; -}; - -/** - * @brief This method computes the multiscale derivatives for the nonlinear scale space - */ -void AKAZEFeatures::Compute_Multiscale_Derivatives(void) -{ - cv::parallel_for_(cv::Range(0, (int)evolution_.size()), - MultiscaleDerivativesInvoker(evolution_, options_)); -} - -/* ************************************************************************* */ -/** - * @brief This method computes the feature detector response for the nonlinear scale space - * @note We use the Hessian determinant as the feature detector response - */ -void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { - - // Firstly compute the multiscale derivatives - Compute_Multiscale_Derivatives(); - - for (size_t i = 0; i < evolution_.size(); i++) - { - for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) - { - for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) - { - float lxx = 
*(evolution_[i].Lxx.ptr(ix)+jx); - float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); - float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); - *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy - lxy*lxy); - } - } - } -} - -/* ************************************************************************* */ -/** - * @brief This method finds extrema in the nonlinear scale space - * @param kpts Vector of detected keypoints - */ -void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) -{ - - float value = 0.0; - float dist = 0.0, ratio = 0.0, smax = 0.0; - int npoints = 0, id_repeated = 0; - int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; - bool is_extremum = false, is_repeated = false, is_out = false; - cv::KeyPoint point; - vector kpts_aux; - - // Set maximum size - if (options_.descriptor == cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::AKAZE::DESCRIPTOR_MLDB) { - smax = 10.0f*sqrtf(2.0f); - } - else if (options_.descriptor == cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::AKAZE::DESCRIPTOR_KAZE) { - smax = 12.0f*sqrtf(2.0f); - } - - for (size_t i = 0; i < evolution_.size(); i++) { - for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { - for (int jx = 1; jx < evolution_[i].Ldet.cols - 1; jx++) { - is_extremum = false; - is_repeated = false; - is_out = false; - value = *(evolution_[i].Ldet.ptr(ix)+jx); - - // Filter the points with the detector threshold - if (value > options_.dthreshold && value >= options_.min_dthreshold && - value > *(evolution_[i].Ldet.ptr(ix)+jx - 1) && - value > *(evolution_[i].Ldet.ptr(ix)+jx + 1) && - value > *(evolution_[i].Ldet.ptr(ix - 1) + jx - 1) && - value > *(evolution_[i].Ldet.ptr(ix - 1) + jx) && - value > *(evolution_[i].Ldet.ptr(ix - 1) + jx + 1) && - value > *(evolution_[i].Ldet.ptr(ix + 1) + jx - 1) && - value > *(evolution_[i].Ldet.ptr(ix + 1) + jx) && - value > *(evolution_[i].Ldet.ptr(ix + 1) + jx + 1)) { - - is_extremum = true; - point.response = fabs(value); - point.size = 
evolution_[i].esigma*options_.derivative_factor; - point.octave = (int)evolution_[i].octave; - point.class_id = (int)i; - ratio = pow(2.f, point.octave); - sigma_size_ = fRound(point.size / ratio); - point.pt.x = static_cast(jx); - point.pt.y = static_cast(ix); - - // Compare response with the same and lower scale - for (size_t ik = 0; ik < kpts_aux.size(); ik++) { - - if ((point.class_id - 1) == kpts_aux[ik].class_id || - point.class_id == kpts_aux[ik].class_id) { - dist = sqrt(pow(point.pt.x*ratio - kpts_aux[ik].pt.x, 2) + pow(point.pt.y*ratio - kpts_aux[ik].pt.y, 2)); - if (dist <= point.size) { - if (point.response > kpts_aux[ik].response) { - id_repeated = (int)ik; - is_repeated = true; - } - else { - is_extremum = false; - } - break; - } - } - } - - // Check out of bounds - if (is_extremum == true) { - - // Check that the point is under the image limits for the descriptor computation - left_x = fRound(point.pt.x - smax*sigma_size_) - 1; - right_x = fRound(point.pt.x + smax*sigma_size_) + 1; - up_y = fRound(point.pt.y - smax*sigma_size_) - 1; - down_y = fRound(point.pt.y + smax*sigma_size_) + 1; - - if (left_x < 0 || right_x >= evolution_[i].Ldet.cols || - up_y < 0 || down_y >= evolution_[i].Ldet.rows) { - is_out = true; - } - - if (is_out == false) { - if (is_repeated == false) { - point.pt.x *= ratio; - point.pt.y *= ratio; - kpts_aux.push_back(point); - npoints++; - } - else { - point.pt.x *= ratio; - point.pt.y *= ratio; - kpts_aux[id_repeated] = point; - } - } // if is_out - } //if is_extremum - } - } // for jx - } // for ix - } // for i - - // Now filter points with the upper scale level - for (size_t i = 0; i < kpts_aux.size(); i++) { - - is_repeated = false; - const cv::KeyPoint& pt = kpts_aux[i]; - for (size_t j = i + 1; j < kpts_aux.size(); j++) { - - // Compare response with the upper scale - if ((pt.class_id + 1) == kpts_aux[j].class_id) { - dist = sqrt(pow(pt.pt.x - kpts_aux[j].pt.x, 2) + pow(pt.pt.y - kpts_aux[j].pt.y, 2)); - if (dist <= pt.size) 
{ - if (pt.response < kpts_aux[j].response) { - is_repeated = true; - break; - } - } - } - } - - if (is_repeated == false) - kpts.push_back(pt); - } -} - -/* ************************************************************************* */ -/** - * @brief This method performs subpixel refinement of the detected keypoints - * @param kpts Vector of detected keypoints - */ -void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) -{ - float Dx = 0.0, Dy = 0.0, ratio = 0.0; - float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; - int x = 0, y = 0; - cv::Mat A = cv::Mat::zeros(2, 2, CV_32F); - cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); - cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); - - for (size_t i = 0; i < kpts.size(); i++) { - ratio = pow(2.f, kpts[i].octave); - x = fRound(kpts[i].pt.x / ratio); - y = fRound(kpts[i].pt.y / ratio); - - // Compute the gradient - Dx = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) - - *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1)); - Dy = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) - - *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x)); - - // Compute the Hessian - Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) - + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1) - - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); - - Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) - + *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x) - - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); - - Dxy = (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x + 1) - + (*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x - 1))) - - (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x + 1) - + (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x - 1))); - - // Solve the linear system - *(A.ptr(0)) = Dxx; - *(A.ptr(1) + 1) = Dyy; - *(A.ptr(0) + 1) = *(A.ptr(1)) = Dxy; - *(b.ptr(0)) = -Dx; - *(b.ptr(1)) = -Dy; - - cv::solve(A, b, dst, DECOMP_LU); - - if (fabs(*(dst.ptr(0))) <= 1.0f && 
fabs(*(dst.ptr(1))) <= 1.0f) { - kpts[i].pt.x = x + (*(dst.ptr(0))); - kpts[i].pt.y = y + (*(dst.ptr(1))); - kpts[i].pt.x *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); - kpts[i].pt.y *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); - kpts[i].angle = 0.0; - - // In OpenCV the size of a keypoint its the diameter - kpts[i].size *= 2.0f; - } - // Delete the point since its not stable - else { - kpts.erase(kpts.begin() + i); - i--; - } - } -} - -/* ************************************************************************* */ - -class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody -{ -public: - SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - Get_SURF_Descriptor_Upright_64((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; -}; - -class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody -{ -public: - SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - { - } - - void operator()(const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); - Get_SURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; -}; - -class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody -{ -public: - MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, 
std::vector& evolution) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - { - } - - void operator()(const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - Get_MSURF_Upright_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; -}; - -class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody -{ -public: - MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); - Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; -}; - -class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody -{ -public: - Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - Get_Upright_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; - AKAZEOptions* options_; -}; - -class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody -{ -public: - 
Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, - std::vector& evolution, - AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) - , descriptorSamples_(descriptorSamples) - , descriptorBits_(descriptorBits) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - Get_Upright_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; - AKAZEOptions* options_; - - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. - cv::Mat descriptorBits_; -}; - -class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody -{ -public: - MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); - Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; - AKAZEOptions* options_; -}; - -class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody -{ -public: - MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, - std::vector& evolution, - AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) - 
, descriptorSamples_(descriptorSamples) - , descriptorBits_(descriptorBits) - { - } - - void operator() (const Range& range) const - { - for (int i = range.start; i < range.end; i++) - { - AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); - Get_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); - } - } - - void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; - -private: - std::vector* keypoints_; - cv::Mat* descriptors_; - std::vector* evolution_; - AKAZEOptions* options_; - - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. - cv::Mat descriptorBits_; -}; - -/** - * @brief This method computes the set of descriptors through the nonlinear scale space - * @param kpts Vector of detected keypoints - * @param desc Matrix to store the descriptors - */ -void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) -{ - // Allocate memory for the matrix with the descriptors - if (options_.descriptor < cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { - desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); - } - else { - // We use the full length binary descriptor -> 486 bits - if (options_.descriptor_size == 0) { - int t = (6 + 36 + 120)*options_.descriptor_channels; - desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1); - } - else { - // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); - } - } - - switch (options_.descriptor) - { - case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation - { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); - } - break; - case cv::AKAZE::DESCRIPTOR_KAZE: - { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); - } - break; - case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // 
Upright descriptors, not invariant to rotation - { - if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); - else - cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); - } - break; - case cv::AKAZE::DESCRIPTOR_MLDB: - { - if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); - else - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); - } - break; - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the main orientation for a given keypoint - * @param kpt Input keypoint - * @note The orientation is computed using a similar approach as described in the - * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 - */ -void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) { - - int ix = 0, iy = 0, idx = 0, s = 0, level = 0; - float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; - std::vector resX(109), resY(109), Ang(109); - const int id[] = { 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6 }; - - // Variables for computing the dominant direction - float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; - - // Get the information from the keypoint - level = kpt.class_id; - ratio = (float)(1 << evolution_[level].octave); - s = fRound(0.5f*kpt.size / ratio); - xf = kpt.pt.x / ratio; - yf = kpt.pt.y / ratio; - - // Calculate derivatives responses for points within radius of 6*scale - for (int i = -6; i <= 6; ++i) { - for (int j = -6; j <= 6; ++j) { - if (i*i + j*j < 36) { - iy = fRound(yf + j*s); - ix = fRound(xf + i*s); - - gweight = gauss25[id[i + 6]][id[j + 6]]; - resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); - resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); - - Ang[idx] = get_angle(resX[idx], resY[idx]); - ++idx; - } - } - } - - // Loop slides pi/3 window around feature point - for (ang1 = 0; ang1 < (float)(2.0 * CV_PI); ang1 += 0.15f) { - ang2 = (ang1 + (float)(CV_PI / 3.0) >(float)(2.0*CV_PI) ? 
ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); - sumX = sumY = 0.f; - - for (size_t k = 0; k < Ang.size(); ++k) { - // Get angle from the x-axis of the sample point - const float & ang = Ang[k]; - - // Determine whether the point is within the window - if (ang1 < ang2 && ang1 < ang && ang < ang2) { - sumX += resX[k]; - sumY += resY[k]; - } - else if (ang2 < ang1 && - ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0f*CV_PI))) { - sumX += resX[k]; - sumY += resY[k]; - } - } - - // if the vector produced from this window is longer than all - // previous vectors then this forms the new dominant direction - if (sumX*sumX + sumY*sumY > max) { - // store largest orientation - max = sumX*sumX + sumY*sumY; - kpt.angle = get_angle(sumX, sumY); - } - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the upright descriptor (not rotation invariant) of - * the provided keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired - * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, - * ECCV 2008 - */ -void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int scale = 0, dsize = 0, level = 0; - - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5f, cy = 0.5f; - - const std::vector& evolution = *evolution_; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - - i = -8; - - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { - j = -8; - i = i - 4; - - cx += 1.0f; - cy = -0.5f; - - while (j < pattern_size) { - dx = dy = mdx = mdy = 0.0; - cy += 1.0f; - j = j - 4; - - ky = i + sample_step; - kx = j + sample_step; - - ys = yf + (ky*scale); - xs = xf + (kx*scale); - - for (int k = i; k < i + 9; k++) { - for (int l = j; l < j + 9; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; - - //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50f*scale); - - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); - - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution[level].Lx.ptr(y1)+x1); - res2 = 
*(evolution[level].Lx.ptr(y1)+x2); - res3 = *(evolution[level].Lx.ptr(y2)+x1); - res4 = *(evolution[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution[level].Ly.ptr(y1)+x1); - res2 = *(evolution[level].Ly.ptr(y1)+x2); - res3 = *(evolution[level].Ly.ptr(y2)+x1); - res4 = *(evolution[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - rx = gauss_s1*rx; - ry = gauss_s1*ry; - - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); - } - } - - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); - - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - - j += 9; - } - - i += 9; - } - - // convert to unit vector - len = sqrt(len); - - for (i = 0; i < dsize; i++) { - desc[i] /= len; - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the descriptor of the provided keypoint given the - * main orientation of the keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired - * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, - * ECCV 2008 - */ -void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; - int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; - - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5f, cy = 0.5f; - - const std::vector& evolution = *evolution_; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - angle = kpt.angle; - level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - co = cos(angle); - si = sin(angle); - - i = -8; - - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { - j = -8; - i = i - 4; - - cx += 1.0f; - cy = -0.5f; - - while (j < pattern_size) { - dx = dy = mdx = mdy = 0.0; - cy += 1.0f; - j = j - 4; - - ky = i + sample_step; - kx = j + sample_step; - - xs = xf + (-kx*scale*si + ky*scale*co); - ys = yf + (kx*scale*co + ky*scale*si); - - for (int k = i; k < i + 9; ++k) { - for (int l = j; l < j + 9; ++l) { - // Get coords of sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 
2.5f*scale); - - y1 = fRound(sample_y - 0.5f); - x1 = fRound(sample_x - 0.5f); - - y2 = fRound(sample_y + 0.5f); - x2 = fRound(sample_x + 0.5f); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution[level].Lx.ptr(y1)+x1); - res2 = *(evolution[level].Lx.ptr(y1)+x2); - res3 = *(evolution[level].Lx.ptr(y2)+x1); - res4 = *(evolution[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution[level].Ly.ptr(y1)+x1); - res2 = *(evolution[level].Ly.ptr(y1)+x2); - res3 = *(evolution[level].Ly.ptr(y2)+x1); - res4 = *(evolution[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Get the x and y derivatives on the rotated axis - rry = gauss_s1*(rx*co + ry*si); - rrx = gauss_s1*(-rx*si + ry*co); - - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); - } - } - - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - - j += 9; - } - - i += 9; - } - - // convert to unit vector - len = sqrt(len); - - for (i = 0; i < dsize; i++) { - desc[i] /= len; - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the rupright descriptor (not rotation invariant) of - * the provided keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - */ -void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { - - float di = 0.0, dx = 0.0, dy = 0.0; - float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, ratio = 0.0; - int x1 = 0, y1 = 0, 
sample_step = 0, pattern_size = 0; - int level = 0, nsamples = 0, scale = 0; - int dcount1 = 0, dcount2 = 0; - - const AKAZEOptions & options = *options_; - const std::vector& evolution = *evolution_; - - // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - - // First 2x2 grid - pattern_size = options_->descriptor_pattern_size; - sample_step = pattern_size; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - di = dx = dy = 0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - - di += ri; - dx += rx; - dy += ry; - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_1.ptr(dcount2)) = di; - *(values_1.ptr(dcount2)+1) = dx; - *(values_1.ptr(dcount2)+2) = dy; - dcount2++; - } - } - - // Do binary comparison first level - for (int i = 0; i < 4; i++) { - for (int j = i + 1; j < 4; j++) { - if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { - desc[dcount1 / 8] |= (1 << 
(dcount1 % 8)); - } - dcount1++; - } - } - - // Second 3x3 grid - sample_step = static_cast(ceil(pattern_size*2. / 3.)); - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - di = dx = dy = 0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - - di += ri; - dx += rx; - dy += ry; - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_2.ptr(dcount2)) = di; - *(values_2.ptr(dcount2)+1) = dx; - *(values_2.ptr(dcount2)+2) = dy; - dcount2++; - } - } - - //Do binary comparison second level - dcount2 = 0; - for (int i = 0; i < 9; i++) { - for (int j = i + 1; j < 9; j++) { - if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - - // Third 4x4 grid - sample_step = pattern_size / 2; - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - di = dx = dy = 0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = 
*(evolution[level].Ly.ptr(y1)+x1); - - di += ri; - dx += rx; - dy += ry; - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_3.ptr(dcount2)) = di; - *(values_3.ptr(dcount2)+1) = dx; - *(values_3.ptr(dcount2)+2) = dy; - dcount2++; - } - } - - //Do binary comparison third level - dcount2 = 0; - for (int i = 0; i < 16; i++) { - for (int j = i + 1; j < 16; j++) { - if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - - if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the descriptor of the provided keypoint given the - * main orientation of the keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - */ -void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { - - float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; - float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int level = 0, nsamples = 0, scale = 0; - int dcount1 = 0, dcount2 = 0; - - const AKAZEOptions & options = *options_; - const std::vector& evolution = *evolution_; - - // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - angle = kpt.angle; 
- level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - co = cos(angle); - si = sin(angle); - - // First 2x2 grid - pattern_size = options.descriptor_pattern_size; - sample_step = pattern_size; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - di = dx = dy = 0.0; - nsamples = 0; - - for (float k = (float)i; k < i + sample_step; k++) { - for (float l = (float)j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - - di += ri; - - if (options.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } - - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_1.ptr(dcount2)) = di; - if (options.descriptor_channels > 1) { - *(values_1.ptr(dcount2)+1) = dx; - } - - if (options.descriptor_channels > 2) { - *(values_1.ptr(dcount2)+2) = dy; - } - - dcount2++; - } - } - - // Do binary comparison first level - for (int i = 0; i < 4; i++) { - for (int j = i + 1; j < 4; j++) { - if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - - if (options.descriptor_channels > 1) { - for (int i = 0; i < 4; i++) { - for (int j = i + 1; j < 4; j++) { - if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - - dcount1++; - } - } - } - - if (options.descriptor_channels > 2) { - for (int i = 0; i < 4; i++) { - for (int j = i + 1; j < 4; j++) { - if (*(values_1.ptr(i)+2) > 
*(values_1.ptr(j)+2)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - } - - // Second 3x3 grid - sample_step = static_cast(ceil(pattern_size*2. / 3.)); - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - di = dx = dy = 0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - di += ri; - - if (options.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } - - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_2.ptr(dcount2)) = di; - if (options.descriptor_channels > 1) { - *(values_2.ptr(dcount2)+1) = dx; - } - - if (options.descriptor_channels > 2) { - *(values_2.ptr(dcount2)+2) = dy; - } - - dcount2++; - } - } - - // Do binary comparison second level - for (int i = 0; i < 9; i++) { - for (int j = i + 1; j < 9; j++) { - if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - - if (options.descriptor_channels > 1) { - for (int i = 0; i < 9; i++) { - for (int j = i + 1; j < 9; j++) { - if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - } - - if (options.descriptor_channels > 2) { - for (int i = 0; i < 9; i++) { - for (int j = i + 1; j < 9; j++) { - if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { - desc[dcount1 / 8] 
|= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - } - - // Third 4x4 grid - sample_step = pattern_size / 2; - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - di = dx = dy = 0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution[level].Lt.ptr(y1)+x1); - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - di += ri; - - if (options.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } - - nsamples++; - } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_3.ptr(dcount2)) = di; - if (options.descriptor_channels > 1) - *(values_3.ptr(dcount2)+1) = dx; - - if (options.descriptor_channels > 2) - *(values_3.ptr(dcount2)+2) = dy; - - dcount2++; - } - } - - // Do binary comparison third level - for (int i = 0; i < 16; i++) { - for (int j = i + 1; j < 16; j++) { - if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - - if (options.descriptor_channels > 1) { - for (int i = 0; i < 16; i++) { - for (int j = i + 1; j < 16; j++) { - if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - } - - if (options.descriptor_channels > 2) { - for (int i = 0; i < 16; i++) { - for (int j = i + 1; j < 16; j++) { - if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { - desc[dcount1 / 8] |= (1 << (dcount1 % 8)); - } - dcount1++; - } - } - } -} - -/* 
************************************************************************* */ -/** - * @brief This method computes the M-LDB descriptor of the provided keypoint given the - * main orientation of the keypoint. The descriptor is computed based on a subset of - * the bits of the whole descriptor - * @param kpt Input keypoint - * @param desc Descriptor vector - */ -void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { - - float di = 0.f, dx = 0.f, dy = 0.f; - float rx = 0.f, ry = 0.f; - float sample_x = 0.f, sample_y = 0.f; - int x1 = 0, y1 = 0; - - const AKAZEOptions & options = *options_; - const std::vector& evolution = *evolution_; - - // Get the information from the keypoint - float ratio = (float)(1 << kpt.octave); - int scale = fRound(0.5f*kpt.size / ratio); - float angle = kpt.angle; - int level = kpt.class_id; - float yf = kpt.pt.y / ratio; - float xf = kpt.pt.x / ratio; - float co = cos(angle); - float si = sin(angle); - - // Allocate memory for the matrix of values - cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); - - // Sample everything, but only do the comparisons - vector steps(3); - steps.at(0) = options.descriptor_pattern_size; - steps.at(1) = (int)ceil(2.f*options.descriptor_pattern_size / 3.f); - steps.at(2) = options.descriptor_pattern_size / 2; - - for (int i = 0; i < descriptorSamples_.rows; i++) { - const int *coords = descriptorSamples_.ptr(i); - int sample_step = steps.at(coords[0]); - di = 0.0f; - dx = 0.0f; - dy = 0.0f; - - for (int k = coords[1]; k < coords[1] + sample_step; k++) { - for (int l = coords[2]; l < coords[2] + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - di += *(evolution[level].Lt.ptr(y1)+x1); - - if (options.descriptor_channels > 1) { - rx = 
*(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - - if (options.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - dx += rx*co + ry*si; - dy += -rx*si + ry*co; - } - } - } - } - - *(values.ptr(options.descriptor_channels*i)) = di; - - if (options.descriptor_channels == 2) { - *(values.ptr(options.descriptor_channels*i + 1)) = dx; - } - else if (options.descriptor_channels == 3) { - *(values.ptr(options.descriptor_channels*i + 1)) = dx; - *(values.ptr(options.descriptor_channels*i + 2)) = dy; - } - } - - // Do the comparisons - const float *vals = values.ptr(0); - const int *comps = descriptorBits_.ptr(0); - - for (int i = 0; i vals[comps[2 * i + 1]]) { - desc[i / 8] |= (1 << (i % 8)); - } - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the upright (not rotation invariant) M-LDB descriptor - * of the provided keypoint given the main orientation of the keypoint. 
- * The descriptor is computed based on a subset of the bits of the whole descriptor - * @param kpt Input keypoint - * @param desc Descriptor vector - */ -void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { - - float di = 0.0f, dx = 0.0f, dy = 0.0f; - float rx = 0.0f, ry = 0.0f; - float sample_x = 0.0f, sample_y = 0.0f; - int x1 = 0, y1 = 0; - - const AKAZEOptions & options = *options_; - const std::vector& evolution = *evolution_; - - // Get the information from the keypoint - float ratio = (float)(1 << kpt.octave); - int scale = fRound(0.5f*kpt.size / ratio); - int level = kpt.class_id; - float yf = kpt.pt.y / ratio; - float xf = kpt.pt.x / ratio; - - // Allocate memory for the matrix of values - Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); - - vector steps(3); - steps.at(0) = options.descriptor_pattern_size; - steps.at(1) = static_cast(ceil(2.f*options.descriptor_pattern_size / 3.f)); - steps.at(2) = options.descriptor_pattern_size / 2; - - for (int i = 0; i < descriptorSamples_.rows; i++) { - const int *coords = descriptorSamples_.ptr(i); - int sample_step = steps.at(coords[0]); - di = 0.0f, dx = 0.0f, dy = 0.0f; - - for (int k = coords[1]; k < coords[1] + sample_step; k++) { - for (int l = coords[2]; l < coords[2] + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - di += *(evolution[level].Lt.ptr(y1)+x1); - - if (options.descriptor_channels > 1) { - rx = *(evolution[level].Lx.ptr(y1)+x1); - ry = *(evolution[level].Ly.ptr(y1)+x1); - - if (options.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options.descriptor_channels == 3) { - dx += rx; - dy += ry; - } - } - } - } - - *(values.ptr(options.descriptor_channels*i)) = di; - - if (options.descriptor_channels == 2) { - 
*(values.ptr(options.descriptor_channels*i + 1)) = dx; - } - else if (options.descriptor_channels == 3) { - *(values.ptr(options.descriptor_channels*i + 1)) = dx; - *(values.ptr(options.descriptor_channels*i + 2)) = dy; - } - } - - // Do the comparisons - const float *vals = values.ptr(0); - const int *comps = descriptorBits_.ptr(0); - - for (int i = 0; i vals[comps[2 * i + 1]]) { - desc[i / 8] |= (1 << (i % 8)); - } - } -} - -/* ************************************************************************* */ -/** - * @brief This function computes a (quasi-random) list of bits to be taken - * from the full descriptor. To speed the extraction, the function creates - * a list of the samples that are involved in generating at least a bit (sampleList) - * and a list of the comparisons between those samples (comparisons) - * @param sampleList - * @param comparisons The matrix with the binary comparisons - * @param nbits The number of bits of the descriptor - * @param pattern_size The pattern size for the binary descriptor - * @param nchannels Number of channels to consider in the descriptor (1-3) - * @note The function keeps the 18 bits (3-channels by 6 comparisons) of the - * coarser grid, since it provides the most robust estimations - */ -void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, - int pattern_size, int nchannels) { - - int ssz = 0; - for (int i = 0; i < 3; i++) { - int gz = (i + 2)*(i + 2); - ssz += gz*(gz - 1) / 2; - } - ssz *= nchannels; - - CV_Assert(nbits <= ssz); // Descriptor size can't be bigger than full descriptor - - // Since the full descriptor is usually under 10k elements, we pick - // the selection from the full matrix. We take as many samples per - // pick as the number of channels. 
For every pick, we - // take the two samples involved and put them in the sampling list - - Mat_ fullM(ssz / nchannels, 5); - for (int i = 0, c = 0; i < 3; i++) { - int gdiv = i + 2; //grid divisions, per row - int gsz = gdiv*gdiv; - int psz = (int)ceil(2.f*pattern_size / (float)gdiv); - - for (int j = 0; j < gsz; j++) { - for (int k = j + 1; k < gsz; k++, c++) { - fullM(c, 0) = i; - fullM(c, 1) = psz*(j % gdiv) - pattern_size; - fullM(c, 2) = psz*(j / gdiv) - pattern_size; - fullM(c, 3) = psz*(k % gdiv) - pattern_size; - fullM(c, 4) = psz*(k / gdiv) - pattern_size; - } - } - } - - srand(1024); - Mat_ comps = Mat_(nchannels * (int)ceil(nbits / (float)nchannels), 2); - comps = 1000; - - // Select some samples. A sample includes all channels - int count = 0; - int npicks = (int)ceil(nbits / (float)nchannels); - Mat_ samples(29, 3); - Mat_ fullcopy = fullM.clone(); - samples = -1; - - for (int i = 0; i < npicks; i++) { - int k = rand() % (fullM.rows - i); - if (i < 6) { - // Force use of the coarser grid values and comparisons - k = i; - } - - bool n = true; - - for (int j = 0; j < count; j++) { - if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 1) && samples(j, 2) == fullcopy(k, 2)) { - n = false; - comps(i*nchannels, 0) = nchannels*j; - comps(i*nchannels + 1, 0) = nchannels*j + 1; - comps(i*nchannels + 2, 0) = nchannels*j + 2; - break; - } - } - - if (n) { - samples(count, 0) = fullcopy(k, 0); - samples(count, 1) = fullcopy(k, 1); - samples(count, 2) = fullcopy(k, 2); - comps(i*nchannels, 0) = nchannels*count; - comps(i*nchannels + 1, 0) = nchannels*count + 1; - comps(i*nchannels + 2, 0) = nchannels*count + 2; - count++; - } - - n = true; - for (int j = 0; j < count; j++) { - if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 3) && samples(j, 2) == fullcopy(k, 4)) { - n = false; - comps(i*nchannels, 1) = nchannels*j; - comps(i*nchannels + 1, 1) = nchannels*j + 1; - comps(i*nchannels + 2, 1) = nchannels*j + 2; - break; - } - } - - 
if (n) { - samples(count, 0) = fullcopy(k, 0); - samples(count, 1) = fullcopy(k, 3); - samples(count, 2) = fullcopy(k, 4); - comps(i*nchannels, 1) = nchannels*count; - comps(i*nchannels + 1, 1) = nchannels*count + 1; - comps(i*nchannels + 2, 1) = nchannels*count + 2; - count++; - } - - Mat tmp = fullcopy.row(k); - fullcopy.row(fullcopy.rows - i - 1).copyTo(tmp); - } - - sampleList = samples.rowRange(0, count).clone(); - comparisons = comps.rowRange(0, nbits).clone(); -} - -/* ************************************************************************* */ -/** - * @brief This function computes the angle from the vector given by (X Y). From 0 to 2*Pi - */ -inline float get_angle(float x, float y) { - - if (x >= 0 && y >= 0) { - return atanf(y / x); - } - - if (x < 0 && y >= 0) { - return static_cast(CV_PI)-atanf(-y / x); - } - - if (x < 0 && y < 0) { - return static_cast(CV_PI)+atanf(y / x); - } - - if (x >= 0 && y < 0) { - return static_cast(2.0 * CV_PI) - atanf(-y / x); - } - - return 0; -} - -/* ************************************************************************* */ -/** - * @brief This function computes the value of a 2D Gaussian function - * @param x X Position - * @param y Y Position - * @param sig Standard Deviation - */ -inline float gaussian(float x, float y, float sigma) { - return expf(-(x*x + y*y) / (2.0f*sigma*sigma)); -} - -/* ************************************************************************* */ -/** - * @brief This function checks descriptor limits - * @param x X Position - * @param y Y Position - * @param width Image width - * @param height Image height - */ -inline void check_descriptor_limits(int &x, int &y, int width, int height) { - - if (x < 0) { - x = 0; - } - - if (y < 0) { - y = 0; - } - - if (x > width - 1) { - x = width - 1; - } - - if (y > height - 1) { - y = height - 1; - } -} - -/* ************************************************************************* */ -/** - * @brief This funtion rounds float to nearest integer - * @param 
flt Input float - * @return dst Nearest integer - */ -inline int fRound(float flt) { - return (int)(flt + 0.5f); -} \ No newline at end of file diff --git a/modules/features2d/src/akaze/AKAZEFeatures.h b/modules/features2d/src/akaze/AKAZEFeatures.h deleted file mode 100644 index 302ef0d06..000000000 --- a/modules/features2d/src/akaze/AKAZEFeatures.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * @file AKAZE.h - * @brief Main class for detecting and computing binary descriptors in an - * accelerated nonlinear scale space - * @date Mar 27, 2013 - * @author Pablo F. Alcantarilla, Jesus Nuevo - */ - -#pragma once - -/* ************************************************************************* */ -// Includes -#include "precomp.hpp" -#include "AKAZEConfig.h" - -/* ************************************************************************* */ -// AKAZE Class Declaration -class AKAZEFeatures { - -private: - - AKAZEOptions options_; ///< Configuration options for AKAZE - std::vector evolution_; ///< Vector of nonlinear diffusion evolution - - /// FED parameters - int ncycles_; ///< Number of cycles - bool reordering_; ///< Flag for reordering time steps - std::vector > tsteps_; ///< Vector of FED dynamic time steps - std::vector nsteps_; ///< Vector of number of steps per cycle - - /// Matrices for the M-LDB descriptor computation - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
- cv::Mat descriptorBits_; - cv::Mat bitMask_; - -public: - - /// Constructor with input arguments - AKAZEFeatures(const AKAZEOptions& options); - - /// Scale Space methods - void Allocate_Memory_Evolution(); - int Create_Nonlinear_Scale_Space(const cv::Mat& img); - void Feature_Detection(std::vector& kpts); - void Compute_Determinant_Hessian_Response(void); - void Compute_Multiscale_Derivatives(void); - void Find_Scale_Space_Extrema(std::vector& kpts); - void Do_Subpixel_Refinement(std::vector& kpts); - - // Feature description methods - void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); - - static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_); -}; - -/* ************************************************************************* */ -// Inline functions - -// Inline functions -void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, - int nbits, int pattern_size, int nchannels); -float get_angle(float x, float y); -float gaussian(float x, float y, float sigma); -void check_descriptor_limits(int& x, int& y, int width, int height); -int fRound(float flt); diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 88fb999d5..910ec271f 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -55,12 +55,21 @@ namespace cv KAZE::KAZE() : extended(false) , upright(false) + , threshold(0.001f) + , octaves(4) + , sublevels(4) + , diffusivity(DIFF_PM_G2) { } - KAZE::KAZE(bool _extended, bool _upright) + KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves, + int _sublevels, int _diffusivity) : extended(_extended) , upright(_upright) + , threshold(_threshold) + , octaves(_octaves) + , sublevels(_sublevels) + , diffusivity(_diffusivity) { } @@ -111,6 +120,10 @@ namespace cv options.img_height = img.rows; options.extended = extended; options.upright = upright; + options.dthreshold = threshold; + options.omax = octaves; + options.nsublevels = 
sublevels; + options.diffusivity = diffusivity; KAZEFeatures impl(options); impl.Create_Nonlinear_Scale_Space(img1_32); @@ -180,4 +193,4 @@ namespace cv CV_Assert((!desc.rows || desc.cols == descriptorSize())); CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } -} \ No newline at end of file +} diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/kaze/AKAZEConfig.h similarity index 70% rename from modules/features2d/src/akaze/AKAZEConfig.h rename to modules/features2d/src/kaze/AKAZEConfig.h index 1c1203f57..c7ac1cfc0 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/kaze/AKAZEConfig.h @@ -5,7 +5,8 @@ * @author Pablo F. Alcantarilla, Jesus Nuevo */ -#pragma once +#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ +#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ /* ************************************************************************* */ // OpenCV @@ -28,14 +29,6 @@ const float gauss25[7][7] = { /// AKAZE configuration options structure struct AKAZEOptions { - /// AKAZE Diffusivities - enum DIFFUSIVITY_TYPE { - PM_G1 = 0, - PM_G2 = 1, - WEICKERT = 2, - CHARBONNIER = 3 - }; - AKAZEOptions() : omax(4) , nsublevels(4) @@ -44,12 +37,12 @@ struct AKAZEOptions { , soffset(1.6f) , derivative_factor(1.5f) , sderivatives(1.0) - , diffusivity(PM_G2) + , diffusivity(cv::DIFF_PM_G2) , dthreshold(0.001f) , min_dthreshold(0.00001f) - , descriptor(cv::AKAZE::DESCRIPTOR_MLDB) + , descriptor(cv::DESCRIPTOR_MLDB) , descriptor_size(0) , descriptor_channels(3) , descriptor_pattern_size(10) @@ -67,12 +60,12 @@ struct AKAZEOptions { float soffset; ///< Base scale offset (sigma units) float derivative_factor; ///< Factor for the multiscale derivatives float sderivatives; ///< Smoothing factor for the derivatives - DIFFUSIVITY_TYPE diffusivity; ///< Diffusivity type + int diffusivity; ///< Diffusivity type float dthreshold; ///< Detector response threshold to accept point float min_dthreshold; ///< Minimum detector 
threshold to accept a point - cv::AKAZE::DESCRIPTOR_TYPE descriptor; ///< Type of descriptor + int descriptor; ///< Type of descriptor int descriptor_size; ///< Size of the descriptor in bits. 0->Full size int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3) int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale @@ -82,28 +75,4 @@ struct AKAZEOptions { int kcontrast_nbins; ///< Number of bins for the contrast factor histogram }; -/* ************************************************************************* */ -/// AKAZE nonlinear diffusion filtering evolution -struct TEvolution { - - TEvolution() { - etime = 0.0f; - esigma = 0.0f; - octave = 0; - sublevel = 0; - sigma_size = 0; - } - - cv::Mat Lx, Ly; // First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives - cv::Mat Lflow; // Diffusivity image - cv::Mat Lt; // Evolution image - cv::Mat Lsmooth; // Smoothed image - cv::Mat Lstep; // Evolution step update - cv::Mat Ldet; // Detector response - float etime; // Evolution time - float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 - size_t octave; // Image octave - size_t sublevel; // Image sublevel in each octave - size_t sigma_size; // Integer sigma. For computing the feature detector responses -}; \ No newline at end of file +#endif diff --git a/modules/features2d/src/kaze/AKAZEFeatures.cpp b/modules/features2d/src/kaze/AKAZEFeatures.cpp new file mode 100644 index 000000000..97222dfa4 --- /dev/null +++ b/modules/features2d/src/kaze/AKAZEFeatures.cpp @@ -0,0 +1,1880 @@ +/** + * @file AKAZEFeatures.cpp + * @brief Main class for detecting and describing binary features in an + * accelerated nonlinear scale space + * @date Sep 15, 2013 + * @author Pablo F. 
Alcantarilla, Jesus Nuevo + */ + +#include "AKAZEFeatures.h" +#include "fed.h" +#include "nldiffusion_functions.h" +#include "utils.h" + +#include + +// Namespaces +using namespace std; +using namespace cv; +using namespace cv::details::kaze; + +/* ************************************************************************* */ +/** + * @brief AKAZEFeatures constructor with input options + * @param options AKAZEFeatures configuration options + * @note This constructor allocates memory for the nonlinear scale space + */ +AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { + + ncycles_ = 0; + reordering_ = true; + + if (options_.descriptor_size > 0 && options_.descriptor >= cv::DESCRIPTOR_MLDB_UPRIGHT) { + generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, + options_.descriptor_pattern_size, options_.descriptor_channels); + } + + Allocate_Memory_Evolution(); +} + +/* ************************************************************************* */ +/** + * @brief This method allocates the memory for the nonlinear diffusion evolution + */ +void AKAZEFeatures::Allocate_Memory_Evolution(void) { + + float rfactor = 0.0f; + int level_height = 0, level_width = 0; + + // Allocate the dimension of the matrices for the evolution + for (int i = 0; i <= options_.omax - 1; i++) { + rfactor = 1.0f / pow(2.f, i); + level_height = (int)(options_.img_height*rfactor); + level_width = (int)(options_.img_width*rfactor); + + // Smallest possible octave and allow one scale if the image is small + if ((level_width < 80 || level_height < 40) && i != 0) { + options_.omax = i; + break; + } + + for (int j = 0; j < options_.nsublevels; j++) { + TEvolution step; + step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lyy = 
cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lsmooth = cv::Mat::zeros(level_height, level_width, CV_32F); + step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); + step.sigma_size = fRound(step.esigma); + step.etime = 0.5f*(step.esigma*step.esigma); + step.octave = i; + step.sublevel = j; + evolution_.push_back(step); + } + } + + // Allocate memory for the number of cycles and time steps + for (size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + vector tau; + float ttime = 0.0f; + ttime = evolution_[i].etime - evolution_[i - 1].etime; + naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; + } +} + +/* ************************************************************************* */ +/** + * @brief This method creates the nonlinear scale space for a given image + * @param img Input image for which the nonlinear scale space needs to be created + * @return 0 if the nonlinear scale space was created successfully, -1 otherwise + */ +int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) +{ + CV_Assert(evolution_.size() > 0); + + // Copy the original image to the first level of the evolution + img.copyTo(evolution_[0].Lt); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); + evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); + + // Allocate memory for the flow and step images + cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + + // First compute the kcontrast factor + options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0); + + // Now generate the rest of evolution levels + for (size_t i 
= 1; i < evolution_.size(); i++) { + + if (evolution_[i].octave > evolution_[i - 1].octave) { + halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt); + options_.kcontrast = options_.kcontrast*0.75f; + + // Allocate memory for the resized flow and step images + Lflow = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); + Lstep = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); + } + else { + evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); + } + + gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0f); + + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1); + + // Compute the conductivity equation + switch (options_.diffusivity) { + case cv::DIFF_PM_G1: + pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + break; + case cv::DIFF_PM_G2: + pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + break; + case cv::DIFF_WEICKERT: + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + break; + case cv::DIFF_CHARBONNIER: + charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + break; + default: + CV_Error(options_.diffusivity, "Diffusivity is not supported"); + break; + } + + // Perform FED n inner steps + for (int j = 0; j < nsteps_[i - 1]; j++) { + cv::details::kaze::nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]); + } + } + + return 0; +} + +/* ************************************************************************* */ +/** + * @brief This method selects interesting keypoints through the nonlinear scale space + * @param kpts Vector of detected keypoints + */ +void AKAZEFeatures::Feature_Detection(std::vector& kpts) +{ + kpts.clear(); + Compute_Determinant_Hessian_Response(); + Find_Scale_Space_Extrema(kpts); + Do_Subpixel_Refinement(kpts); +} 
+ +/* ************************************************************************* */ +class MultiscaleDerivativesAKAZEInvoker : public cv::ParallelLoopBody +{ +public: + explicit MultiscaleDerivativesAKAZEInvoker(std::vector& ev, const AKAZEOptions& opt) + : evolution_(&ev) + , options_(opt) + { + } + + void operator()(const cv::Range& range) const + { + std::vector& evolution = *evolution_; + + for (int i = range.start; i < range.end; i++) + { + float ratio = pow(2.f, (float)evolution[i].octave); + int sigma_size_ = fRound(evolution[i].esigma * options_.derivative_factor / ratio); + + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, sigma_size_); + + evolution[i].Lx = evolution[i].Lx*((sigma_size_)); + evolution[i].Ly = evolution[i].Ly*((sigma_size_)); + evolution[i].Lxx = evolution[i].Lxx*((sigma_size_)*(sigma_size_)); + evolution[i].Lxy = evolution[i].Lxy*((sigma_size_)*(sigma_size_)); + evolution[i].Lyy = evolution[i].Lyy*((sigma_size_)*(sigma_size_)); + } + } + +private: + std::vector* evolution_; + AKAZEOptions options_; +}; + +/* ************************************************************************* */ +/** + * @brief This method computes the multiscale derivatives for the nonlinear scale space + */ +void AKAZEFeatures::Compute_Multiscale_Derivatives(void) +{ + cv::parallel_for_(cv::Range(0, (int)evolution_.size()), + MultiscaleDerivativesAKAZEInvoker(evolution_, options_)); +} + +/* ************************************************************************* */ +/** + * @brief This method computes the feature detector response for the nonlinear scale space + * @note We use the Hessian determinant as the 
feature detector response + */ +void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { + + // Firstly compute the multiscale derivatives + Compute_Multiscale_Derivatives(); + + for (size_t i = 0; i < evolution_.size(); i++) + { + for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) + { + for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) + { + float lxx = *(evolution_[i].Lxx.ptr(ix)+jx); + float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); + float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); + *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy - lxy*lxy); + } + } + } +} + +/* ************************************************************************* */ +/** + * @brief This method finds extrema in the nonlinear scale space + * @param kpts Vector of detected keypoints + */ +void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) +{ + + float value = 0.0; + float dist = 0.0, ratio = 0.0, smax = 0.0; + int npoints = 0, id_repeated = 0; + int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; + bool is_extremum = false, is_repeated = false, is_out = false; + cv::KeyPoint point; + vector kpts_aux; + + // Set maximum size + if (options_.descriptor == cv::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_MLDB) { + smax = 10.0f*sqrtf(2.0f); + } + else if (options_.descriptor == cv::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_KAZE) { + smax = 12.0f*sqrtf(2.0f); + } + + for (size_t i = 0; i < evolution_.size(); i++) { + for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { + for (int jx = 1; jx < evolution_[i].Ldet.cols - 1; jx++) { + is_extremum = false; + is_repeated = false; + is_out = false; + value = *(evolution_[i].Ldet.ptr(ix)+jx); + + // Filter the points with the detector threshold + if (value > options_.dthreshold && value >= options_.min_dthreshold && + value > *(evolution_[i].Ldet.ptr(ix)+jx - 1) && + value > *(evolution_[i].Ldet.ptr(ix)+jx + 1) && + value > *(evolution_[i].Ldet.ptr(ix - 1) + jx - 1) && + value 
> *(evolution_[i].Ldet.ptr(ix - 1) + jx) && + value > *(evolution_[i].Ldet.ptr(ix - 1) + jx + 1) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx - 1) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx + 1)) { + + is_extremum = true; + point.response = fabs(value); + point.size = evolution_[i].esigma*options_.derivative_factor; + point.octave = (int)evolution_[i].octave; + point.class_id = (int)i; + ratio = pow(2.f, point.octave); + sigma_size_ = fRound(point.size / ratio); + point.pt.x = static_cast(jx); + point.pt.y = static_cast(ix); + + // Compare response with the same and lower scale + for (size_t ik = 0; ik < kpts_aux.size(); ik++) { + + if ((point.class_id - 1) == kpts_aux[ik].class_id || + point.class_id == kpts_aux[ik].class_id) { + dist = sqrt(pow(point.pt.x*ratio - kpts_aux[ik].pt.x, 2) + pow(point.pt.y*ratio - kpts_aux[ik].pt.y, 2)); + if (dist <= point.size) { + if (point.response > kpts_aux[ik].response) { + id_repeated = (int)ik; + is_repeated = true; + } + else { + is_extremum = false; + } + break; + } + } + } + + // Check out of bounds + if (is_extremum == true) { + + // Check that the point is under the image limits for the descriptor computation + left_x = fRound(point.pt.x - smax*sigma_size_) - 1; + right_x = fRound(point.pt.x + smax*sigma_size_) + 1; + up_y = fRound(point.pt.y - smax*sigma_size_) - 1; + down_y = fRound(point.pt.y + smax*sigma_size_) + 1; + + if (left_x < 0 || right_x >= evolution_[i].Ldet.cols || + up_y < 0 || down_y >= evolution_[i].Ldet.rows) { + is_out = true; + } + + if (is_out == false) { + if (is_repeated == false) { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts_aux.push_back(point); + npoints++; + } + else { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts_aux[id_repeated] = point; + } + } // if is_out + } //if is_extremum + } + } // for jx + } // for ix + } // for i + + // Now filter points with the upper scale level + for (size_t i = 0; i < 
kpts_aux.size(); i++) { + + is_repeated = false; + const cv::KeyPoint& pt = kpts_aux[i]; + for (size_t j = i + 1; j < kpts_aux.size(); j++) { + + // Compare response with the upper scale + if ((pt.class_id + 1) == kpts_aux[j].class_id) { + dist = sqrt(pow(pt.pt.x - kpts_aux[j].pt.x, 2) + pow(pt.pt.y - kpts_aux[j].pt.y, 2)); + if (dist <= pt.size) { + if (pt.response < kpts_aux[j].response) { + is_repeated = true; + break; + } + } + } + } + + if (is_repeated == false) + kpts.push_back(pt); + } +} + +/* ************************************************************************* */ +/** + * @brief This method performs subpixel refinement of the detected keypoints + * @param kpts Vector of detected keypoints + */ +void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) +{ + float Dx = 0.0, Dy = 0.0, ratio = 0.0; + float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; + int x = 0, y = 0; + cv::Mat A = cv::Mat::zeros(2, 2, CV_32F); + cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); + cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); + + for (size_t i = 0; i < kpts.size(); i++) { + ratio = pow(2.f, kpts[i].octave); + x = fRound(kpts[i].pt.x / ratio); + y = fRound(kpts[i].pt.y / ratio); + + // Compute the gradient + Dx = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + - *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1)); + Dy = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + - *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x)); + + // Compute the Hessian + Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1) + - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + + Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x) + - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + + Dxy = (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x + 1) + + (*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x - 1))) + - 
(0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x + 1) + + (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x - 1))); + + // Solve the linear system + *(A.ptr(0)) = Dxx; + *(A.ptr(1) + 1) = Dyy; + *(A.ptr(0) + 1) = *(A.ptr(1)) = Dxy; + *(b.ptr(0)) = -Dx; + *(b.ptr(1)) = -Dy; + + cv::solve(A, b, dst, DECOMP_LU); + + if (fabs(*(dst.ptr(0))) <= 1.0f && fabs(*(dst.ptr(1))) <= 1.0f) { + kpts[i].pt.x = x + (*(dst.ptr(0))); + kpts[i].pt.y = y + (*(dst.ptr(1))); + kpts[i].pt.x *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); + kpts[i].pt.y *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); + kpts[i].angle = 0.0; + + // In OpenCV the size of a keypoint its the diameter + kpts[i].size *= 2.0f; + } + // Delete the point since its not stable + else { + kpts.erase(kpts.begin() + i); + i--; + } + } +} + +/* ************************************************************************* */ + +class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody +{ +public: + SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_SURF_Descriptor_Upright_64((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; +}; + +class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + { + } + + void operator()(const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + 
Get_SURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; +}; + +class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + { + } + + void operator()(const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_MSURF_Upright_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; +}; + +class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; +}; + +class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + 
Get_Upright_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; +}; + +class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +{ +public: + Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + cv::Mat& desc, + std::vector& evolution, + AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) + , descriptorSamples_(descriptorSamples) + , descriptorBits_(descriptorBits) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_Upright_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; + + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ cv::Mat descriptorBits_; +}; + +class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; +}; + +class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +{ +public: + MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + cv::Mat& desc, + std::vector& evolution, + AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) + , descriptorSamples_(descriptorSamples) + , descriptorBits_(descriptorBits) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); + } + } + + void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; + + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ cv::Mat descriptorBits_; +}; + +/** + * @brief This method computes the set of descriptors through the nonlinear scale space + * @param kpts Vector of detected keypoints + * @param desc Matrix to store the descriptors + */ +void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) +{ + for(size_t i = 0; i < kpts.size(); i++) + { + CV_Assert(0 <= kpts[i].class_id && kpts[i].class_id < static_cast(evolution_.size())); + } + + // Allocate memory for the matrix with the descriptors + if (options_.descriptor < cv::DESCRIPTOR_MLDB_UPRIGHT) { + desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); + } + else { + // We use the full length binary descriptor -> 486 bits + if (options_.descriptor_size == 0) { + int t = (6 + 36 + 120)*options_.descriptor_channels; + desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1); + } + else { + // We use the random bit selection length binary descriptor + desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); + } + } + + switch (options_.descriptor) + { + case cv::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation + { + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); + } + break; + case cv::DESCRIPTOR_KAZE: + { + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); + } + break; + case cv::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation + { + if (options_.descriptor_size == 0) + cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + else + cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + } + break; + case cv::DESCRIPTOR_MLDB: + { + if (options_.descriptor_size == 0) + cv::parallel_for_(cv::Range(0, (int)kpts.size()), 
MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + else + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + } + break; + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the main orientation for a given keypoint + * @param kpt Input keypoint + * @note The orientation is computed using a similar approach as described in the + * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 + */ +void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) { + + int ix = 0, iy = 0, idx = 0, s = 0, level = 0; + float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; + std::vector resX(109), resY(109), Ang(109); + const int id[] = { 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6 }; + + // Variables for computing the dominant direction + float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; + + // Get the information from the keypoint + level = kpt.class_id; + ratio = (float)(1 << evolution_[level].octave); + s = fRound(0.5f*kpt.size / ratio); + xf = kpt.pt.x / ratio; + yf = kpt.pt.y / ratio; + + // Calculate derivatives responses for points within radius of 6*scale + for (int i = -6; i <= 6; ++i) { + for (int j = -6; j <= 6; ++j) { + if (i*i + j*j < 36) { + iy = fRound(yf + j*s); + ix = fRound(xf + i*s); + + gweight = gauss25[id[i + 6]][id[j + 6]]; + resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); + resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + + Ang[idx] = getAngle(resX[idx], resY[idx]); + ++idx; + } + } + } + // Loop slides pi/3 window around feature point + for (ang1 = 0; ang1 < (float)(2.0 * CV_PI); ang1 += 0.15f) { + ang2 = (ang1 + (float)(CV_PI / 3.0) >(float)(2.0*CV_PI) ? 
ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); + sumX = sumY = 0.f; + + for (size_t k = 0; k < Ang.size(); ++k) { + // Get angle from the x-axis of the sample point + const float & ang = Ang[k]; + + // Determine whether the point is within the window + if (ang1 < ang2 && ang1 < ang && ang < ang2) { + sumX += resX[k]; + sumY += resY[k]; + } + else if (ang2 < ang1 && + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0f*CV_PI))) { + sumX += resX[k]; + sumY += resY[k]; + } + } + + // if the vector produced from this window is longer than all + // previous vectors then this forms the new dominant direction + if (sumX*sumX + sumY*sumY > max) { + // store largest orientation + max = sumX*sumX + sumY*sumY; + kpt.angle = getAngle(sumX, sumY); + } + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the upright descriptor (not rotation invariant) of + * the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 + */ +void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int scale = 0, dsize = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5f, cy = 0.5f; + + const std::vector& evolution = *evolution_; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + ratio = (float)(1 << kpt.octave); + scale = fRound(0.5f*kpt.size / ratio); + level = kpt.class_id; + yf = kpt.pt.y / ratio; + xf = kpt.pt.x / ratio; + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + j = -8; + i = i - 4; + + cx += 1.0f; + cy = -0.5f; + + while (j < pattern_size) { + dx = dy = mdx = mdy = 0.0; + cy += 1.0f; + j = j - 4; + + ky = i + sample_step; + kx = j + sample_step; + + ys = yf + (ky*scale); + xs = xf + (kx*scale); + + for (int k = i; k < i + 9; k++) { + for (int l = j; l < j + 9; l++) { + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + //Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50f*scale); + + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); + + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); + + fx = sample_x - x1; + fy = sample_y - y1; + + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = 
*(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; + + rx = gauss_s1*rx; + ry = gauss_s1*ry; + + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 + */ +void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5f, cy = 0.5f; + + const std::vector& evolution = *evolution_; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + ratio = (float)(1 << kpt.octave); + scale = fRound(0.5f*kpt.size / ratio); + angle = kpt.angle; + level = kpt.class_id; + yf = kpt.pt.y / ratio; + xf = kpt.pt.x / ratio; + co = cos(angle); + si = sin(angle); + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + j = -8; + i = i - 4; + + cx += 1.0f; + cy = -0.5f; + + while (j < pattern_size) { + dx = dy = mdx = mdy = 0.0; + cy += 1.0f; + j = j - 4; + + ky = i + sample_step; + kx = j + sample_step; + + xs = xf + (-kx*scale*si + ky*scale*co); + ys = yf + (kx*scale*co + ky*scale*si); + + for (int k = i; k < i + 9; ++k) { + for (int l = j; l < j + 9; ++l) { + // Get coords of sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + // Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 
2.5f*scale); + + y1 = fRound(sample_y - 0.5f); + x1 = fRound(sample_x - 0.5f); + + y2 = fRound(sample_y + 0.5f); + x2 = fRound(sample_x + 0.5f); + + fx = sample_x - x1; + fy = sample_y - y1; + + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); + + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the rupright descriptor (not rotation invariant) of + * the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + */ +void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { + + float di = 0.0, dx = 0.0, dy = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, ratio = 0.0; + int x1 = 0, y1 = 0, 
sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; + + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + + // Matrices for the M-LDB descriptor + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); + + // Get the information from the keypoint + ratio = (float)(1 << kpt.octave); + scale = fRound(0.5f*kpt.size / ratio); + level = kpt.class_id; + yf = kpt.pt.y / ratio; + xf = kpt.pt.x / ratio; + + // First 2x2 grid + pattern_size = options_->descriptor_pattern_size; + sample_step = pattern_size; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + *(values_1.ptr(dcount2)+1) = dx; + *(values_1.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + // Do binary comparison first level + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << 
(dcount1 % 8)); + } + dcount1++; + } + } + + // Second 3x3 grid + sample_step = static_cast(ceil(pattern_size*2. / 3.)); + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_2.ptr(dcount2)) = di; + *(values_2.ptr(dcount2)+1) = dx; + *(values_2.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + //Do binary comparison second level + dcount2 = 0; + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + + // Third 4x4 grid + sample_step = pattern_size / 2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = 
*(evolution[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + *(values_3.ptr(dcount2)+1) = dx; + *(values_3.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + //Do binary comparison third level + dcount2 = 0; + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + */ +void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { + + float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; + + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + + // Matrices for the M-LDB descriptor + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); + + // Get the information from the keypoint + ratio = (float)(1 << kpt.octave); + scale = fRound(0.5f*kpt.size / ratio); + angle = kpt.angle; 
+ level = kpt.class_id; + yf = kpt.pt.y / ratio; + xf = kpt.pt.x / ratio; + co = cos(angle); + si = sin(angle); + + // First 2x2 grid + pattern_size = options.descriptor_pattern_size; + sample_step = pattern_size; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + + di = dx = dy = 0.0; + nsamples = 0; + + for (float k = (float)i; k < i + sample_step; k++) { + for (float l = (float)j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + + di += ri; + + if (options.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + if (options.descriptor_channels > 1) { + *(values_1.ptr(dcount2)+1) = dx; + } + + if (options.descriptor_channels > 2) { + *(values_1.ptr(dcount2)+2) = dy; + } + + dcount2++; + } + } + + // Do binary comparison first level + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + + if (options.descriptor_channels > 1) { + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + + dcount1++; + } + } + } + + if (options.descriptor_channels > 2) { + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)+2) > 
*(values_1.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } + + // Second 3x3 grid + sample_step = static_cast(ceil(pattern_size*2. / 3.)); + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + di += ri; + + if (options.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_2.ptr(dcount2)) = di; + if (options.descriptor_channels > 1) { + *(values_2.ptr(dcount2)+1) = dx; + } + + if (options.descriptor_channels > 2) { + *(values_2.ptr(dcount2)+2) = dy; + } + + dcount2++; + } + } + + // Do binary comparison second level + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + + if (options.descriptor_channels > 1) { + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } + + if (options.descriptor_channels > 2) { + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1 / 8] 
|= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } + + // Third 4x4 grid + sample_step = pattern_size / 2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + di += ri; + + if (options.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + if (options.descriptor_channels > 1) + *(values_3.ptr(dcount2)+1) = dx; + + if (options.descriptor_channels > 2) + *(values_3.ptr(dcount2)+2) = dy; + + dcount2++; + } + } + + // Do binary comparison third level + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + + if (options.descriptor_channels > 1) { + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } + + if (options.descriptor_channels > 2) { + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } +} + +/* 
************************************************************************* */ +/** + * @brief This method computes the M-LDB descriptor of the provided keypoint given the + * main orientation of the keypoint. The descriptor is computed based on a subset of + * the bits of the whole descriptor + * @param kpt Input keypoint + * @param desc Descriptor vector + */ +void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { + + float di = 0.f, dx = 0.f, dy = 0.f; + float rx = 0.f, ry = 0.f; + float sample_x = 0.f, sample_y = 0.f; + int x1 = 0, y1 = 0; + + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + + // Get the information from the keypoint + float ratio = (float)(1 << kpt.octave); + int scale = fRound(0.5f*kpt.size / ratio); + float angle = kpt.angle; + int level = kpt.class_id; + float yf = kpt.pt.y / ratio; + float xf = kpt.pt.x / ratio; + float co = cos(angle); + float si = sin(angle); + + // Allocate memory for the matrix of values + cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); + + // Sample everything, but only do the comparisons + vector steps(3); + steps.at(0) = options.descriptor_pattern_size; + steps.at(1) = (int)ceil(2.f*options.descriptor_pattern_size / 3.f); + steps.at(2) = options.descriptor_pattern_size / 2; + + for (int i = 0; i < descriptorSamples_.rows; i++) { + const int *coords = descriptorSamples_.ptr(i); + int sample_step = steps.at(coords[0]); + di = 0.0f; + dx = 0.0f; + dy = 0.0f; + + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + di += *(evolution[level].Lt.ptr(y1)+x1); + + if (options.descriptor_channels > 1) { + rx = 
*(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + + if (options.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + dx += rx*co + ry*si; + dy += -rx*si + ry*co; + } + } + } + } + + *(values.ptr(options.descriptor_channels*i)) = di; + + if (options.descriptor_channels == 2) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; + } + else if (options.descriptor_channels == 3) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; + *(values.ptr(options.descriptor_channels*i + 2)) = dy; + } + } + + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); + + for (int i = 0; i vals[comps[2 * i + 1]]) { + desc[i / 8] |= (1 << (i % 8)); + } + } +} + +/* ************************************************************************* */ +/** + * @brief This method computes the upright (not rotation invariant) M-LDB descriptor + * of the provided keypoint given the main orientation of the keypoint. 
+ * The descriptor is computed based on a subset of the bits of the whole descriptor + * @param kpt Input keypoint + * @param desc Descriptor vector + */ +void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { + + float di = 0.0f, dx = 0.0f, dy = 0.0f; + float rx = 0.0f, ry = 0.0f; + float sample_x = 0.0f, sample_y = 0.0f; + int x1 = 0, y1 = 0; + + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + + // Get the information from the keypoint + float ratio = (float)(1 << kpt.octave); + int scale = fRound(0.5f*kpt.size / ratio); + int level = kpt.class_id; + float yf = kpt.pt.y / ratio; + float xf = kpt.pt.x / ratio; + + // Allocate memory for the matrix of values + Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); + + vector steps(3); + steps.at(0) = options.descriptor_pattern_size; + steps.at(1) = static_cast(ceil(2.f*options.descriptor_pattern_size / 3.f)); + steps.at(2) = options.descriptor_pattern_size / 2; + + for (int i = 0; i < descriptorSamples_.rows; i++) { + const int *coords = descriptorSamples_.ptr(i); + int sample_step = steps.at(coords[0]); + di = 0.0f, dx = 0.0f, dy = 0.0f; + + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + di += *(evolution[level].Lt.ptr(y1)+x1); + + if (options.descriptor_channels > 1) { + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); + + if (options.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options.descriptor_channels == 3) { + dx += rx; + dy += ry; + } + } + } + } + + *(values.ptr(options.descriptor_channels*i)) = di; + + if (options.descriptor_channels == 2) { + 
*(values.ptr(options.descriptor_channels*i + 1)) = dx; + } + else if (options.descriptor_channels == 3) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; + *(values.ptr(options.descriptor_channels*i + 2)) = dy; + } + } + + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); + + for (int i = 0; i vals[comps[2 * i + 1]]) { + desc[i / 8] |= (1 << (i % 8)); + } + } +} + +/* ************************************************************************* */ +/** + * @brief This function computes a (quasi-random) list of bits to be taken + * from the full descriptor. To speed the extraction, the function creates + * a list of the samples that are involved in generating at least a bit (sampleList) + * and a list of the comparisons between those samples (comparisons) + * @param sampleList + * @param comparisons The matrix with the binary comparisons + * @param nbits The number of bits of the descriptor + * @param pattern_size The pattern size for the binary descriptor + * @param nchannels Number of channels to consider in the descriptor (1-3) + * @note The function keeps the 18 bits (3-channels by 6 comparisons) of the + * coarser grid, since it provides the most robust estimations + */ +void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, + int pattern_size, int nchannels) { + + int ssz = 0; + for (int i = 0; i < 3; i++) { + int gz = (i + 2)*(i + 2); + ssz += gz*(gz - 1) / 2; + } + ssz *= nchannels; + + CV_Assert(nbits <= ssz); // Descriptor size can't be bigger than full descriptor + + // Since the full descriptor is usually under 10k elements, we pick + // the selection from the full matrix. We take as many samples per + // pick as the number of channels. 
For every pick, we + // take the two samples involved and put them in the sampling list + + Mat_ fullM(ssz / nchannels, 5); + for (int i = 0, c = 0; i < 3; i++) { + int gdiv = i + 2; //grid divisions, per row + int gsz = gdiv*gdiv; + int psz = (int)ceil(2.f*pattern_size / (float)gdiv); + + for (int j = 0; j < gsz; j++) { + for (int k = j + 1; k < gsz; k++, c++) { + fullM(c, 0) = i; + fullM(c, 1) = psz*(j % gdiv) - pattern_size; + fullM(c, 2) = psz*(j / gdiv) - pattern_size; + fullM(c, 3) = psz*(k % gdiv) - pattern_size; + fullM(c, 4) = psz*(k / gdiv) - pattern_size; + } + } + } + + srand(1024); + Mat_ comps = Mat_(nchannels * (int)ceil(nbits / (float)nchannels), 2); + comps = 1000; + + // Select some samples. A sample includes all channels + int count = 0; + int npicks = (int)ceil(nbits / (float)nchannels); + Mat_ samples(29, 3); + Mat_ fullcopy = fullM.clone(); + samples = -1; + + for (int i = 0; i < npicks; i++) { + int k = rand() % (fullM.rows - i); + if (i < 6) { + // Force use of the coarser grid values and comparisons + k = i; + } + + bool n = true; + + for (int j = 0; j < count; j++) { + if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 1) && samples(j, 2) == fullcopy(k, 2)) { + n = false; + comps(i*nchannels, 0) = nchannels*j; + comps(i*nchannels + 1, 0) = nchannels*j + 1; + comps(i*nchannels + 2, 0) = nchannels*j + 2; + break; + } + } + + if (n) { + samples(count, 0) = fullcopy(k, 0); + samples(count, 1) = fullcopy(k, 1); + samples(count, 2) = fullcopy(k, 2); + comps(i*nchannels, 0) = nchannels*count; + comps(i*nchannels + 1, 0) = nchannels*count + 1; + comps(i*nchannels + 2, 0) = nchannels*count + 2; + count++; + } + + n = true; + for (int j = 0; j < count; j++) { + if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 3) && samples(j, 2) == fullcopy(k, 4)) { + n = false; + comps(i*nchannels, 1) = nchannels*j; + comps(i*nchannels + 1, 1) = nchannels*j + 1; + comps(i*nchannels + 2, 1) = nchannels*j + 2; + break; + } + } + + 
if (n) { + samples(count, 0) = fullcopy(k, 0); + samples(count, 1) = fullcopy(k, 3); + samples(count, 2) = fullcopy(k, 4); + comps(i*nchannels, 1) = nchannels*count; + comps(i*nchannels + 1, 1) = nchannels*count + 1; + comps(i*nchannels + 2, 1) = nchannels*count + 2; + count++; + } + + Mat tmp = fullcopy.row(k); + fullcopy.row(fullcopy.rows - i - 1).copyTo(tmp); + } + + sampleList = samples.rowRange(0, count).clone(); + comparisons = comps.rowRange(0, nbits).clone(); +} diff --git a/modules/features2d/src/kaze/AKAZEFeatures.h b/modules/features2d/src/kaze/AKAZEFeatures.h new file mode 100644 index 000000000..f8ce7a488 --- /dev/null +++ b/modules/features2d/src/kaze/AKAZEFeatures.h @@ -0,0 +1,62 @@ +/** + * @file AKAZE.h + * @brief Main class for detecting and computing binary descriptors in an + * accelerated nonlinear scale space + * @date Mar 27, 2013 + * @author Pablo F. Alcantarilla, Jesus Nuevo + */ + +#ifndef __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__ +#define __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__ + +/* ************************************************************************* */ +// Includes +#include "precomp.hpp" +#include "AKAZEConfig.h" +#include "TEvolution.h" + +/* ************************************************************************* */ +// AKAZE Class Declaration +class AKAZEFeatures { + +private: + + AKAZEOptions options_; ///< Configuration options for AKAZE + std::vector evolution_; ///< Vector of nonlinear diffusion evolution + + /// FED parameters + int ncycles_; ///< Number of cycles + bool reordering_; ///< Flag for reordering time steps + std::vector > tsteps_; ///< Vector of FED dynamic time steps + std::vector nsteps_; ///< Vector of number of steps per cycle + + /// Matrices for the M-LDB descriptor computation + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ cv::Mat descriptorBits_; + cv::Mat bitMask_; + +public: + + /// Constructor with input arguments + AKAZEFeatures(const AKAZEOptions& options); + + /// Scale Space methods + void Allocate_Memory_Evolution(); + int Create_Nonlinear_Scale_Space(const cv::Mat& img); + void Feature_Detection(std::vector& kpts); + void Compute_Determinant_Hessian_Response(void); + void Compute_Multiscale_Derivatives(void); + void Find_Scale_Space_Extrema(std::vector& kpts); + void Do_Subpixel_Refinement(std::vector& kpts); + + /// Feature description methods + void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); + static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_); +}; + +/* ************************************************************************* */ +/// Inline functions +void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, + int nbits, int pattern_size, int nchannels); + +#endif diff --git a/modules/features2d/src/kaze/KAZEConfig.h b/modules/features2d/src/kaze/KAZEConfig.h index 988e24737..21489a07a 100644 --- a/modules/features2d/src/kaze/KAZEConfig.h +++ b/modules/features2d/src/kaze/KAZEConfig.h @@ -5,7 +5,8 @@ * @author Pablo F. 
Alcantarilla */ -#pragma once +#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ +#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ // OpenCV Includes #include "precomp.hpp" @@ -15,14 +16,8 @@ struct KAZEOptions { - enum DIFFUSIVITY_TYPE { - PM_G1 = 0, - PM_G2 = 1, - WEICKERT = 2 - }; - KAZEOptions() - : diffusivity(PM_G2) + : diffusivity(cv::DIFF_PM_G2) , soffset(1.60f) , omax(4) @@ -33,20 +28,13 @@ struct KAZEOptions { , dthreshold(0.001f) , kcontrast(0.01f) , kcontrast_percentille(0.7f) - , kcontrast_bins(300) - - , use_fed(true) + , kcontrast_bins(300) , upright(false) , extended(false) - - , use_clipping_normalilzation(false) - , clipping_normalization_ratio(1.6f) - , clipping_normalization_niter(5) { } - DIFFUSIVITY_TYPE diffusivity; - + int diffusivity; float soffset; int omax; int nsublevels; @@ -57,27 +45,8 @@ struct KAZEOptions { float kcontrast; float kcontrast_percentille; int kcontrast_bins; - - bool use_fed; bool upright; bool extended; - - bool use_clipping_normalilzation; - float clipping_normalization_ratio; - int clipping_normalization_niter; }; -struct TEvolution { - cv::Mat Lx, Ly; // First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives - cv::Mat Lflow; // Diffusivity image - cv::Mat Lt; // Evolution image - cv::Mat Lsmooth; // Smoothed image - cv::Mat Lstep; // Evolution step update - cv::Mat Ldet; // Detector response - float etime; // Evolution time - float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 - float octave; // Image octave - float sublevel; // Image sublevel in each octave - int sigma_size; // Integer esigma. 
For computing the feature detector responses -}; +#endif diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 634f68da8..69e9a4b69 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -22,22 +22,21 @@ */ #include "KAZEFeatures.h" +#include "utils.h" // Namespaces using namespace std; using namespace cv; using namespace cv::details::kaze; -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** * @brief KAZE constructor with input options * @param options KAZE configuration options * @note The constructor allocates memory for the nonlinear scale space */ -KAZEFeatures::KAZEFeatures(KAZEOptions& _options) - : options(_options) +KAZEFeatures::KAZEFeatures(KAZEOptions& options) + : options_(options) { ncycles_ = 0; reordering_ = true; @@ -46,70 +45,48 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& _options) Allocate_Memory_Evolution(); } -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** * @brief This method allocates the memory for the nonlinear diffusion evolution */ void KAZEFeatures::Allocate_Memory_Evolution(void) { // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= options.omax - 1; i++) { - for (int j = 0; j <= options.nsublevels - 1; j++) { + for (int i = 0; i <= options_.omax - 1; i++) { + for (int j = 0; j <= options_.nsublevels - 1; j++) { TEvolution aux; - aux.Lx = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Ly = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lxx = 
cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lxy = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lyy = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lflow = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lt = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lsmooth = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Lstep = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.Ldet = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); - aux.esigma = options.soffset*pow((float)2.0f, (float)(j) / (float)(options.nsublevels)+i); + aux.Lx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Ly = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lxx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lxy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lyy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lt = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lsmooth = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Ldet = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i); aux.etime = 0.5f*(aux.esigma*aux.esigma); aux.sigma_size = fRound(aux.esigma); - aux.octave = (float)i; - aux.sublevel = (float)j; + aux.octave = i; + aux.sublevel = j; evolution_.push_back(aux); } } // Allocate memory for the FED number of cycles and time steps - if (options.use_fed) { - for (size_t i = 1; i < evolution_.size(); i++) { - int naux = 0; - vector tau; - float ttime = 0.0; - ttime = evolution_[i].etime - evolution_[i - 1].etime; - naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); - nsteps_.push_back(naux); - tsteps_.push_back(tau); - ncycles_++; - } + for 
(size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + vector tau; + float ttime = 0.0; + ttime = evolution_[i].etime - evolution_[i - 1].etime; + naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; } - else { - // Allocate memory for the auxiliary variables that are used in the AOS scheme - Ltx_ = Mat::zeros(options.img_width, options.img_height, CV_32F); // TODO? IS IT A BUG??? - Lty_ = Mat::zeros(options.img_height, options.img_width, CV_32F); - px_ = Mat::zeros(options.img_height, options.img_width, CV_32F); - py_ = Mat::zeros(options.img_height, options.img_width, CV_32F); - ax_ = Mat::zeros(options.img_height, options.img_width, CV_32F); - ay_ = Mat::zeros(options.img_height, options.img_width, CV_32F); - bx_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); - by_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); - qr_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); - qc_ = Mat::zeros(options.img_height, options.img_width - 1, CV_32F); - } - } -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** * @brief This method creates the nonlinear scale space for a given image * @param img Input image for which the nonlinear scale space needs to be created @@ -121,52 +98,47 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options.soffset); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options.sderivatives); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); + 
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives); // Firstly compute the kcontrast factor - Compute_KContrast(evolution_[0].Lt, options.kcontrast_percentille); + Compute_KContrast(evolution_[0].Lt, options_.kcontrast_percentille); + + // Allocate memory for the flow and step images + cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); - gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options.sderivatives); + gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives); // Compute the Gaussian derivatives Lx and Ly Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT); Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT); // Compute the conductivity equation - if (options.diffusivity == KAZEOptions::PM_G1) { - pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); + if (options_.diffusivity == cv::DIFF_PM_G1) { + pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); } - else if (options.diffusivity == KAZEOptions::PM_G2) { - pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); + else if (options_.diffusivity == cv::DIFF_PM_G2) { + pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); } - else if (options.diffusivity == KAZEOptions::WEICKERT) { - weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); + else if (options_.diffusivity == cv::DIFF_WEICKERT) { + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); } // Perform FED n inner steps - if (options.use_fed) { - for (int j = 
0; j < nsteps_[i - 1]; j++) { - nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); - } - } - else { - // Perform the evolution step with AOS - AOS_Step_Scalar(evolution_[i].Lt, evolution_[i - 1].Lt, evolution_[i].Lflow, - evolution_[i].etime - evolution_[i - 1].etime); + for (int j = 0; j < nsteps_[i - 1]; j++) { + nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]); } } return 0; } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the k contrast factor * @param img Input image @@ -174,38 +146,10 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) */ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { - options.kcontrast = compute_k_percentile(img, kpercentile, options.sderivatives, options.kcontrast_bins, 0, 0); + options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0); } -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method computes the multiscale derivatives for the nonlinear scale space - */ -void KAZEFeatures::Compute_Multiscale_Derivatives(void) -{ - // TODO: use cv::parallel_for_ - for (size_t i = 0; i < evolution_.size(); i++) - { - // Compute multiscale derivatives for the detector - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, evolution_[i].sigma_size); - 
compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, evolution_[i].sigma_size); - - evolution_[i].Lx = evolution_[i].Lx*((evolution_[i].sigma_size)); - evolution_[i].Ly = evolution_[i].Ly*((evolution_[i].sigma_size)); - evolution_[i].Lxx = evolution_[i].Lxx*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - } -} - -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as feature detector @@ -219,9 +163,9 @@ void KAZEFeatures::Compute_Detector_Response(void) for (size_t i = 0; i < evolution_.size(); i++) { - for (int ix = 0; ix < options.img_height; ix++) + for (int ix = 0; ix < options_.img_height; ix++) { - for (int jx = 0; jx < options.img_width; jx++) + for (int jx = 0; jx < options_.img_width; jx++) { lxx = *(evolution_[i].Lxx.ptr(ix)+jx); lxy = *(evolution_[i].Lxy.ptr(ix)+jx); @@ -232,9 +176,7 @@ void KAZEFeatures::Compute_Detector_Response(void) } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of keypoints @@ -242,27 +184,127 @@ void KAZEFeatures::Compute_Detector_Response(void) void 
KAZEFeatures::Feature_Detection(std::vector& kpts) { kpts.clear(); - - // Firstly compute the detector response for each pixel and scale level - Compute_Detector_Response(); - - // Find scale space extrema - Determinant_Hessian_Parallel(kpts); - - // Perform some subpixel refinement + Compute_Detector_Response(); + Determinant_Hessian(kpts); Do_Subpixel_Refinement(kpts); } -//************************************************************************************* -//************************************************************************************* +/* ************************************************************************* */ +class MultiscaleDerivativesKAZEInvoker : public cv::ParallelLoopBody +{ +public: + explicit MultiscaleDerivativesKAZEInvoker(std::vector& ev) : evolution_(&ev) + { + } + void operator()(const cv::Range& range) const + { + std::vector& evolution = *evolution_; + for (int i = range.start; i < range.end; i++) + { + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, evolution[i].sigma_size); + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, evolution[i].sigma_size); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, evolution[i].sigma_size); + compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, evolution[i].sigma_size); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, evolution[i].sigma_size); + + evolution[i].Lx = evolution[i].Lx*((evolution[i].sigma_size)); + evolution[i].Ly = evolution[i].Ly*((evolution[i].sigma_size)); + evolution[i].Lxx = evolution[i].Lxx*((evolution[i].sigma_size)*(evolution[i].sigma_size)); + evolution[i].Lxy = evolution[i].Lxy*((evolution[i].sigma_size)*(evolution[i].sigma_size)); + evolution[i].Lyy = evolution[i].Lyy*((evolution[i].sigma_size)*(evolution[i].sigma_size)); + } + } + +private: + std::vector* evolution_; +}; + +/* ************************************************************************* */ +/** + * @brief 
This method computes the multiscale derivatives for the nonlinear scale space + */ +void KAZEFeatures::Compute_Multiscale_Derivatives(void) +{ + cv::parallel_for_(cv::Range(0, (int)evolution_.size()), + MultiscaleDerivativesKAZEInvoker(evolution_)); +} + + +/* ************************************************************************* */ +class FindExtremumKAZEInvoker : public cv::ParallelLoopBody +{ +public: + explicit FindExtremumKAZEInvoker(std::vector& ev, std::vector >& kpts_par, + const KAZEOptions& options) : evolution_(&ev), kpts_par_(&kpts_par), options_(options) + { + } + + void operator()(const cv::Range& range) const + { + std::vector& evolution = *evolution_; + std::vector >& kpts_par = *kpts_par_; + for (int i = range.start; i < range.end; i++) + { + float value = 0.0; + bool is_extremum = false; + + for (int ix = 1; ix < options_.img_height - 1; ix++) { + for (int jx = 1; jx < options_.img_width - 1; jx++) { + + is_extremum = false; + value = *(evolution[i].Ldet.ptr(ix)+jx); + + // Filter the points with the detector threshold + if (value > options_.dthreshold) { + if (value >= *(evolution[i].Ldet.ptr(ix)+jx - 1)) { + // First check on the same scale + if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1)) { + // Now check on the lower scale + if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0)) { + // Now check on the upper scale + if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0)) { + is_extremum = true; + } + } + } + } + } + + // Add the point of interest!! 
+ if (is_extremum == true) { + cv::KeyPoint point; + point.pt.x = (float)jx; + point.pt.y = (float)ix; + point.response = fabs(value); + point.size = evolution[i].esigma; + point.octave = (int)evolution[i].octave; + point.class_id = i; + + // We use the angle field for the sublevel value + // Then, we will replace this angle field with the main orientation + point.angle = static_cast(evolution[i].sublevel); + kpts_par[i - 1].push_back(point); + } + } + } + } + } + +private: + std::vector* evolution_; + std::vector >* kpts_par_; + KAZEOptions options_; +}; + +/* ************************************************************************* */ /** * @brief This method performs the detection of keypoints by using the normalized * score of the Hessian determinant through the nonlinear scale space * @param kpts Vector of keypoints * @note We compute features for each of the nonlinear scale space level in a different processing thread */ -void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) +void KAZEFeatures::Determinant_Hessian(std::vector& kpts) { int level = 0; float dist = 0.0, smax = 3.0; @@ -283,10 +325,8 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) kpts_par_.push_back(aux); } - // TODO: Use cv::parallel_for_ - for (int i = 1; i < (int)evolution_.size() - 1; i++) { - Find_Extremum_Threading(i); - } + cv::parallel_for_(cv::Range(1, (int)evolution_.size()-1), + FindExtremumKAZEInvoker(evolution_, kpts_par_, options_)); // Now fill the vector of keypoints!!! 
for (int i = 0; i < (int)kpts_par_.size(); i++) { @@ -343,63 +383,7 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) } } -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method is called by the thread which is responsible of finding extrema - * at a given nonlinear scale level - * @param level Index in the nonlinear scale space evolution - */ -void KAZEFeatures::Find_Extremum_Threading(const int& level) { - - float value = 0.0; - bool is_extremum = false; - - for (int ix = 1; ix < options.img_height - 1; ix++) { - for (int jx = 1; jx < options.img_width - 1; jx++) { - - is_extremum = false; - value = *(evolution_[level].Ldet.ptr(ix)+jx); - - // Filter the points with the detector threshold - if (value > options.dthreshold) { - if (value >= *(evolution_[level].Ldet.ptr(ix)+jx - 1)) { - // First check on the same scale - if (check_maximum_neighbourhood(evolution_[level].Ldet, 1, value, ix, jx, 1)) { - // Now check on the lower scale - if (check_maximum_neighbourhood(evolution_[level - 1].Ldet, 1, value, ix, jx, 0)) { - // Now check on the upper scale - if (check_maximum_neighbourhood(evolution_[level + 1].Ldet, 1, value, ix, jx, 0)) { - is_extremum = true; - } - } - } - } - } - - // Add the point of interest!! 
- if (is_extremum == true) { - KeyPoint point; - point.pt.x = (float)jx; - point.pt.y = (float)ix; - point.response = fabs(value); - point.size = evolution_[level].esigma; - point.octave = (int)evolution_[level].octave; - point.class_id = level; - - // We use the angle field for the sublevel value - // Then, we will replace this angle field with the main orientation - point.angle = evolution_[level].sublevel; - kpts_par_[level - 1].push_back(point); - } - } - } -} - -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints @@ -475,10 +459,10 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { if (fabs(*(dst.ptr(0))) <= 1.0f && fabs(*(dst.ptr(1))) <= 1.0f && fabs(*(dst.ptr(2))) <= 1.0f) { kpts_[i].pt.x += *(dst.ptr(0)); kpts_[i].pt.y += *(dst.ptr(1)); - dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(options.nsublevels)); + dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(options_.nsublevels)); // In OpenCV the size of a keypoint is the diameter!! 
- kpts_[i].size = 2.0f*options.soffset*pow((float)2.0f, dsc); + kpts_[i].size = 2.0f*options_.soffset*pow((float)2.0f, dsc); kpts_[i].angle = 0.0; } // Set the points to be deleted after the for loop @@ -497,17 +481,15 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ class KAZE_Descriptor_Invoker : public cv::ParallelLoopBody { public: - KAZE_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) - : _kpts(&kpts) - , _desc(&desc) - , _evolution(&evolution) - , options(_options) + KAZE_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& options) + : kpts_(&kpts) + , desc_(&desc) + , evolution_(&evolution) + , options_(options) { } @@ -517,26 +499,26 @@ public: void operator() (const cv::Range& range) const { - std::vector &kpts = *_kpts; - cv::Mat &desc = *_desc; - std::vector &evolution = *_evolution; + std::vector &kpts = *kpts_; + cv::Mat &desc = *desc_; + std::vector &evolution = *evolution_; for (int i = range.start; i < range.end; i++) { kpts[i].angle = 0.0; - if (options.upright) + if (options_.upright) { kpts[i].angle = 0.0; - if (options.extended) + if (options_.extended) Get_KAZE_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); else Get_KAZE_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); } else { - KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options); + KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options_); - if (options.extended) + if (options_.extended) Get_KAZE_Descriptor_128(kpts[i], desc.ptr((int)i)); else Get_KAZE_Descriptor_64(kpts[i], desc.ptr((int)i)); @@ -549,12 +531,13 @@ private: void Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint& kpt, 
float* desc) const; void Get_KAZE_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const; - std::vector * _kpts; - cv::Mat * _desc; - std::vector * _evolution; - KAZEOptions options; + std::vector * kpts_; + cv::Mat * desc_; + std::vector * evolution_; + KAZEOptions options_; }; +/* ************************************************************************* */ /** * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of keypoints @@ -562,20 +545,23 @@ private: */ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { + for(size_t i = 0; i < kpts.size(); i++) + { + CV_Assert(0 <= kpts[i].class_id && kpts[i].class_id < static_cast(evolution_.size())); + } + // Allocate memory for the matrix of descriptors - if (options.extended == true) { + if (options_.extended == true) { desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1); } else { desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } - cv::parallel_for_(cv::Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_)); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the main orientation for a given keypoint * @param kpt Input keypoint @@ -651,9 +637,7 @@ void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the upright descriptor (not rotation 
invariant) of * the provided keypoint @@ -673,7 +657,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; int dsize = 0, scale = 0, level = 0; - std::vector& evolution_ = *_evolution; + std::vector& evolution = *evolution_; // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; @@ -724,26 +708,26 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); + checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); + checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height); fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; rx = gauss_s1*rx; @@ -779,15 +763,9 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint for (i = 0; i < dsize; 
i++) { desc[i] /= len; } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the descriptor of the provided keypoint given the * main orientation of the keypoint @@ -807,7 +785,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, fl int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; - std::vector& evolution_ = *_evolution; + std::vector& evolution = *evolution_; // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; @@ -862,26 +840,26 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, fl y1 = fRound(sample_y - 0.5f); x1 = fRound(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); + checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); + checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height); fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = 
*(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis @@ -914,15 +892,9 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, fl for (i = 0; i < dsize; i++) { desc[i] /= len; } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the extended upright descriptor (not rotation invariant) of * the provided keypoint @@ -947,7 +919,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; - std::vector& evolution_ = *_evolution; + std::vector& evolution = *evolution_; // Set the descriptor size and the sample and pattern sizes dsize = 128; @@ -998,26 +970,26 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); + checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); + checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height); fx = sample_x 
- x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; rx = gauss_s1*rx; @@ -1072,15 +1044,9 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint for (i = 0; i < dsize; i++) { desc[i] /= len; } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the extended G-SURF descriptor of the provided keypoint * given the main orientation of the keypoint @@ -1102,7 +1068,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, f int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; - std::vector& evolution_ = *_evolution; + std::vector& evolution = *evolution_; // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; 
@@ -1160,26 +1126,26 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, f y1 = fRound(sample_y - 0.5f); x1 = fRound(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); + checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); + checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height); fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis @@ -1235,298 +1201,4 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, f for (i = 0; i < dsize; i++) { desc[i] /= len; } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } -} - -//************************************************************************************* -//************************************************************************************* 
- -/** - * @brief This method performs a scalar non-linear diffusion step using AOS schemes - * @param Ld Image at a given evolution step - * @param Ldprev Image at a previous evolution step - * @param c Conductivity image - * @param stepsize Stepsize for the nonlinear diffusion evolution - * @note If c is constant, the diffusion will be linear - * If c is a matrix of the same size as Ld, the diffusion will be nonlinear - * The stepsize can be arbitrarily large - */ -void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { - - AOS_Rows(Ldprev, c, stepsize); - AOS_Columns(Ldprev, c, stepsize); - - Ld = 0.5f*(Lty_ + Ltx_.t()); -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method performs performs 1D-AOS for the image rows - * @param Ldprev Image at a previous evolution step - * @param c Conductivity image - * @param stepsize Stepsize for the nonlinear diffusion evolution - */ -void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { - - // Operate on rows - for (int i = 0; i < qr_.rows; i++) { - for (int j = 0; j < qr_.cols; j++) { - *(qr_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i + 1) + j); - } - } - - for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(0) + j) = *(qr_.ptr(0) + j); - } - - for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(py_.rows - 1) + j) = *(qr_.ptr(qr_.rows - 1) + j); - } - - for (int i = 1; i < py_.rows - 1; i++) { - for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(i)+j) = *(qr_.ptr(i - 1) + j) + *(qr_.ptr(i)+j); - } - } - - // a = 1 + t.*p; (p is -1*p) - // b = -t.*q; - ay_ = 1.0f + stepsize*py_; // p is -1*p - by_ = -stepsize*qr_; - - // Do Thomas algorithm to solve the linear system of equations - Thomas(ay_, by_, Ldprev, Lty_); -} - 
-//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method performs performs 1D-AOS for the image columns - * @param Ldprev Image at a previous evolution step - * @param c Conductivity image - * @param stepsize Stepsize for the nonlinear diffusion evolution - */ -void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { - - // Operate on columns - for (int j = 0; j < qc_.cols; j++) { - for (int i = 0; i < qc_.rows; i++) { - *(qc_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i)+j + 1); - } - } - - for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)) = *(qc_.ptr(i)); - } - - for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)+px_.cols - 1) = *(qc_.ptr(i)+qc_.cols - 1); - } - - for (int j = 1; j < px_.cols - 1; j++) { - for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)+j) = *(qc_.ptr(i)+j - 1) + *(qc_.ptr(i)+j); - } - } - - // a = 1 + t.*p'; - ax_ = 1.0f + stepsize*px_.t(); - - // b = -t.*q'; - bx_ = -stepsize*qc_.t(); - - // But take care since we need to transpose the solution!! 
- Mat Ldprevt = Ldprev.t(); - - // Do Thomas algorithm to solve the linear system of equations - Thomas(ax_, bx_, Ldprevt, Ltx_); -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method does the Thomas algorithm for solving a tridiagonal linear system - * @note The matrix A must be strictly diagonally dominant for a stable solution - */ -void KAZEFeatures::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x) { - - // Auxiliary variables - int n = a.rows; - Mat m = cv::Mat::zeros(a.rows, a.cols, CV_32F); - Mat l = cv::Mat::zeros(b.rows, b.cols, CV_32F); - Mat y = cv::Mat::zeros(Ld.rows, Ld.cols, CV_32F); - - /** A*x = d; */ - /** / a1 b1 0 0 0 ... 0 \ / x1 \ = / d1 \ */ - /** | c1 a2 b2 0 0 ... 0 | | x2 | = | d2 | */ - /** | 0 c2 a3 b3 0 ... 0 | | x3 | = | d3 | */ - /** | : : : : 0 ... 0 | | : | = | : | */ - /** | : : : : 0 cn-1 an | | xn | = | dn | */ - - /** 1. LU decomposition - / L = / 1 \ U = / m1 r1 \ - / | l1 1 | | m2 r2 | - / | l2 1 | | m3 r3 | - / | : : : | | : : : | - / \ ln-1 1 / \ mn / */ - - for (int j = 0; j < m.cols; j++) { - *(m.ptr(0) + j) = *(a.ptr(0) + j); - } - - for (int j = 0; j < y.cols; j++) { - *(y.ptr(0) + j) = *(Ld.ptr(0) + j); - } - - // 1. Forward substitution L*y = d for y - for (int k = 1; k < n; k++) { - for (int j = 0; j < l.cols; j++) { - *(l.ptr(k - 1) + j) = *(b.ptr(k - 1) + j) / *(m.ptr(k - 1) + j); - } - - for (int j = 0; j < m.cols; j++) { - *(m.ptr(k)+j) = *(a.ptr(k)+j) - *(l.ptr(k - 1) + j)*(*(b.ptr(k - 1) + j)); - } - - for (int j = 0; j < y.cols; j++) { - *(y.ptr(k)+j) = *(Ld.ptr(k)+j) - *(l.ptr(k - 1) + j)*(*(y.ptr(k - 1) + j)); - } - } - - // 2. 
Backward substitution U*x = y - for (int j = 0; j < y.cols; j++) { - *(x.ptr(n - 1) + j) = (*(y.ptr(n - 1) + j)) / (*(m.ptr(n - 1) + j)); - } - - for (int i = n - 2; i >= 0; i--) { - for (int j = 0; j < x.cols; j++) { - *(x.ptr(i)+j) = (*(y.ptr(i)+j) - (*(b.ptr(i)+j))*(*(x.ptr(i + 1) + j))) / (*(m.ptr(i)+j)); - } - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function computes the angle from the vector given by (X Y). From 0 to 2*Pi - */ -inline float getAngle(const float& x, const float& y) { - - if (x >= 0 && y >= 0) - { - return atan(y / x); - } - - if (x < 0 && y >= 0) { - return (float)CV_PI - atan(-y / x); - } - - if (x < 0 && y < 0) { - return (float)CV_PI + atan(y / x); - } - - if (x >= 0 && y < 0) { - return 2.0f * (float)CV_PI - atan(-y / x); - } - - return 0; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function performs descriptor clipping - * @param desc_ Pointer to the descriptor vector - * @param dsize Size of the descriptor vector - * @param iter Number of iterations - * @param ratio Clipping ratio - */ -inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) { - - float cratio = ratio / sqrtf(static_cast(dsize)); - float len = 0.0; - - for (int i = 0; i < niter; i++) { - len = 0.0; - for (int j = 0; j < dsize; j++) { - if (desc[j] > cratio) { - desc[j] = cratio; - } - else if (desc[j] < -cratio) { - desc[j] = -cratio; - } - len += desc[j] * desc[j]; - } - - // Normalize again - len = sqrt(len); - - for (int j = 0; j < dsize; j++) { - desc[j] = desc[j] / len; - } - } -} - -//************************************************************************************** 
-//************************************************************************************** - -/** - * @brief This function computes the value of a 2D Gaussian function - * @param x X Position - * @param y Y Position - * @param sig Standard Deviation - */ -inline float gaussian(const float& x, const float& y, const float& sig) { - return exp(-(x*x + y*y) / (2.0f*sig*sig)); -} - -//************************************************************************************** -//************************************************************************************** - -/** - * @brief This function checks descriptor limits - * @param x X Position - * @param y Y Position - * @param width Image width - * @param height Image height - */ -inline void checkDescriptorLimits(int &x, int &y, const int& width, const int& height) { - - if (x < 0) { - x = 0; - } - - if (y < 0) { - y = 0; - } - - if (x > width - 1) { - x = width - 1; - } - - if (y > height - 1) { - y = height - 1; - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This funtion rounds float to nearest integer - * @param flt Input float - * @return dst Nearest integer - */ -inline int fRound(const float& flt) -{ - return (int)(flt + 0.5f); } diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 81509c47d..b62f94831 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -7,84 +7,53 @@ * @author Pablo F. 
Alcantarilla */ -#ifndef KAZE_H_ -#define KAZE_H_ - -//************************************************************************************* -//************************************************************************************* +#ifndef __OPENCV_FEATURES_2D_KAZE_FEATURES_H__ +#define __OPENCV_FEATURES_2D_KAZE_FEATURES_H__ +/* ************************************************************************* */ // Includes #include "KAZEConfig.h" #include "nldiffusion_functions.h" #include "fed.h" +#include "TEvolution.h" -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ // KAZE Class Declaration class KAZEFeatures { private: - KAZEOptions options; + /// Parameters of the Nonlinear diffusion class + KAZEOptions options_; ///< Configuration options for KAZE + std::vector evolution_; ///< Vector of nonlinear diffusion evolution - // Parameters of the Nonlinear diffusion class - std::vector evolution_; // Vector of nonlinear diffusion evolution - - // Vector of keypoint vectors for finding extrema in multiple threads + /// Vector of keypoint vectors for finding extrema in multiple threads std::vector > kpts_par_; - // FED parameters - int ncycles_; // Number of cycles - bool reordering_; // Flag for reordering time steps - std::vector > tsteps_; // Vector of FED dynamic time steps - std::vector nsteps_; // Vector of number of steps per cycle - - // Some auxiliary variables used in the AOS step - cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; + /// FED parameters + int ncycles_; ///< Number of cycles + bool reordering_; ///< Flag for reordering time steps + std::vector > tsteps_; ///< Vector of FED dynamic time steps + std::vector nsteps_; ///< Vector of number of steps per cycle public: - // Constructor + /// Constructor KAZEFeatures(KAZEOptions& options); 
- // Public methods for KAZE interface + /// Public methods for KAZE interface void Allocate_Memory_Evolution(void); int Create_Nonlinear_Scale_Space(const cv::Mat& img); void Feature_Detection(std::vector& kpts); void Feature_Description(std::vector& kpts, cv::Mat& desc); - static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_, const KAZEOptions& options); -private: - - // Feature Detection Methods + /// Feature Detection Methods void Compute_KContrast(const cv::Mat& img, const float& kper); void Compute_Multiscale_Derivatives(void); void Compute_Detector_Response(void); - void Determinant_Hessian_Parallel(std::vector& kpts); - void Find_Extremum_Threading(const int& level); + void Determinant_Hessian(std::vector& kpts); void Do_Subpixel_Refinement(std::vector& kpts); - - // AOS Methods - void AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x); - }; -//************************************************************************************* -//************************************************************************************* - -// Inline functions -float getAngle(const float& x, const float& y); -float gaussian(const float& x, const float& y, const float& sig); -void checkDescriptorLimits(int &x, int &y, const int& width, const int& height); -void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio); -int fRound(const float& flt); - -//************************************************************************************* -//************************************************************************************* - -#endif // KAZE_H_ +#endif diff --git a/modules/features2d/src/kaze/TEvolution.h 
b/modules/features2d/src/kaze/TEvolution.h new file mode 100644 index 000000000..bc7654ef0 --- /dev/null +++ b/modules/features2d/src/kaze/TEvolution.h @@ -0,0 +1,35 @@ +/** + * @file TEvolution.h + * @brief Header file with the declaration of the TEvolution struct + * @date Jun 02, 2014 + * @author Pablo F. Alcantarilla + */ + +#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__ +#define __OPENCV_FEATURES_2D_TEVOLUTION_H__ + +/* ************************************************************************* */ +/// KAZE/A-KAZE nonlinear diffusion filtering evolution +struct TEvolution { + + TEvolution() { + etime = 0.0f; + esigma = 0.0f; + octave = 0; + sublevel = 0; + sigma_size = 0; + } + + cv::Mat Lx, Ly; ///< First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives + cv::Mat Lt; ///< Evolution image + cv::Mat Lsmooth; ///< Smoothed image + cv::Mat Ldet; ///< Detector response + float etime; ///< Evolution time + float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2 + int octave; ///< Image octave + int sublevel; ///< Image sublevel in each octave + int sigma_size; ///< Integer esigma. 
For computing the feature detector responses +}; + +#endif diff --git a/modules/features2d/src/kaze/fed.h b/modules/features2d/src/kaze/fed.h index c313b8134..0cfa4005c 100644 --- a/modules/features2d/src/kaze/fed.h +++ b/modules/features2d/src/kaze/fed.h @@ -1,5 +1,5 @@ -#ifndef FED_H -#define FED_H +#ifndef __OPENCV_FEATURES_2D_FED_H__ +#define __OPENCV_FEATURES_2D_FED_H__ //****************************************************************************** //****************************************************************************** @@ -22,4 +22,4 @@ bool fed_is_prime_internal(const int& number); //************************************************************************************* //************************************************************************************* -#endif // FED_H +#endif // __OPENCV_FEATURES_2D_FED_H__ diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h index 773f7e461..5c161a6e7 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.h +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -8,8 +8,8 @@ * @author Pablo F. Alcantarilla */ -#ifndef KAZE_NLDIFFUSION_FUNCTIONS_H -#define KAZE_NLDIFFUSION_FUNCTIONS_H +#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ +#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ /* ************************************************************************* */ // Includes diff --git a/modules/features2d/src/kaze/utils.h b/modules/features2d/src/kaze/utils.h new file mode 100644 index 000000000..13ae35247 --- /dev/null +++ b/modules/features2d/src/kaze/utils.h @@ -0,0 +1,77 @@ +#ifndef __OPENCV_FEATURES_2D_KAZE_UTILS_H__ +#define __OPENCV_FEATURES_2D_KAZE_UTILS_H__ + +/* ************************************************************************* */ +/** + * @brief This function computes the angle from the vector given by (X Y). 
From 0 to 2*Pi + */ +inline float getAngle(float x, float y) { + + if (x >= 0 && y >= 0) { + return atanf(y / x); + } + + if (x < 0 && y >= 0) { + return static_cast(CV_PI)-atanf(-y / x); + } + + if (x < 0 && y < 0) { + return static_cast(CV_PI)+atanf(y / x); + } + + if (x >= 0 && y < 0) { + return static_cast(2.0 * CV_PI) - atanf(-y / x); + } + + return 0; +} + +/* ************************************************************************* */ +/** + * @brief This function computes the value of a 2D Gaussian function + * @param x X Position + * @param y Y Position + * @param sig Standard Deviation + */ +inline float gaussian(float x, float y, float sigma) { + return expf(-(x*x + y*y) / (2.0f*sigma*sigma)); +} + +/* ************************************************************************* */ +/** + * @brief This function checks descriptor limits + * @param x X Position + * @param y Y Position + * @param width Image width + * @param height Image height + */ +inline void checkDescriptorLimits(int &x, int &y, int width, int height) { + + if (x < 0) { + x = 0; + } + + if (y < 0) { + y = 0; + } + + if (x > width - 1) { + x = width - 1; + } + + if (y > height - 1) { + y = height - 1; + } +} + +/* ************************************************************************* */ +/** + * @brief This funtion rounds float to nearest integer + * @param flt Input float + * @return dst Nearest integer + */ +inline int fRound(float flt) { + return (int)(flt + 0.5f); +} + +#endif diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index 281df244a..3c6ae97cc 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -101,8 +101,14 @@ public: typedef typename Distance::ResultType DistanceType; CV_DescriptorExtractorTest( const string _name, DistanceType _maxDist, const Ptr& _dextractor, - Distance d = Distance() ): - name(_name), 
maxDist(_maxDist), dextractor(_dextractor), distance(d) {} + Distance d = Distance(), Ptr _detector = Ptr()): + name(_name), maxDist(_maxDist), dextractor(_dextractor), distance(d) , detector(_detector) {} + + ~CV_DescriptorExtractorTest() + { + if(!detector.empty()) + detector.release(); + } protected: virtual void createDescriptorExtractor() {} @@ -189,7 +195,6 @@ protected: // Read the test image. string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME; - Mat img = imread( imgFilename ); if( img.empty() ) { @@ -197,13 +202,15 @@ protected: ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); return; } - vector keypoints; FileStorage fs( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::READ ); - if( fs.isOpened() ) - { + if(!detector.empty()) { + detector->detect(img, keypoints); + } else { read( fs.getFirstTopLevelNode(), keypoints ); - + } + if(!keypoints.empty()) + { Mat calcDescriptors; double t = (double)getTickCount(); dextractor->compute( img, keypoints, calcDescriptors ); @@ -244,7 +251,7 @@ protected: } } } - else + if(!fs.isOpened()) { ts->printf( cvtest::TS::LOG, "Compute and write keypoints.\n" ); fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE ); @@ -295,6 +302,7 @@ protected: const DistanceType maxDist; Ptr dextractor; Distance distance; + Ptr detector; private: CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; } @@ -340,3 +348,19 @@ TEST( Features2d_DescriptorExtractor_OpponentBRIEF, regression ) DescriptorExtractor::create("OpponentBRIEF") ); test.safe_run(); } + +TEST( Features2d_DescriptorExtractor_KAZE, regression ) +{ + CV_DescriptorExtractorTest< L2 > test( "descriptor-kaze", 0.03f, + DescriptorExtractor::create("KAZE"), + L2(), FeatureDetector::create("KAZE")); + test.safe_run(); +} + +TEST( Features2d_DescriptorExtractor_AKAZE, regression ) +{ + CV_DescriptorExtractorTest test( 
"descriptor-akaze", (CV_DescriptorExtractorTest::DistanceType)12.f, + DescriptorExtractor::create("AKAZE"), + Hamming(), FeatureDetector::create("AKAZE")); + test.safe_run(); +} diff --git a/modules/features2d/test/test_detectors_regression.cpp b/modules/features2d/test/test_detectors_regression.cpp index 8f34913a9..25e2c7f68 100644 --- a/modules/features2d/test/test_detectors_regression.cpp +++ b/modules/features2d/test/test_detectors_regression.cpp @@ -289,6 +289,18 @@ TEST( Features2d_Detector_ORB, regression ) test.safe_run(); } +TEST( Features2d_Detector_KAZE, regression ) +{ + CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") ); + test.safe_run(); +} + +TEST( Features2d_Detector_AKAZE, regression ) +{ + CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") ); + test.safe_run(); +} + TEST( Features2d_Detector_GridFAST, regression ) { CV_FeatureDetectorTest test( "detector-grid-fast", FeatureDetector::create("GridFAST") ); diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index 6f9a7e1ed..a9f30b11b 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -167,19 +167,17 @@ TEST(Features2d_Detector_Keypoints_Dense, validation) test.safe_run(); } -// FIXIT #2807 Crash on Windows 7 x64 MSVS 2012, Linux Fedora 19 x64 with GCC 4.8.2, Linux Ubuntu 14.04 LTS x64 with GCC 4.8.2 -TEST(Features2d_Detector_Keypoints_KAZE, DISABLED_validation) +TEST(Features2d_Detector_Keypoints_KAZE, validation) { CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.KAZE")); test.safe_run(); } -// FIXIT #2807 Crash on Windows 7 x64 MSVS 2012, Linux Fedora 19 x64 with GCC 4.8.2, Linux Ubuntu 14.04 LTS x64 with GCC 4.8.2 -TEST(Features2d_Detector_Keypoints_AKAZE, DISABLED_validation) +TEST(Features2d_Detector_Keypoints_AKAZE, validation) { - CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr(new 
cv::AKAZE(cv::AKAZE::DESCRIPTOR_KAZE))); + CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr(new cv::AKAZE(cv::DESCRIPTOR_KAZE))); test_kaze.safe_run(); - CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr(new cv::AKAZE(cv::AKAZE::DESCRIPTOR_MLDB))); + CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr(new cv::AKAZE(cv::DESCRIPTOR_MLDB))); test_mldb.safe_run(); } diff --git a/modules/features2d/test/test_rotation_and_scale_invariance.cpp b/modules/features2d/test/test_rotation_and_scale_invariance.cpp index 69ba6f052..fa6a136f3 100644 --- a/modules/features2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/features2d/test/test_rotation_and_scale_invariance.cpp @@ -652,8 +652,7 @@ TEST(Features2d_ScaleInvariance_Detector_BRISK, regression) test.safe_run(); } -// FIXIT #2807 Crash on Windows 7 x64 MSVS 2012, Linux Fedora 19 x64 with GCC 4.8.2, Linux Ubuntu 14.04 LTS x64 with GCC 4.8.2 -TEST(Features2d_ScaleInvariance_Detector_KAZE, DISABLED_regression) +TEST(Features2d_ScaleInvariance_Detector_KAZE, regression) { DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.KAZE"), 0.08f, @@ -661,8 +660,7 @@ TEST(Features2d_ScaleInvariance_Detector_KAZE, DISABLED_regression) test.safe_run(); } -// FIXIT #2807 Crash on Windows 7 x64 MSVS 2012, Linux Fedora 19 x64 with GCC 4.8.2, Linux Ubuntu 14.04 LTS x64 with GCC 4.8.2 -TEST(Features2d_ScaleInvariance_Detector_AKAZE, DISABLED_regression) +TEST(Features2d_ScaleInvariance_Detector_AKAZE, regression) { DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.AKAZE"), 0.08f, diff --git a/modules/highgui/doc/user_interface.rst b/modules/highgui/doc/user_interface.rst index 0d0ccde94..565ea6d44 100644 --- a/modules/highgui/doc/user_interface.rst +++ b/modules/highgui/doc/user_interface.rst @@ -83,6 +83,9 @@ If window was created with OpenGL support, ``imshow`` also support :ocv:class:`o .. note:: This function should be followed by ``waitKey`` function which displays the image for specified milliseconds. 
Otherwise, it won't display the image. For example, ``waitKey(0)`` will display the window infinitely until any keypress (it is suitable for image display). ``waitKey(25)`` will display a frame for 25 ms, after which display will be automatically closed. (If you put it in a loop to read videos, it will display the video frame-by-frame) +.. note:: + + [Windows Backend Only] Pressing Ctrl+C will copy the image to the clipboard. namedWindow --------------- diff --git a/modules/highgui/src/window_w32.cpp b/modules/highgui/src/window_w32.cpp index 8b317dff7..bcf1bae8b 100644 --- a/modules/highgui/src/window_w32.cpp +++ b/modules/highgui/src/window_w32.cpp @@ -1286,6 +1286,10 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam ) switch(uMsg) { + case WM_COPY: + ::WindowProc(hwnd, uMsg, wParam, lParam); // call highgui proc. There may be a better way to do this. + break; + case WM_DESTROY: icvRemoveWindow(window); @@ -1448,6 +1452,81 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM // Process the message switch(uMsg) { + case WM_COPY: + { + if (!::OpenClipboard(hwnd) ) + break; + + HDC hDC = 0; + HDC memDC = 0; + HBITMAP memBM = 0; + + // We'll use a do-while(0){} scope as a single-run breakable scope + // Upon any error we can jump out of the single-time while scope to clean up the resources. 
+ do + { + if (!::EmptyClipboard()) + break; + + if(!window->image) + break; + + // Get window device context + if (0 == (hDC = ::GetDC(hwnd))) + break; + + // Create another DC compatible with hDC + if (0 == (memDC = ::CreateCompatibleDC( hDC ))) + break; + + // Determine the bitmap's dimensions + int nchannels = 3; + SIZE size = {0,0}; + icvGetBitmapData( window, &size, &nchannels, 0 ); + + // Create bitmap to draw on and it in the new DC + if (0 == (memBM = ::CreateCompatibleBitmap ( hDC, size.cx, size.cy))) + break; + + if (!::SelectObject( memDC, memBM )) + break; + + // Begin drawing to DC + if (!::SetStretchBltMode(memDC, COLORONCOLOR)) + break; + + RGBQUAD table[256]; + if( 1 == nchannels ) + { + for(int i = 0; i < 256; ++i) + { + table[i].rgbBlue = (unsigned char)i; + table[i].rgbGreen = (unsigned char)i; + table[i].rgbRed = (unsigned char)i; + } + if (!::SetDIBColorTable(window->dc, 0, 255, table)) + break; + } + + // The image copied to the clipboard will be in its original size, regardless if the window itself was resized. + + // Render the image to the dc/bitmap (at original size). + if (!::BitBlt( memDC, 0, 0, size.cx, size.cy, window->dc, 0, 0, SRCCOPY )) + break; + + // Finally, set bitmap to clipboard + ::SetClipboardData(CF_BITMAP, memBM); + } while (0,0); // (0,0) instead of (0) to avoid MSVC compiler warning C4127: "conditional expression is constant" + + ////////////////////////////////////////////////////////////////////////// + // if handle is allocated (i.e. != 0) then clean-up. 
+ if (memBM) ::DeleteObject(memBM); + if (memDC) ::DeleteDC(memDC); + if (hDC) ::ReleaseDC(hwnd, hDC); + ::CloseClipboard(); + break; + } + case WM_WINDOWPOSCHANGING: { LPWINDOWPOS pos = (LPWINDOWPOS)lParam; @@ -1798,6 +1877,11 @@ cvWaitKey( int delay ) is_processed = 1; return (int)(message.wParam << 16); } + + // Intercept Ctrl+C for copy to clipboard + if ('C' == message.wParam && (::GetKeyState(VK_CONTROL)>>15)) + ::PostMessage(message.hwnd, WM_COPY, 0, 0); + default: DispatchMessage(&message); is_processed = 1; diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp index 81f8a45f6..97fff83e1 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -56,14 +56,17 @@ enum { IMREAD_UNCHANGED = -1, // 8bit, color or not IMREAD_ANYCOLOR = 4 // ?, any color }; -enum { IMWRITE_JPEG_QUALITY = 1, - IMWRITE_JPEG_PROGRESSIVE = 2, - IMWRITE_JPEG_OPTIMIZE = 3, - IMWRITE_PNG_COMPRESSION = 16, - IMWRITE_PNG_STRATEGY = 17, - IMWRITE_PNG_BILEVEL = 18, - IMWRITE_PXM_BINARY = 32, - IMWRITE_WEBP_QUALITY = 64 +enum { IMWRITE_JPEG_QUALITY = 1, + IMWRITE_JPEG_PROGRESSIVE = 2, + IMWRITE_JPEG_OPTIMIZE = 3, + IMWRITE_JPEG_RST_INTERVAL = 4, + IMWRITE_JPEG_LUMA_QUALITY = 5, + IMWRITE_JPEG_CHROMA_QUALITY = 6, + IMWRITE_PNG_COMPRESSION = 16, + IMWRITE_PNG_STRATEGY = 17, + IMWRITE_PNG_BILEVEL = 18, + IMWRITE_PXM_BINARY = 32, + IMWRITE_WEBP_QUALITY = 64 }; enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0, diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h index f0c2ae13f..ccd29a7c1 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h +++ b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h @@ -76,6 +76,9 @@ enum CV_IMWRITE_JPEG_QUALITY =1, CV_IMWRITE_JPEG_PROGRESSIVE =2, CV_IMWRITE_JPEG_OPTIMIZE =3, + CV_IMWRITE_JPEG_RST_INTERVAL =4, + CV_IMWRITE_JPEG_LUMA_QUALITY =5, + 
CV_IMWRITE_JPEG_CHROMA_QUALITY =6, CV_IMWRITE_PNG_COMPRESSION =16, CV_IMWRITE_PNG_STRATEGY =17, CV_IMWRITE_PNG_BILEVEL =18, diff --git a/modules/imgcodecs/src/grfmt_jpeg.cpp b/modules/imgcodecs/src/grfmt_jpeg.cpp index 147f185e4..ec1793287 100644 --- a/modules/imgcodecs/src/grfmt_jpeg.cpp +++ b/modules/imgcodecs/src/grfmt_jpeg.cpp @@ -600,6 +600,9 @@ bool JpegEncoder::write( const Mat& img, const std::vector& params ) int quality = 95; int progressive = 0; int optimize = 0; + int rst_interval = 0; + int luma_quality = -1; + int chroma_quality = -1; for( size_t i = 0; i < params.size(); i += 2 ) { @@ -618,15 +621,64 @@ bool JpegEncoder::write( const Mat& img, const std::vector& params ) { optimize = params[i+1]; } + + if( params[i] == CV_IMWRITE_JPEG_LUMA_QUALITY ) + { + if (params[i+1] >= 0) + { + luma_quality = MIN(MAX(params[i+1], 0), 100); + + quality = luma_quality; + + if (chroma_quality < 0) + { + chroma_quality = luma_quality; + } + } + } + + if( params[i] == CV_IMWRITE_JPEG_CHROMA_QUALITY ) + { + if (params[i+1] >= 0) + { + chroma_quality = MIN(MAX(params[i+1], 0), 100); + } + } + + if( params[i] == CV_IMWRITE_JPEG_RST_INTERVAL ) + { + rst_interval = params[i+1]; + rst_interval = MIN(MAX(rst_interval, 0), 65535L); + } } jpeg_set_defaults( &cinfo ); + cinfo.restart_interval = rst_interval; + jpeg_set_quality( &cinfo, quality, TRUE /* limit to baseline-JPEG values */ ); if( progressive ) jpeg_simple_progression( &cinfo ); if( optimize ) cinfo.optimize_coding = TRUE; + +#if JPEG_LIB_VERSION >= 70 + if (luma_quality >= 0 && chroma_quality >= 0) + { + cinfo.q_scale_factor[0] = jpeg_quality_scaling(luma_quality); + cinfo.q_scale_factor[1] = jpeg_quality_scaling(chroma_quality); + if ( luma_quality != chroma_quality ) + { + /* disable subsampling - ref. 
Libjpeg.txt */ + cinfo.comp_info[0].v_samp_factor = 1; + cinfo.comp_info[0].h_samp_factor = 1; + cinfo.comp_info[1].v_samp_factor = 1; + cinfo.comp_info[1].h_samp_factor = 1; + } + jpeg_default_qtables( &cinfo, TRUE ); + } +#endif // #if JPEG_LIB_VERSION >= 70 + jpeg_start_compress( &cinfo, TRUE ); if( channels > 1 ) diff --git a/modules/imgcodecs/src/grfmt_tiff.cpp b/modules/imgcodecs/src/grfmt_tiff.cpp index 9013c39d1..06b2ab6a1 100644 --- a/modules/imgcodecs/src/grfmt_tiff.cpp +++ b/modules/imgcodecs/src/grfmt_tiff.cpp @@ -158,7 +158,7 @@ bool TiffDecoder::readHeader() m_type = CV_MAKETYPE(CV_8U, photometric > 1 ? wanted_channels : 1); break; case 16: - m_type = CV_MAKETYPE(CV_16U, photometric > 1 ? 3 : 1); + m_type = CV_MAKETYPE(CV_16U, photometric > 1 ? wanted_channels : 1); break; case 32: @@ -326,6 +326,21 @@ bool TiffDecoder::readData( Mat& img ) (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1) ); } + else if (ncn == 4) + { + if (wanted_channels == 4) + { + icvCvt_BGRA2RGBA_16u_C4R(buffer16 + i*tile_width0*ncn, 0, + (ushort*)(data + img.step*i) + x * 4, 0, + cvSize(tile_width, 1)); + } + else + { + icvCvt_BGRA2BGR_16u_C4C3R(buffer16 + i*tile_width0*ncn, 0, + (ushort*)(data + img.step*i) + x * 3, 0, + cvSize(tile_width, 1), 2); + } + } else { icvCvt_BGRA2BGR_16u_C4C3R(buffer16 + i*tile_width0*ncn, 0, diff --git a/modules/imgcodecs/test/test_grfmt.cpp b/modules/imgcodecs/test/test_grfmt.cpp index 9b06c5744..ed991a6f3 100644 --- a/modules/imgcodecs/test/test_grfmt.cpp +++ b/modules/imgcodecs/test/test_grfmt.cpp @@ -139,9 +139,6 @@ public: string filename = cv::tempfile(".jpg"); imwrite(filename, img); - img = imread(filename, IMREAD_UNCHANGED); - - filename = string(ts->get_data_path() + "readwrite/test_" + char(k + 48) + "_c" + char(num_channels + 48) + ".jpg"); ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str()); Mat img_test = imread(filename, IMREAD_UNCHANGED); @@ -160,8 +157,9 @@ public: #endif #ifdef HAVE_TIFF - for (int 
num_channels = 1; num_channels <= 3; num_channels+=2) + for (int num_channels = 1; num_channels <= 4; num_channels++) { + if (num_channels == 2) continue; // tiff ts->printf(ts->LOG, "image type depth:%d channels:%d ext: %s\n", CV_16U, num_channels, ".tiff"); Mat img(img_r * k, img_c * k, CV_MAKETYPE(CV_16U, num_channels), Scalar::all(0)); @@ -433,6 +431,31 @@ TEST(Imgcodecs_Jpeg, encode_decode_optimize_jpeg) remove(output_optimized.c_str()); } + +TEST(Imgcodecs_Jpeg, encode_decode_rst_jpeg) +{ + cvtest::TS& ts = *cvtest::TS::ptr(); + string input = string(ts.get_data_path()) + "../cv/shared/lena.png"; + cv::Mat img = cv::imread(input); + ASSERT_FALSE(img.empty()); + + std::vector params; + params.push_back(IMWRITE_JPEG_RST_INTERVAL); + params.push_back(1); + + string output_rst = cv::tempfile(".jpg"); + EXPECT_NO_THROW(cv::imwrite(output_rst, img, params)); + cv::Mat img_jpg_rst = cv::imread(output_rst); + + string output_normal = cv::tempfile(".jpg"); + EXPECT_NO_THROW(cv::imwrite(output_normal, img)); + cv::Mat img_jpg_normal = cv::imread(output_normal); + + EXPECT_EQ(0, cvtest::norm(img_jpg_rst, img_jpg_normal, NORM_INF)); + + remove(output_rst.c_str()); +} + #endif diff --git a/modules/imgproc/src/contours.cpp b/modules/imgproc/src/contours.cpp index 5ea4a07c6..ee7f21ac8 100644 --- a/modules/imgproc/src/contours.cpp +++ b/modules/imgproc/src/contours.cpp @@ -1704,8 +1704,10 @@ void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours, OutputArray _hierarchy, int mode, int method, Point offset ) { // Sanity check: output must be of type vector> - CV_Assert( _contours.kind() == _InputArray::STD_VECTOR_VECTOR && - _contours.channels() == 2 && _contours.depth() == CV_32S ); + CV_Assert((_contours.kind() == _InputArray::STD_VECTOR_VECTOR || _contours.kind() == _InputArray::STD_VECTOR_MAT || + _contours.kind() == _InputArray::STD_VECTOR_UMAT)); + + CV_Assert(_contours.empty() || (_contours.channels() == 2 && _contours.depth() == CV_32S)); Mat 
image = _image.getMat(); MemStorage storage(cvCreateMemStorage()); diff --git a/modules/imgproc/src/opencl/warp_affine.cl b/modules/imgproc/src/opencl/warp_affine.cl index 8ee34d0d6..649f10db7 100644 --- a/modules/imgproc/src/opencl/warp_affine.cl +++ b/modules/imgproc/src/opencl/warp_affine.cl @@ -98,15 +98,15 @@ __kernel void warpAffine(__global const uchar * srcptr, int src_step, int src_of { int round_delta = (AB_SCALE >> 1); - int X0_ = rint(M[0] * dx * AB_SCALE); - int Y0_ = rint(M[3] * dx * AB_SCALE); + int X0 = rint(fma(M[0], dx, fma(M[1], dy0, M[2])) * AB_SCALE) + round_delta; + int Y0 = rint(fma(M[3], dx, fma(M[4], dy0, M[5])) * AB_SCALE) + round_delta; + + int XSTEP = (int)(M[1] * AB_SCALE); + int YSTEP = (int)(M[4] * AB_SCALE); int dst_index = mad24(dy0, dst_step, mad24(dx, pixsize, dst_offset)); for (int dy = dy0, dy1 = min(dst_rows, dy0 + rowsPerWI); dy < dy1; ++dy, dst_index += dst_step) { - int X0 = X0_ + rint(fma(M[1], dy, M[2]) * AB_SCALE) + round_delta; - int Y0 = Y0_ + rint(fma(M[4], dy, M[5]) * AB_SCALE) + round_delta; - short sx = convert_short_sat(X0 >> AB_BITS); short sy = convert_short_sat(Y0 >> AB_BITS); @@ -117,6 +117,9 @@ __kernel void warpAffine(__global const uchar * srcptr, int src_step, int src_of } else storepix(scalar, dstptr + dst_index); + + X0 += XSTEP; + Y0 += YSTEP; } } } diff --git a/modules/objdetect/doc/erfilter.rst b/modules/objdetect/doc/erfilter.rst deleted file mode 100644 index 85d6bcc7f..000000000 --- a/modules/objdetect/doc/erfilter.rst +++ /dev/null @@ -1,211 +0,0 @@ -Scene Text Detection -==================== - -.. highlight:: cpp - -Class-specific Extremal Regions for Scene Text Detection --------------------------------------------------------- - -The scene text detection algorithm described below has been initially proposed by Lukás Neumann & Jiri Matas [Neumann12]. 
The main idea behind Class-specific Extremal Regions is similar to the MSER in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image. However, this technique differs from MSER in that selection of suitable ERs is done by a sequential classifier trained for character detection, i.e. dropping the stability requirement of MSERs and selecting class-specific (not necessarily stable) regions. - -The component tree of an image is constructed by thresholding by an increasing value step-by-step from 0 to 255 and then linking the obtained connected components from successive levels in a hierarchy by their inclusion relation: - -.. image:: pics/component_tree.png - :width: 100% - -The component tree may conatain a huge number of regions even for a very simple image as shown in the previous image. This number can easily reach the order of 1 x 10^6 regions for an average 1 Megapixel image. In order to efficiently select suitable regions among all the ERs the algorithm make use of a sequential classifier with two differentiated stages. - -In the first stage incrementally computable descriptors (area, perimeter, bounding box, and euler number) are computed (in O(1)) for each region r and used as features for a classifier which estimates the class-conditional probability p(r|character). Only the ERs which correspond to local maximum of the probability p(r|character) are selected (if their probability is above a global limit p_min and the difference between local maximum and local minimum is greater than a \delta_min value). - -In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. (Hole area ratio, convex hull ratio, and the number of outer boundary inflexion points). 
- -This ER filtering process is done in different single-channel projections of the input image in order to increase the character localization recall. - -After the ER filtering is done on each input channel, character candidates must be grouped in high-level text blocks (i.e. words, text lines, paragraphs, ...). The grouping algorithm used in this implementation has been proposed by Lluis Gomez and Dimosthenis Karatzas in [Gomez13] and basically consist in finding meaningful groups of regions using a perceptual organization based clustering analisys (see :ocv:func:`erGrouping`). - - -To see the text detector at work, have a look at the textdetection demo: https://github.com/Itseez/opencv/blob/master/samples/cpp/textdetection.cpp - - -.. [Neumann12] Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012. The paper is available online at http://cmp.felk.cvut.cz/~neumalu1/neumann-cvpr2012.pdf - -.. [Gomez13] Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013. The paper is available online at http://158.109.8.37/files/GoK2013.pdf - - -ERStat ------- -.. ocv:struct:: ERStat - -The ERStat structure represents a class-specific Extremal Region (ER). - -An ER is a 4-connected set of pixels with all its grey-level values smaller than the values in its outer boundary. A class-specific ER is selected (using a classifier) from all the ER's in the component tree of the image. :: - - struct CV_EXPORTS ERStat - { - public: - //! Constructor - explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0); - //! Destructor - ~ERStat() { } - - //! seed point and threshold (max grey-level value) - int pixel; - int level; - - //! 
incrementally computable features - int area; - int perimeter; - int euler; //!< euler number - Rect rect; //!< bounding box - double raw_moments[2]; //!< order 1 raw moments to derive the centroid - double central_moments[3]; //!< order 2 central moments to construct the covariance matrix - std::deque *crossings;//!< horizontal crossings - float med_crossings; //!< median of the crossings at three different height levels - - //! 2nd stage features - float hole_area_ratio; - float convex_hull_ratio; - float num_inflexion_points; - - //! probability that the ER belongs to the class we are looking for - double probability; - - //! pointers preserving the tree structure of the component tree - ERStat* parent; - ERStat* child; - ERStat* next; - ERStat* prev; - }; - -computeNMChannels ------------------ -Compute the different channels to be processed independently in the N&M algorithm [Neumann12]. - -.. ocv:function:: void computeNMChannels(InputArray _src, OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad) - - :param _src: Source image. Must be RGB ``CV_8UC3``. - :param _channels: Output vector where computed channels are stored. - :param _mode: Mode of operation. Currently the only available options are: **ERFILTER_NM_RGBLGrad** (used by default) and **ERFILTER_NM_IHSGrad**. - -In N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient magnitude channels (Grad) are used in order to obtain high localization recall. This implementation also provides an alternative combination of red (R), green (G), blue (B), lightness (L), and gradient magnitude (Grad). - - -ERFilter --------- -.. ocv:class:: ERFilter : public Algorithm - -Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm [Neumann12]. :: - - class CV_EXPORTS ERFilter : public Algorithm - { - public: - - //! callback with the classifier is made a class. - //! By doing it we hide SVM, Boost etc. 
Developers can provide their own classifiers - class CV_EXPORTS Callback - { - public: - virtual ~Callback() { } - //! The classifier must return probability measure for the region. - virtual double eval(const ERStat& stat) = 0; - }; - - /*! - the key method. Takes image on input and returns the selected regions in a vector of ERStat - only distinctive ERs which correspond to characters are selected by a sequential classifier - */ - virtual void run( InputArray image, std::vector& regions ) = 0; - - (...) - - }; - - - -ERFilter::Callback ------------------- -Callback with the classifier is made a class. By doing it we hide SVM, Boost etc. Developers can provide their own classifiers to the ERFilter algorithm. - -.. ocv:class:: ERFilter::Callback - -ERFilter::Callback::eval ------------------------- -The classifier must return probability measure for the region. - -.. ocv:function:: double ERFilter::Callback::eval(const ERStat& stat) - - :param stat: The region to be classified - -ERFilter::run -------------- -The key method of ERFilter algorithm. Takes image on input and returns the selected regions in a vector of ERStat only distinctive ERs which correspond to characters are selected by a sequential classifier - -.. ocv:function:: void ERFilter::run( InputArray image, std::vector& regions ) - - :param image: Sinle channel image ``CV_8UC1`` - :param regions: Output for the 1st stage and Input/Output for the 2nd. The selected Extremal Regions are stored here. - -Extracts the component tree (if needed) and filter the extremal regions (ER's) by using a given classifier. - -createERFilterNM1 ------------------ -Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12]. - -.. ocv:function:: Ptr createERFilterNM1( const Ptr& cb, int thresholdDelta = 1, float minArea = 0.00025, float maxArea = 0.13, float minProbability = 0.4, bool nonMaxSuppression = true, float minProbabilityDiff = 0.1 ) - - :param cb: Callback with the classifier. 
Default classifier can be implicitly load with function :ocv:func:`loadClassifierNM1`, e.g. from file in samples/cpp/trained_classifierNM1.xml - :param thresholdDelta: Threshold step in subsequent thresholds when extracting the component tree - :param minArea: The minimum area (% of image size) allowed for retreived ER's - :param minArea: The maximum area (% of image size) allowed for retreived ER's - :param minProbability: The minimum probability P(er|character) allowed for retreived ER's - :param nonMaxSuppression: Whenever non-maximum suppression is done over the branch probabilities - :param minProbability: The minimum probability difference between local maxima and local minima ERs - -The component tree of the image is extracted by a threshold increased step by step from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness, number of holes, and number of horizontal crossings) are computed for each ER and used as features for a classifier which estimates the class-conditional probability P(er|character). The value of P(er|character) is tracked using the inclusion relation of ER across all thresholds and only the ERs which correspond to local maximum of the probability P(er|character) are selected (if the local maximum of the probability is above a global limit pmin and the difference between local maximum and local minimum is greater than minProbabilityDiff). - -createERFilterNM2 ------------------ -Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12]. - -.. ocv:function:: Ptr createERFilterNM2( const Ptr& cb, float minProbability = 0.3 ) - - :param cb: Callback with the classifier. Default classifier can be implicitly load with function :ocv:func:`loadClassifierNM2`, e.g. 
from file in samples/cpp/trained_classifierNM2.xml - :param minProbability: The minimum probability P(er|character) allowed for retreived ER's - -In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. The classifier uses all the features calculated in the first stage and the following additional features: hole area ratio, convex hull ratio, and number of outer inflexion points. - -loadClassifierNM1 ------------------ -Allow to implicitly load the default classifier when creating an ERFilter object. - -.. ocv:function:: Ptr loadClassifierNM1(const std::string& filename) - - :param filename: The XML or YAML file with the classifier model (e.g. trained_classifierNM1.xml) - -returns a pointer to ERFilter::Callback. - -loadClassifierNM2 ------------------ -Allow to implicitly load the default classifier when creating an ERFilter object. - -.. ocv:function:: Ptr loadClassifierNM2(const std::string& filename) - - :param filename: The XML or YAML file with the classifier model (e.g. trained_classifierNM2.xml) - -returns a pointer to ERFilter::Callback. - -erGrouping ----------- -Find groups of Extremal Regions that are organized as text blocks. - -.. ocv:function:: void erGrouping( InputArrayOfArrays src, std::vector > ®ions, const std::string& filename, float minProbablity, std::vector &groups) - - :param src: Vector of sinle channel images CV_8UC1 from wich the regions were extracted - :param regions: Vector of ER's retreived from the ERFilter algorithm from each channel - :param filename: The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml) - :param minProbability: The minimum probability for accepting a group - :param groups: The output of the algorithm are stored in this parameter as list of rectangles. - -This function implements the grouping algorithm described in [Gomez13]. 
Notice that this implementation constrains the results to horizontally-aligned text and latin script (since ERFilter classifiers are trained only for latin script detection). - -The algorithm combines two different clustering techniques in a single parameter-free procedure to detect groups of regions organized as text. The maximally meaningful groups are fist detected in several feature spaces, where each feature space is a combination of proximity information (x,y coordinates) and a similarity measure (intensity, color, size, gradient magnitude, etc.), thus providing a set of hypotheses of text groups. Evidence Accumulation framework is used to combine all these hypotheses to get the final estimate. Each of the resulting groups are finally validated using a classifier in order to assess if they form a valid horizontally-aligned text block. diff --git a/modules/objdetect/doc/latent_svm.rst b/modules/objdetect/doc/latent_svm.rst deleted file mode 100644 index 4b4ff117f..000000000 --- a/modules/objdetect/doc/latent_svm.rst +++ /dev/null @@ -1,262 +0,0 @@ -Latent SVM -=============================================================== - -Discriminatively Trained Part Based Models for Object Detection ---------------------------------------------------------------- - -The object detector described below has been initially proposed by -P.F. Felzenszwalb in [Felzenszwalb2010]_. It is based on a -Dalal-Triggs detector that uses a single filter on histogram of -oriented gradients (HOG) features to represent an object category. -This detector uses a sliding window approach, where a filter is -applied at all positions and scales of an image. The first -innovation is enriching the Dalal-Triggs model using a -star-structured part-based model defined by a "root" filter -(analogous to the Dalal-Triggs filter) plus a set of parts filters -and associated deformation models. 
The score of one of star models -at a particular position and scale within an image is the score of -the root filter at the given location plus the sum over parts of the -maximum, over placements of that part, of the part filter score on -its location minus a deformation cost easuring the deviation of the -part from its ideal location relative to the root. Both root and -part filter scores are defined by the dot product between a filter -(a set of weights) and a subwindow of a feature pyramid computed -from the input image. Another improvement is a representation of the -class of models by a mixture of star models. The score of a mixture -model at a particular position and scale is the maximum over -components, of the score of that component model at the given -location. - -In OpenCV there are C implementation of Latent SVM and C++ wrapper of it. -C version is the structure :ocv:struct:`CvObjectDetection` and a set of functions -working with this structure (see :ocv:func:`cvLoadLatentSvmDetector`, -:ocv:func:`cvReleaseLatentSvmDetector`, :ocv:func:`cvLatentSvmDetectObjects`). -C++ version is the class :ocv:class:`LatentSvmDetector` and has slightly different -functionality in contrast with C version - it supports loading and detection -of several models. - -There are two examples of Latent SVM usage: ``samples/c/latentsvmdetect.cpp`` -and ``samples/cpp/latentsvm_multidetect.cpp``. - -.. highlight:: c - - -CvLSVMFilterPosition --------------------- -.. ocv:struct:: CvLSVMFilterPosition - - Structure describes the position of the filter in the feature pyramid. - - .. ocv:member:: unsigned int l - - level in the feature pyramid - - .. ocv:member:: unsigned int x - - x-coordinate in level l - - .. ocv:member:: unsigned int y - - y-coordinate in level l - - -CvLSVMFilterObject ------------------- -.. ocv:struct:: CvLSVMFilterObject - - Description of the filter, which corresponds to the part of the object. - - .. 
ocv:member:: CvLSVMFilterPosition V - - ideal (penalty = 0) position of the partial filter - from the root filter position (V_i in the paper) - - .. ocv:member:: float fineFunction[4] - - vector describes penalty function (d_i in the paper) - pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2 - - .. ocv:member:: int sizeX - .. ocv:member:: int sizeY - - Rectangular map (sizeX x sizeY), - every cell stores feature vector (dimension = p) - - .. ocv:member:: int numFeatures - - number of features - - .. ocv:member:: float *H - - matrix of feature vectors to set and get - feature vectors (i,j) used formula H[(j * sizeX + i) * p + k], - where k - component of feature vector in cell (i, j) - -CvLatentSvmDetector -------------------- -.. ocv:struct:: CvLatentSvmDetector - - Structure contains internal representation of trained Latent SVM detector. - - .. ocv:member:: int num_filters - - total number of filters (root plus part) in model - - .. ocv:member:: int num_components - - number of components in model - - .. ocv:member:: int* num_part_filters - - array containing number of part filters for each component - - .. ocv:member:: CvLSVMFilterObject** filters - - root and part filters for all model components - - .. ocv:member:: float* b - - biases for all model components - - .. ocv:member:: float score_threshold - - confidence level threshold - - -CvObjectDetection ------------------ -.. ocv:struct:: CvObjectDetection - - Structure contains the bounding box and confidence level for detected object. - - .. ocv:member:: CvRect rect - - bounding box for a detected object - - .. ocv:member:: float score - - confidence level - - -cvLoadLatentSvmDetector ------------------------ -Loads trained detector from a file. - -.. 
ocv:function:: CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename) - - :param filename: Name of the file containing the description of a trained detector - - -cvReleaseLatentSvmDetector --------------------------- -Release memory allocated for CvLatentSvmDetector structure. - -.. ocv:function:: void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector) - - :param detector: CvLatentSvmDetector structure to be released - - -cvLatentSvmDetectObjects ------------------------- -Find rectangular regions in the given image that are likely to contain objects -and corresponding confidence levels. - -.. ocv:function:: CvSeq* cvLatentSvmDetectObjects( IplImage* image, CvLatentSvmDetector* detector, CvMemStorage* storage, float overlap_threshold=0.5f, int numThreads=-1 ) - - :param image: image - :param detector: LatentSVM detector in internal representation - :param storage: Memory storage to store the resultant sequence of the object candidate rectangles - :param overlap_threshold: Threshold for the non-maximum suppression algorithm - :param numThreads: Number of threads used in parallel version of the algorithm - -.. highlight:: cpp - -LatentSvmDetector ------------------ -.. ocv:class:: LatentSvmDetector - -This is a C++ wrapping class of Latent SVM. It contains internal representation of several -trained Latent SVM detectors (models) and a set of methods to load the detectors and detect objects -using them. - -LatentSvmDetector::ObjectDetection ----------------------------------- -.. ocv:struct:: LatentSvmDetector::ObjectDetection - - Structure contains the detection information. - - .. ocv:member:: Rect rect - - bounding box for a detected object - - .. ocv:member:: float score - - confidence level - - .. ocv:member:: int classID - - class (model or detector) ID that detect an object - - -LatentSvmDetector::LatentSvmDetector ------------------------------------- -Two types of constructors. - -.. 
ocv:function:: LatentSvmDetector::LatentSvmDetector() - -.. ocv:function:: LatentSvmDetector::LatentSvmDetector(const vector& filenames, const vector& classNames=vector()) - - - - :param filenames: A set of filenames storing the trained detectors (models). Each file contains one model. See examples of such files here /opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007/. - - :param classNames: A set of trained models names. If it's empty then the name of each model will be constructed from the name of file containing the model. E.g. the model stored in "/home/user/cat.xml" will get the name "cat". - -LatentSvmDetector::~LatentSvmDetector -------------------------------------- -Destructor. - -.. ocv:function:: LatentSvmDetector::~LatentSvmDetector() - -LatentSvmDetector::~clear -------------------------- -Clear all trained models and their names stored in an class object. - -.. ocv:function:: void LatentSvmDetector::clear() - -LatentSvmDetector::load ------------------------ -Load the trained models from given ``.xml`` files and return ``true`` if at least one model was loaded. - -.. ocv:function:: bool LatentSvmDetector::load( const vector& filenames, const vector& classNames=vector() ) - - :param filenames: A set of filenames storing the trained detectors (models). Each file contains one model. See examples of such files here /opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007/. - - :param classNames: A set of trained models names. If it's empty then the name of each model will be constructed from the name of file containing the model. E.g. the model stored in "/home/user/cat.xml" will get the name "cat". - -LatentSvmDetector::detect -------------------------- -Find rectangular regions in the given image that are likely to contain objects of loaded classes (models) -and corresponding confidence levels. - -.. 
ocv:function:: void LatentSvmDetector::detect( const Mat& image, vector& objectDetections, float overlapThreshold=0.5f, int numThreads=-1 ) - - :param image: An image. - :param objectDetections: The detections: rectangulars, scores and class IDs. - :param overlapThreshold: Threshold for the non-maximum suppression algorithm. - :param numThreads: Number of threads used in parallel version of the algorithm. - -LatentSvmDetector::getClassNames --------------------------------- -Return the class (model) names that were passed in constructor or method ``load`` or extracted from models filenames in those methods. - -.. ocv:function:: const vector& LatentSvmDetector::getClassNames() const - -LatentSvmDetector::getClassCount --------------------------------- -Return a count of loaded models (classes). - -.. ocv:function:: size_t LatentSvmDetector::getClassCount() const - - -.. [Felzenszwalb2010] Felzenszwalb, P. F. and Girshick, R. B. and McAllester, D. and Ramanan, D. *Object Detection with Discriminatively Trained Part Based Models*. PAMI, vol. 32, no. 9, pp. 1627-1645, September 2010 diff --git a/modules/objdetect/doc/objdetect.rst b/modules/objdetect/doc/objdetect.rst index 0cd8cf3ef..c00e64e59 100644 --- a/modules/objdetect/doc/objdetect.rst +++ b/modules/objdetect/doc/objdetect.rst @@ -8,5 +8,3 @@ objdetect. 
Object Detection :maxdepth: 2 cascade_classification - latent_svm - erfilter diff --git a/modules/objdetect/doc/pics/component_tree.png b/modules/objdetect/doc/pics/component_tree.png deleted file mode 100644 index 7391e2de6..000000000 Binary files a/modules/objdetect/doc/pics/component_tree.png and /dev/null differ diff --git a/modules/objdetect/include/opencv2/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect.hpp index 79e213ee3..4ccb81070 100644 --- a/modules/objdetect/include/opencv2/objdetect.hpp +++ b/modules/objdetect/include/opencv2/objdetect.hpp @@ -315,8 +315,6 @@ public: } -#include "opencv2/objdetect/linemod.hpp" -#include "opencv2/objdetect/erfilter.hpp" #include "opencv2/objdetect/detection_based_tracker.hpp" #endif diff --git a/modules/objdetect/include/opencv2/objdetect/erfilter.hpp b/modules/objdetect/include/opencv2/objdetect/erfilter.hpp deleted file mode 100644 index d7e07d80d..000000000 --- a/modules/objdetect/include/opencv2/objdetect/erfilter.hpp +++ /dev/null @@ -1,266 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Copyright (C) 2013, OpenCV Foundation, all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifndef __OPENCV_OBJDETECT_ERFILTER_HPP__ -#define __OPENCV_OBJDETECT_ERFILTER_HPP__ - -#include "opencv2/core.hpp" -#include -#include -#include - -namespace cv -{ - -/*! - Extremal Region Stat structure - - The ERStat structure represents a class-specific Extremal Region (ER). - - An ER is a 4-connected set of pixels with all its grey-level values smaller than the values - in its outer boundary. 
A class-specific ER is selected (using a classifier) from all the ER's - in the component tree of the image. -*/ -struct CV_EXPORTS ERStat -{ -public: - //! Constructor - explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0); - //! Destructor - ~ERStat() { } - - //! seed point and the threshold (max grey-level value) - int pixel; - int level; - - //! incrementally computable features - int area; - int perimeter; - int euler; //!< euler number - Rect rect; - double raw_moments[2]; //!< order 1 raw moments to derive the centroid - double central_moments[3]; //!< order 2 central moments to construct the covariance matrix - std::deque *crossings;//!< horizontal crossings - float med_crossings; //!< median of the crossings at three different height levels - - //! 2nd stage features - float hole_area_ratio; - float convex_hull_ratio; - float num_inflexion_points; - - // TODO Other features can be added (average color, standard deviation, and such) - - - // TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ? - std::vector *pixels; - - //! probability that the ER belongs to the class we are looking for - double probability; - - //! pointers preserving the tree structure of the component tree - ERStat* parent; - ERStat* child; - ERStat* next; - ERStat* prev; - - //! wenever the regions is a local maxima of the probability - bool local_maxima; - ERStat* max_probability_ancestor; - ERStat* min_probability_ancestor; -}; - -/*! - Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithms - Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012 - - Extracts the component tree (if needed) and filter the extremal regions (ER's) by using a given classifier. -*/ -class CV_EXPORTS ERFilter : public Algorithm -{ -public: - - //! callback with the classifier is made a class. By doing it we hide SVM, Boost etc. - class CV_EXPORTS Callback - { - public: - virtual ~Callback() { } - //! 
The classifier must return probability measure for the region. - virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here? - }; - - /*! - the key method. Takes image on input and returns the selected regions in a vector of ERStat - only distinctive ERs which correspond to characters are selected by a sequential classifier - \param image is the input image - \param regions is output for the first stage, input/output for the second one. - */ - virtual void run( InputArray image, std::vector& regions ) = 0; - - - //! set/get methods to set the algorithm properties, - virtual void setCallback(const Ptr& cb) = 0; - virtual void setThresholdDelta(int thresholdDelta) = 0; - virtual void setMinArea(float minArea) = 0; - virtual void setMaxArea(float maxArea) = 0; - virtual void setMinProbability(float minProbability) = 0; - virtual void setMinProbabilityDiff(float minProbabilityDiff) = 0; - virtual void setNonMaxSuppression(bool nonMaxSuppression) = 0; - virtual int getNumRejected() = 0; -}; - - -/*! - Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm - Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012 - - The component tree of the image is extracted by a threshold increased step by step - from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness, - number of holes, and number of horizontal crossings) are computed for each ER - and used as features for a classifier which estimates the class-conditional - probability P(er|character). The value of P(er|character) is tracked using the inclusion - relation of ER across all thresholds and only the ERs which correspond to local maximum - of the probability P(er|character) are selected (if the local maximum of the - probability is above a global limit pmin and the difference between local maximum and - local minimum is greater than minProbabilityDiff). - - \param cb Callback with the classifier. 
- default classifier can be implicitly load with function loadClassifierNM1() - from file in samples/cpp/trained_classifierNM1.xml - \param thresholdDelta Threshold step in subsequent thresholds when extracting the component tree - \param minArea The minimum area (% of image size) allowed for retreived ER's - \param minArea The maximum area (% of image size) allowed for retreived ER's - \param minProbability The minimum probability P(er|character) allowed for retreived ER's - \param nonMaxSuppression Whenever non-maximum suppression is done over the branch probabilities - \param minProbability The minimum probability difference between local maxima and local minima ERs -*/ -CV_EXPORTS Ptr createERFilterNM1(const Ptr& cb, - int thresholdDelta = 1, float minArea = 0.00025, - float maxArea = 0.13, float minProbability = 0.4, - bool nonMaxSuppression = true, - float minProbabilityDiff = 0.1); - -/*! - Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm - Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012 - - In the second stage, the ERs that passed the first stage are classified into character - and non-character classes using more informative but also more computationally expensive - features. The classifier uses all the features calculated in the first stage and the following - additional features: hole area ratio, convex hull ratio, and number of outer inflexion points. - - \param cb Callback with the classifier - default classifier can be implicitly load with function loadClassifierNM2() - from file in samples/cpp/trained_classifierNM2.xml - \param minProbability The minimum probability P(er|character) allowed for retreived ER's -*/ -CV_EXPORTS Ptr createERFilterNM2(const Ptr& cb, - float minProbability = 0.3); - - -/*! - Allow to implicitly load the default classifier when creating an ERFilter object. - The function takes as parameter the XML or YAML file with the classifier model - (e.g. 
trained_classifierNM1.xml) returns a pointer to ERFilter::Callback. -*/ - -CV_EXPORTS Ptr loadClassifierNM1(const std::string& filename); - -/*! - Allow to implicitly load the default classifier when creating an ERFilter object. - The function takes as parameter the XML or YAML file with the classifier model - (e.g. trained_classifierNM1.xml) returns a pointer to ERFilter::Callback. -*/ - -CV_EXPORTS Ptr loadClassifierNM2(const std::string& filename); - - -// computeNMChannels operation modes -enum { ERFILTER_NM_RGBLGrad = 0, - ERFILTER_NM_IHSGrad = 1 - }; - -/*! - Compute the different channels to be processed independently in the N&M algorithm - Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012 - - In N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient - magnitude channels (Grad) are used in order to obtain high localization recall. - This implementation also provides an alternative combination of red (R), green (G), blue (B), - lightness (L), and gradient magnitude (Grad). - - \param _src Source image. Must be RGB CV_8UC3. - \param _channels Output vector where computed channels are stored. - \param _mode Mode of operation. Currently the only available options are - ERFILTER_NM_RGBLGrad (by default) and ERFILTER_NM_IHSGrad. - -*/ -CV_EXPORTS void computeNMChannels(InputArray _src, OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad); - - -/*! - Find groups of Extremal Regions that are organized as text blocks. This function implements - the grouping algorithm described in: - Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013. - Notice that this implementation constrains the results to horizontally-aligned text and - latin script (since ERFilter classifiers are trained only for latin script detection). - - The algorithm combines two different clustering techniques in a single parameter-free procedure - to detect groups of regions organized as text. 
The maximally meaningful groups are fist detected - in several feature spaces, where each feature space is a combination of proximity information - (x,y coordinates) and a similarity measure (intensity, color, size, gradient magnitude, etc.), - thus providing a set of hypotheses of text groups. Evidence Accumulation framework is used to - combine all these hypotheses to get the final estimate. Each of the resulting groups are finally - validated using a classifier in order to assest if they form a valid horizontally-aligned text block. - - \param src Vector of sinle channel images CV_8UC1 from wich the regions were extracted. - \param regions Vector of ER's retreived from the ERFilter algorithm from each channel - \param filename The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml) - \param minProbability The minimum probability for accepting a group - \param groups The output of the algorithm are stored in this parameter as list of rectangles. -*/ -CV_EXPORTS void erGrouping(InputArrayOfArrays src, std::vector > ®ions, - const std::string& filename, float minProbablity, - std::vector &groups); - -} -#endif // _OPENCV_ERFILTER_HPP_ diff --git a/modules/objdetect/include/opencv2/objdetect/linemod.hpp b/modules/objdetect/include/opencv2/objdetect/linemod.hpp deleted file mode 100644 index 46d869926..000000000 --- a/modules/objdetect/include/opencv2/objdetect/linemod.hpp +++ /dev/null @@ -1,455 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Copyright (C) 2013, OpenCV Foundation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#ifndef __OPENCV_OBJDETECT_LINEMOD_HPP__ -#define __OPENCV_OBJDETECT_LINEMOD_HPP__ - -#include "opencv2/core.hpp" -#include - -/****************************************************************************************\ -* LINE-MOD * -\****************************************************************************************/ - -namespace cv { -namespace linemod { - -/// @todo Convert doxy comments to rst - -/** - * \brief Discriminant feature described by its location and label. - */ -struct CV_EXPORTS Feature -{ - int x; ///< x offset - int y; ///< y offset - int label; ///< Quantization - - Feature() : x(0), y(0), label(0) {} - Feature(int x, int y, int label); - - void read(const FileNode& fn); - void write(FileStorage& fs) const; -}; - -inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {} - -struct CV_EXPORTS Template -{ - int width; - int height; - int pyramid_level; - std::vector features; - - void read(const FileNode& fn); - void write(FileStorage& fs) const; -}; - -/** - * \brief Represents a modality operating over an image pyramid. - */ -class QuantizedPyramid -{ -public: - // Virtual destructor - virtual ~QuantizedPyramid() {} - - /** - * \brief Compute quantized image at current pyramid level for online detection. - * - * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set, - * representing its classification. - */ - virtual void quantize(Mat& dst) const =0; - - /** - * \brief Extract most discriminant features at current pyramid level to form a new template. - * - * \param[out] templ The new template. - */ - virtual bool extractTemplate(Template& templ) const =0; - - /** - * \brief Go to the next pyramid level. 
- * - * \todo Allow pyramid scale factor other than 2 - */ - virtual void pyrDown() =0; - -protected: - /// Candidate feature with a score - struct Candidate - { - Candidate(int x, int y, int label, float score); - - /// Sort candidates with high score to the front - bool operator<(const Candidate& rhs) const - { - return score > rhs.score; - } - - Feature f; - float score; - }; - - /** - * \brief Choose candidate features so that they are not bunched together. - * - * \param[in] candidates Candidate features sorted by score. - * \param[out] features Destination vector of selected features. - * \param[in] num_features Number of candidates to select. - * \param[in] distance Hint for desired distance between features. - */ - static void selectScatteredFeatures(const std::vector& candidates, - std::vector& features, - size_t num_features, float distance); -}; - -inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {} - -/** - * \brief Interface for modalities that plug into the LINE template matching representation. - * - * \todo Max response, to allow optimization of summing (255/MAX) features as uint8 - */ -class CV_EXPORTS Modality -{ -public: - // Virtual destructor - virtual ~Modality() {} - - /** - * \brief Form a quantized image pyramid from a source image. - * - * \param[in] src The source image. Type depends on the modality. - * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero - * in quantized image and cannot be extracted as features. - */ - Ptr process(const Mat& src, - const Mat& mask = Mat()) const - { - return processImpl(src, mask); - } - - virtual String name() const =0; - - virtual void read(const FileNode& fn) =0; - virtual void write(FileStorage& fs) const =0; - - /** - * \brief Create modality by name. 
- * - * The following modality types are supported: - * - "ColorGradient" - * - "DepthNormal" - */ - static Ptr create(const String& modality_type); - - /** - * \brief Load a modality from file. - */ - static Ptr create(const FileNode& fn); - -protected: - // Indirection is because process() has a default parameter. - virtual Ptr processImpl(const Mat& src, - const Mat& mask) const =0; -}; - -/** - * \brief Modality that computes quantized gradient orientations from a color image. - */ -class CV_EXPORTS ColorGradient : public Modality -{ -public: - /** - * \brief Default constructor. Uses reasonable default parameter values. - */ - ColorGradient(); - - /** - * \brief Constructor. - * - * \param weak_threshold When quantizing, discard gradients with magnitude less than this. - * \param num_features How many features a template must contain. - * \param strong_threshold Consider as candidate features only gradients whose norms are - * larger than this. - */ - ColorGradient(float weak_threshold, size_t num_features, float strong_threshold); - - virtual String name() const; - - virtual void read(const FileNode& fn); - virtual void write(FileStorage& fs) const; - - float weak_threshold; - size_t num_features; - float strong_threshold; - -protected: - virtual Ptr processImpl(const Mat& src, - const Mat& mask) const; -}; - -/** - * \brief Modality that computes quantized surface normals from a dense depth map. - */ -class CV_EXPORTS DepthNormal : public Modality -{ -public: - /** - * \brief Default constructor. Uses reasonable default parameter values. - */ - DepthNormal(); - - /** - * \brief Constructor. - * - * \param distance_threshold Ignore pixels beyond this distance. - * \param difference_threshold When computing normals, ignore contributions of pixels whose - * depth difference with the central pixel is above this threshold. - * \param num_features How many features a template must contain. 
- * \param extract_threshold Consider as candidate feature only if there are no differing - * orientations within a distance of extract_threshold. - */ - DepthNormal(int distance_threshold, int difference_threshold, size_t num_features, - int extract_threshold); - - virtual String name() const; - - virtual void read(const FileNode& fn); - virtual void write(FileStorage& fs) const; - - int distance_threshold; - int difference_threshold; - size_t num_features; - int extract_threshold; - -protected: - virtual Ptr processImpl(const Mat& src, - const Mat& mask) const; -}; - -/** - * \brief Debug function to colormap a quantized image for viewing. - */ -void colormap(const Mat& quantized, Mat& dst); - -/** - * \brief Represents a successful template match. - */ -struct CV_EXPORTS Match -{ - Match() - { - } - - Match(int x, int y, float similarity, const String& class_id, int template_id); - - /// Sort matches with high similarity to the front - bool operator<(const Match& rhs) const - { - // Secondarily sort on template_id for the sake of duplicate removal - if (similarity != rhs.similarity) - return similarity > rhs.similarity; - else - return template_id < rhs.template_id; - } - - bool operator==(const Match& rhs) const - { - return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id; - } - - int x; - int y; - float similarity; - String class_id; - int template_id; -}; - -inline -Match::Match(int _x, int _y, float _similarity, const String& _class_id, int _template_id) - : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id) -{} - -/** - * \brief Object detector using the LINE template matching algorithm with any set of - * modalities. - */ -class CV_EXPORTS Detector -{ -public: - /** - * \brief Empty constructor, initialize with read(). - */ - Detector(); - - /** - * \brief Constructor. - * - * \param modalities Modalities to use (color gradients, depth normals, ...). 
- * \param T_pyramid Value of the sampling step T at each pyramid level. The - * number of pyramid levels is T_pyramid.size(). - */ - Detector(const std::vector< Ptr >& modalities, const std::vector& T_pyramid); - - /** - * \brief Detect objects by template matching. - * - * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid. - * - * \param sources Source images, one for each modality. - * \param threshold Similarity threshold, a percentage between 0 and 100. - * \param[out] matches Template matches, sorted by similarity score. - * \param class_ids If non-empty, only search for the desired object classes. - * \param[out] quantized_images Optionally return vector of quantized images. - * \param masks The masks for consideration during matching. The masks should be CV_8UC1 - * where 255 represents a valid pixel. If non-empty, the vector must be - * the same size as sources. Each element must be - * empty or the same size as its corresponding source. - */ - void match(const std::vector& sources, float threshold, std::vector& matches, - const std::vector& class_ids = std::vector(), - OutputArrayOfArrays quantized_images = noArray(), - const std::vector& masks = std::vector()) const; - - /** - * \brief Add new object template. - * - * \param sources Source images, one for each modality. - * \param class_id Object class ID. - * \param object_mask Mask separating object from background. - * \param[out] bounding_box Optionally return bounding box of the extracted features. - * - * \return Template ID, or -1 if failed to extract a valid template. - */ - int addTemplate(const std::vector& sources, const String& class_id, - const Mat& object_mask, Rect* bounding_box = NULL); - - /** - * \brief Add a new object template computed by external means. - */ - int addSyntheticTemplate(const std::vector