From 1796a26fc74cb28c748c62dc253754ea536304c6 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Mon, 13 Oct 2014 23:01:45 +0400 Subject: [PATCH 01/11] yet another attempt to refactor features2d; the first commit, features2d does not even compile --- .../features2d/include/opencv2/features2d.hpp | 424 +++--------------- modules/features2d/src/blobdetector.cpp | 27 +- modules/features2d/src/brisk.cpp | 73 +++ modules/features2d/src/descriptors.cpp | 110 ----- modules/features2d/src/detectors.cpp | 145 ++---- modules/features2d/src/fast.cpp | 47 +- modules/features2d/src/features2d_init.cpp | 4 + modules/features2d/src/kaze.cpp | 196 +++----- modules/features2d/src/kaze/AKAZEConfig.h | 21 +- modules/features2d/src/kaze/AKAZEFeatures.cpp | 39 +- modules/features2d/src/kaze/KAZEConfig.h | 6 +- modules/features2d/src/kaze/KAZEFeatures.h | 31 +- modules/features2d/src/orb.cpp | 74 ++- 13 files changed, 414 insertions(+), 783 deletions(-) delete mode 100644 modules/features2d/src/descriptors.cpp diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index d54b38ab8..4262f1c56 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -49,8 +49,6 @@ namespace cv { -CV_EXPORTS bool initModule_features2d(void); - // //! writes vector of keypoints to the file storage // CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector& keypoints); // //! reads vector of keypoints from the specified file storage node @@ -94,12 +92,12 @@ public: /************************************ Base Classes ************************************/ /* - * Abstract base class for 2D image feature detectors. + * Abstract base class for 2D image feature detectors and descriptor extractors */ -class CV_EXPORTS_W FeatureDetector : public virtual Algorithm +class CV_EXPORTS_W Feature2D : public virtual Algorithm { public: - virtual ~FeatureDetector(); + virtual ~Feature2D(); /* * Detect keypoints in an image. @@ -108,47 +106,9 @@ public: * mask Mask specifying where to look for keypoints (optional). Must be a char * matrix with non-zero values in the region of interest. */ - CV_WRAP void detect( InputArray image, CV_OUT std::vector& keypoints, InputArray mask=noArray() ) const; - - /* - * Detect keypoints in an image set. - * images Image collection. - * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i]. - * masks Masks for image set. masks[i] is a mask for images[i]. - */ - void detect( InputArrayOfArrays images, std::vector >& keypoints, InputArrayOfArrays masks=noArray() ) const; - - // Return true if detector object is empty - CV_WRAP virtual bool empty() const; - - // Create feature detector by detector name. - CV_WRAP static Ptr create( const String& detectorType ); - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const = 0; - - /* - * Remove keypoints that are not in the mask. - * Helper function, useful when wrapping a library call for keypoint detection that - * does not support a mask argument. - */ - static void removeInvalidPoints( const Mat & mask, std::vector& keypoints ); -}; - - -/* - * Abstract base class for computing descriptors for image keypoints. - * - * In this interface we assume a keypoint descriptor can be represented as a - * dense, fixed-dimensional vector of some basic type. 
Most descriptors used - * in practice follow this pattern, as it makes it very easy to compute - * distances between descriptors. Therefore we represent a collection of - * descriptors as a Mat, where each row is one keypoint descriptor. - */ -class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm -{ -public: - virtual ~DescriptorExtractor(); + CV_WRAP virtual void detect( InputArray image, + CV_OUT std::vector& keypoints, + InputArray mask=noArray() ); /* * Compute the descriptors for a set of keypoints in an image. @@ -156,62 +116,26 @@ public: * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. */ - CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const; + CV_WRAP virtual void compute( InputArray image, + CV_OUT CV_IN_OUT std::vector& keypoints, + OutputArray descriptors ); - /* - * Compute the descriptors for a keypoints collection detected in image collection. - * images Image collection. - * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i]. - * Keypoints for which a descriptor cannot be computed are removed. - * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i]. - */ - void compute( InputArrayOfArrays images, std::vector >& keypoints, OutputArrayOfArrays descriptors ) const; + /* Detects keypoints and computes the descriptors */ + CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask, + CV_OUT std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ); - CV_WRAP virtual int descriptorSize() const = 0; - CV_WRAP virtual int descriptorType() const = 0; - CV_WRAP virtual int defaultNorm() const = 0; + CV_WRAP virtual int descriptorSize() const; + CV_WRAP virtual int descriptorType() const; + CV_WRAP virtual int defaultNorm() const; + // Return true if detector object is empty CV_WRAP virtual bool empty() const; - - CV_WRAP static Ptr create( const String& descriptorExtractorType ); - -protected: - virtual void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const = 0; - - /* - * Remove keypoints within borderPixels of an image edge. - */ - static void removeBorderKeypoints( std::vector& keypoints, - Size imageSize, int borderSize ); }; - - -/* - * Abstract base class for simultaneous 2D feature detection descriptor extraction. - */ -class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor -{ -public: - /* - * Detect keypoints in an image. - * image The image. - * keypoints The detected keypoints. - * mask Mask specifying where to look for keypoints (optional). Must be a char - * matrix with non-zero values in the region of interest. - * useProvidedKeypoints If true, the method will skip the detection phase and will compute - * descriptors for the provided keypoints - */ - CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask, - CV_OUT std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints=false ) const = 0; - - CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const; - - // Create feature detector and descriptor extractor by name. - CV_WRAP static Ptr create( const String& name ); -}; +typedef Feature2D FeatureDetector; +typedef Feature2D DescriptorExtractor; /*! 
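For orientation, here is a minimal usage sketch of the unified Feature2D interface introduced above: detect(), compute() and the new detectAndCompute() are plain virtual methods, and concrete algorithms are obtained through static create() factories instead of public constructors. This sketch is not part of the patch; the input file name is hypothetical and ORB is used only as an example implementation.

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE); // hypothetical input image
    cv::Ptr<cv::Feature2D> f2d = cv::ORB::create();              // factory call replaces direct construction
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    // single call replacing the old operator()(image, mask, keypoints, descriptors, useProvidedKeypoints)
    f2d->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    return 0;
}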
BRISK implementation @@ -219,94 +143,12 @@ public: class CV_EXPORTS_W BRISK : public Feature2D { public: - CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f); - - virtual ~BRISK(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the BRISK features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the BRISK features and descriptors on an image - void operator()( InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints=false ) const; - - AlgorithmInfo* info() const; - + CV_WRAP static Ptr create(int thresh=30, int octaves=3, float patternScale=1.0f); // custom setup - CV_WRAP explicit BRISK(std::vector &radiusList, std::vector &numberList, - float dMax=5.85f, float dMin=8.2f, std::vector indexChange=std::vector()); - - // call this to generate the kernel: - // circle of radius r (pixels), with n points; - // short pairings with dMax, long pairings with dMin - CV_WRAP void generateKernel(std::vector &radiusList, - std::vector &numberList, float dMax=5.85f, float dMin=8.2f, - std::vector indexChange=std::vector()); - -protected: - - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector& keypoints) const; - void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool doDescriptors, bool doOrientation, - bool useProvidedKeypoints) const; - - // Feature parameters - CV_PROP_RW int threshold; - CV_PROP_RW int octaves; - - // some helper structures for the Brisk pattern representation - struct BriskPatternPoint{ - float x; // x coordinate relative to center - float y; // x coordinate relative to center - float sigma; // Gaussian smoothing sigma - }; - struct BriskShortPair{ - unsigned int i; // index of the first pattern point - unsigned int j; // index of other pattern point - }; - struct BriskLongPair{ - unsigned int i; // index of the first pattern point - unsigned int j; // index of other pattern point - int weighted_dx; // 1024.0/dx - int weighted_dy; // 1024.0/dy - }; - inline int smoothedIntensity(const cv::Mat& image, - const cv::Mat& integral,const float key_x, - const float key_y, const unsigned int scale, - const unsigned int rot, const unsigned int point) const; - // pattern properties - BriskPatternPoint* patternPoints_; //[i][rotation][scale] - unsigned int points_; // total number of collocation points - float* scaleList_; // lists the scaling per scale index [scale] - unsigned int* sizeList_; // lists the total pattern size per scale index [scale] - static const unsigned int scales_; // scales discretization - static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... 
- static const unsigned int n_rot_; // discretization of the rotation look-up - - // pairs - int strings_; // number of uchars the descriptor consists of - float dMax_; // short pair maximum distance - float dMin_; // long pair maximum distance - BriskShortPair* shortPairs_; // d<_dMax - BriskLongPair* longPairs_; // d>_dMin - unsigned int noShortPairs_; // number of shortParis - unsigned int noLongPairs_; // number of longParis - - // general - static const float basicSize_; + CV_WRAP static Ptr create(const std::vector &radiusList, const std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); }; - /*! ORB implementation. */ @@ -316,44 +158,10 @@ public: // the size of the signature in bytes enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; - CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, + CV_WRAP static Ptr create(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold = 20); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the ORB features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the ORB features and descriptors on an image - void operator()( InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints=false ) const; - - AlgorithmInfo* info() const; - -protected: - - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - CV_PROP_RW int nfeatures; - CV_PROP_RW double scaleFactor; - CV_PROP_RW int nlevels; - CV_PROP_RW int edgeThreshold; - CV_PROP_RW int firstLevel; - CV_PROP_RW int WTA_K; - CV_PROP_RW int scoreType; - CV_PROP_RW int patchSize; - CV_PROP_RW int fastThreshold; }; -typedef ORB OrbFeatureDetector; -typedef ORB OrbDescriptorExtractor; - /*! Maximal Stable Extremal Regions class. @@ -363,36 +171,19 @@ typedef ORB OrbDescriptorExtractor; It returns the regions, each of those is encoded as a contour. */ -class CV_EXPORTS_W MSER : public FeatureDetector +class CV_EXPORTS_W MSER : public Feature2D { public: //! the full constructor - CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400, + CV_WRAP static Ptr create( int _delta=5, int _min_area=60, int _max_area=14400, double _max_variation=0.25, double _min_diversity=.2, int _max_evolution=200, double _area_threshold=1.01, double _min_margin=0.003, int _edge_blur_size=5 ); - //! 
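The BRISK declaration above keeps both setups as create() overloads: the default pattern and a custom sampling pattern (previously a second constructor plus generateKernel()). A brief sketch, assuming <opencv2/features2d.hpp> is included; the ring radii and per-ring point counts below are illustrative values only, and the element types (float radii, int counts) are an assumption matching OpenCV's released BRISK interface:

    std::vector<float> radiusList = { 0.0f, 2.9f, 4.9f, 7.4f, 10.8f }; // ring radii in pixels (illustrative)
    std::vector<int>   numberList = { 1, 10, 14, 15, 20 };             // sample points per ring (illustrative)
    cv::Ptr<cv::BRISK> briskDefault = cv::BRISK::create(30, 3, 1.0f);                         // thresh, octaves, patternScale
    cv::Ptr<cv::BRISK> briskCustom  = cv::BRISK::create(radiusList, numberList, 5.85f, 8.2f); // dMax, dMin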
the operator that extracts the MSERs from the image or the specific part of it - CV_WRAP_AS(detect) void operator()( InputArray image, CV_OUT std::vector >& msers, - InputArray mask=noArray() ) const; - AlgorithmInfo* info() const; - -protected: - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int delta; - int minArea; - int maxArea; - double maxVariation; - double minDiversity; - int maxEvolution; - double areaThreshold; - double minMargin; - int edgeBlurSize; + CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, + OutputArray stats=noArray() ) const = 0; }; -typedef MSER MserFeatureDetector; - //! detects corners using FAST algorithm by E. Rosten CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, int threshold, bool nonmaxSuppression=true ); @@ -400,48 +191,27 @@ CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, int threshold, bool nonmaxSuppression, int type ); -class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector +class CV_EXPORTS_W FastFeatureDetector : public Feature2D { public: enum Type { - TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 }; - CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true); - CV_WRAP FastFeatureDetector( int threshold, bool nonmaxSuppression, int type); - AlgorithmInfo* info() const; - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int threshold; - bool nonmaxSuppression; - int type; + CV_WRAP static Ptr create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 ); }; -class CV_EXPORTS_W GFTTDetector : public FeatureDetector +class CV_EXPORTS_W GFTTDetector : public Feature2D { public: - CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, - int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); - AlgorithmInfo* info() const; - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int nfeatures; - double qualityLevel; - double minDistance; - int blockSize; - bool useHarrisDetector; - double k; + CV_WRAP static Ptr create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); }; -typedef GFTTDetector GoodFeaturesToTrackDetector; -class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector +class CV_EXPORTS_W SimpleBlobDetector : public Feature2D { public: struct CV_EXPORTS_W_SIMPLE Params @@ -472,81 +242,29 @@ public: void write( FileStorage& fs ) const; }; - CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); - - virtual void read( const FileNode& fn ); - virtual void write( FileStorage& fs ) const; - -protected: - struct CV_EXPORTS Center - { - Point2d location; - double radius; - double confidence; - }; - - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector
¢ers) const; - - Params params; - AlgorithmInfo* info() const; + CV_WRAP static Ptr + create(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); }; -// KAZE/AKAZE diffusivity -enum { - DIFF_PM_G1 = 0, - DIFF_PM_G2 = 1, - DIFF_WEICKERT = 2, - DIFF_CHARBONNIER = 3 -}; - -// AKAZE descriptor type -enum { - DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_KAZE = 3, - DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_MLDB = 5 -}; - /*! KAZE implementation */ class CV_EXPORTS_W KAZE : public Feature2D { public: - CV_WRAP KAZE(); - CV_WRAP explicit KAZE(bool extended, bool upright, float threshold = 0.001f, - int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); + enum + { + DIFF_PM_G1 = 0, + DIFF_PM_G2 = 1, + DIFF_WEICKERT = 2, + DIFF_CHARBONNIER = 3 + }; - virtual ~KAZE(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - AlgorithmInfo* info() const; - - // Compute the KAZE features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the KAZE features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints = false) const; - -protected: - void detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const; - void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; - - CV_PROP bool extended; - CV_PROP bool upright; - CV_PROP float threshold; - CV_PROP int octaves; - CV_PROP int sublevels; - CV_PROP int diffusivity; + CV_WRAP static Ptr create(bool extended=false, bool upright=false, + float threshold = 0.001f, + int octaves = 4, int sublevels = 4, + int diffusivity = KAZE::DIFF_PM_G2); }; /*! 
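The detector-only classes follow the same factory pattern, and the KAZE diffusivity constants move from module-level enums into the class itself, so call sites now qualify them as KAZE::DIFF_* (AKAZE, declared just below, does the same with its DESCRIPTOR_* values). A brief sketch with illustrative parameter values, reusing img and keypoints from the earlier example:

    cv::Ptr<cv::FastFeatureDetector> fast =
        cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);
    cv::Ptr<cv::GFTTDetector> gftt = cv::GFTTDetector::create(500, 0.01, 10.0); // maxCorners, qualityLevel, minDistance
    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create(false, false, 0.001f, 4, 4, cv::KAZE::DIFF_PM_G2);
    fast->detect(img, keypoints);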
@@ -555,41 +273,21 @@ AKAZE implementation class CV_EXPORTS_W AKAZE : public Feature2D { public: - CV_WRAP AKAZE(); - CV_WRAP explicit AKAZE(int descriptor_type, int descriptor_size = 0, int descriptor_channels = 3, - float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); + // AKAZE descriptor type + enum + { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 + }; - virtual ~AKAZE(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the AKAZE features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the AKAZE features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints = false) const; - - AlgorithmInfo* info() const; - -protected: - - void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; - void detectImpl(InputArray image, std::vector& keypoints, InputArray mask = noArray()) const; - - CV_PROP int descriptor; - CV_PROP int descriptor_channels; - CV_PROP int descriptor_size; - CV_PROP float threshold; - CV_PROP int octaves; - CV_PROP int sublevels; - CV_PROP int diffusivity; + CV_WRAP static Ptr create(int descriptor_type=DESCRIPTOR_MLDB, + int descriptor_size = 0, int descriptor_channels = 3, + float threshold = 0.001f, int octaves = 4, + int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2); }; + /****************************************************************************************\ * Distance * \****************************************************************************************/ diff --git a/modules/features2d/src/blobdetector.cpp b/modules/features2d/src/blobdetector.cpp index 69e058555..b9511e9ac 100644 --- a/modules/features2d/src/blobdetector.cpp +++ b/modules/features2d/src/blobdetector.cpp @@ -55,7 +55,32 @@ # endif #endif -using namespace cv; +namespace cv +{ + +class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector +{ +public: + + explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); + + virtual void read( const FileNode& fn ); + virtual void write( FileStorage& fs ) const; + +protected: + struct CV_EXPORTS Center + { + Point2d location; + double radius; + double confidence; + }; + + virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector
¢ers) const; + + Params params; + AlgorithmInfo* info() const; +}; /* * SimpleBlobDetector diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index eb832d640..3163de70e 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -53,6 +53,79 @@ namespace cv { + +class BRISK_Impl : public BRISK +{ +public: + explicit BRISK_Impl(int thresh=30, int octaves=3, float patternScale=1.0f); + // custom setup + explicit BRISK_Impl(const std::vector &radiusList, const std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); + + // call this to generate the kernel: + // circle of radius r (pixels), with n points; + // short pairings with dMax, long pairings with dMin + void generateKernel(std::vector &radiusList, + std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + std::vector indexChange=std::vector()); + +protected: + + void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; + void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + + void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector& keypoints) const; + void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool doDescriptors, bool doOrientation, + bool useProvidedKeypoints) const; + + // Feature parameters + CV_PROP_RW int threshold; + CV_PROP_RW int octaves; + + // some helper structures for the Brisk pattern representation + struct BriskPatternPoint{ + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + struct BriskShortPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + }; + struct BriskLongPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + int weighted_dx; // 1024.0/dx + int weighted_dy; // 1024.0/dy + }; + inline int smoothedIntensity(const cv::Mat& image, + const cv::Mat& integral,const float key_x, + const float key_y, const unsigned int scale, + const unsigned int rot, const unsigned int point) const; + // pattern properties + BriskPatternPoint* patternPoints_; //[i][rotation][scale] + unsigned int points_; // total number of collocation points + float* scaleList_; // lists the scaling per scale index [scale] + unsigned int* sizeList_; // lists the total pattern size per scale index [scale] + static const unsigned int scales_; // scales discretization + static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... 
+ static const unsigned int n_rot_; // discretization of the rotation look-up + + // pairs + int strings_; // number of uchars the descriptor consists of + float dMax_; // short pair maximum distance + float dMin_; // long pair maximum distance + BriskShortPair* shortPairs_; // d<_dMax + BriskLongPair* longPairs_; // d>_dMin + unsigned int noShortPairs_; // number of shortParis + unsigned int noLongPairs_; // number of longParis + + // general + static const float basicSize_; +}; + + // a layer in the Brisk detector pyramid class CV_EXPORTS BriskLayer { diff --git a/modules/features2d/src/descriptors.cpp b/modules/features2d/src/descriptors.cpp deleted file mode 100644 index 23d9fbbc9..000000000 --- a/modules/features2d/src/descriptors.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" -#include - -namespace cv -{ - -/****************************************************************************************\ -* DescriptorExtractor * -\****************************************************************************************/ -/* - * DescriptorExtractor - */ -DescriptorExtractor::~DescriptorExtractor() -{} - -void DescriptorExtractor::compute( InputArray image, std::vector& keypoints, OutputArray descriptors ) const -{ - if( image.empty() || keypoints.empty() ) - { - descriptors.release(); - return; - } - - KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 ); - KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits::epsilon() ); - - computeImpl( image, keypoints, descriptors ); -} - -void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector >& pointCollection, OutputArrayOfArrays _descCollection ) const -{ - std::vector imageCollection, descCollection; - _imageCollection.getMatVector(imageCollection); - _descCollection.getMatVector(descCollection); - CV_Assert( imageCollection.size() == pointCollection.size() ); - descCollection.resize( imageCollection.size() ); - for( size_t i = 0; i < imageCollection.size(); i++ ) - compute( imageCollection[i], pointCollection[i], descCollection[i] ); -} - -/*void DescriptorExtractor::read( const FileNode& ) -{} - -void DescriptorExtractor::write( FileStorage& ) const -{}*/ - -bool DescriptorExtractor::empty() const -{ - return false; -} - -void DescriptorExtractor::removeBorderKeypoints( std::vector& keypoints, - Size imageSize, int borderSize ) -{ - KeyPointsFilter::runByImageBorder( keypoints, imageSize, borderSize ); -} - -Ptr DescriptorExtractor::create(const String& descriptorExtractorType) -{ - return Algorithm::create("Feature2D." + descriptorExtractorType); -} - - -CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const -{ - DescriptorExtractor::compute(image, keypoints, descriptors); -} - -} diff --git a/modules/features2d/src/detectors.cpp b/modules/features2d/src/detectors.cpp index 866d24d10..899e2f274 100644 --- a/modules/features2d/src/detectors.cpp +++ b/modules/features2d/src/detectors.cpp @@ -44,118 +44,65 @@ namespace cv { -/* - * FeatureDetector - */ - -FeatureDetector::~FeatureDetector() -{} - -void FeatureDetector::detect( InputArray image, std::vector& keypoints, InputArray mask ) const +class GFTTDetector_Impl : public GFTTDetector { - keypoints.clear(); - - if( image.empty() ) - return; - - CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) ); - - detectImpl( image, keypoints, mask ); -} - -void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector >& pointCollection, - InputArrayOfArrays _masks ) const -{ - if (_imageCollection.isUMatVector()) +public: + GFTTDetector_Impl( int _nfeatures, double _qualityLevel, + double _minDistance, int _blockSize, + bool _useHarrisDetector, double _k ) + : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance), + blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k) { - std::vector uimageCollection, umasks; - _imageCollection.getUMatVector(uimageCollection); - _masks.getUMatVector(umasks); - - pointCollection.resize( uimageCollection.size() ); - for( size_t i = 0; i < uimageCollection.size(); i++ ) - detect( uimageCollection[i], pointCollection[i], umasks.empty() ? 
noArray() : umasks[i] ); - - return; } - std::vector imageCollection, masks; - _imageCollection.getMatVector(imageCollection); - _masks.getMatVector(masks); - - pointCollection.resize( imageCollection.size() ); - for( size_t i = 0; i < imageCollection.size(); i++ ) - detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] ); -} - -/*void FeatureDetector::read( const FileNode& ) -{} - -void FeatureDetector::write( FileStorage& ) const -{}*/ - -bool FeatureDetector::empty() const -{ - return false; -} - -void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector& keypoints ) -{ - KeyPointsFilter::runByPixelsMask( keypoints, mask ); -} - -Ptr FeatureDetector::create( const String& detectorType ) -{ - if( detectorType.compare( "HARRIS" ) == 0 ) + void detect( InputArray _image, std::vector& keypoints, InputArray _mask ) { - Ptr fd = FeatureDetector::create("GFTT"); - fd->set("useHarrisDetector", true); - return fd; - } + std::vector corners; - return Algorithm::create("Feature2D." + detectorType); -} + if (_image.isUMat()) + { + UMat ugrayImage; + if( _image.type() != CV_8U ) + cvtColor( _image, ugrayImage, COLOR_BGR2GRAY ); + else + ugrayImage = _image.getUMat(); - -GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel, - double _minDistance, int _blockSize, - bool _useHarrisDetector, double _k ) - : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance), - blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k) -{ -} - -void GFTTDetector::detectImpl( InputArray _image, std::vector& keypoints, InputArray _mask) const -{ - std::vector corners; - - if (_image.isUMat()) - { - UMat ugrayImage; - if( _image.type() != CV_8U ) - cvtColor( _image, ugrayImage, COLOR_BGR2GRAY ); + goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask, + blockSize, useHarrisDetector, k ); + } else - ugrayImage = _image.getUMat(); + { + Mat image = _image.getMat(), grayImage = image; + if( image.type() != CV_8U ) + cvtColor( image, grayImage, COLOR_BGR2GRAY ); - goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask, - blockSize, useHarrisDetector, k ); - } - else - { - Mat image = _image.getMat(), grayImage = image; - if( image.type() != CV_8U ) - cvtColor( image, grayImage, COLOR_BGR2GRAY ); + goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask, + blockSize, useHarrisDetector, k ); + } + + keypoints.resize(corners.size()); + std::vector::const_iterator corner_it = corners.begin(); + std::vector::iterator keypoint_it = keypoints.begin(); + for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it ) + *keypoint_it = KeyPoint( *corner_it, (float)blockSize ); - goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask, - blockSize, useHarrisDetector, k ); } - keypoints.resize(corners.size()); - std::vector::const_iterator corner_it = corners.begin(); - std::vector::iterator keypoint_it = keypoints.begin(); - for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it ) - *keypoint_it = KeyPoint( *corner_it, (float)blockSize ); + int nfeatures; + double qualityLevel; + double minDistance; + int blockSize; + bool useHarrisDetector; + double k; +}; + +Ptr GFTTDetector::create( int _nfeatures, double _qualityLevel, + double _minDistance, int _blockSize, + bool _useHarrisDetector, double _k ) +{ + return makePtr(_nfeatures, _qualityLevel, + _minDistance, _blockSize, _useHarrisDetector, _k); } } diff --git 
a/modules/features2d/src/fast.cpp b/modules/features2d/src/fast.cpp index 79b6d6cef..8acdbb1dd 100644 --- a/modules/features2d/src/fast.cpp +++ b/modules/features2d/src/fast.cpp @@ -356,30 +356,39 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool { FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16); } -/* - * FastFeatureDetector - */ -FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression ) - : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(FastFeatureDetector::TYPE_9_16) -{} -FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type ) -: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type) -{} -void FastFeatureDetector::detectImpl( InputArray _image, std::vector& keypoints, InputArray _mask ) const +class FastFeatureDetector_Impl : public FastFeatureDetector { - Mat mask = _mask.getMat(), grayImage; - UMat ugrayImage; - _InputArray gray = _image; - if( _image.type() != CV_8U ) +public: + FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, int _type ) + : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type) + {} + + void detect( InputArray _image, std::vector& keypoints, InputArray _mask ) { - _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage); - cvtColor( _image, ogray, COLOR_BGR2GRAY ); - gray = ogray; + Mat mask = _mask.getMat(), grayImage; + UMat ugrayImage; + _InputArray gray = _image; + if( _image.type() != CV_8U ) + { + _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage); + cvtColor( _image, ogray, COLOR_BGR2GRAY ); + gray = ogray; + } + FAST( gray, keypoints, threshold, nonmaxSuppression, type ); + KeyPointsFilter::runByPixelsMask( keypoints, mask ); } - FAST( gray, keypoints, threshold, nonmaxSuppression, type ); - KeyPointsFilter::runByPixelsMask( keypoints, mask ); + + int threshold; + bool nonmaxSuppression; + int type; +}; + +Ptr FastFeatureDetector::create( int threshold, bool nonmaxSuppression, int type ) +{ + return makePtr(threshold, nonmaxSuppression, type); } + } diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index 31ca564e4..b23a8899b 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -42,6 +42,8 @@ #include "precomp.hpp" +#if 0 + using namespace cv; Ptr Feature2D::create( const String& feature2DType ) @@ -193,3 +195,5 @@ bool cv::initModule_features2d(void) return all; } + +#endif diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index e4ddbe445..d16e988f6 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -52,153 +52,93 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd namespace cv { - KAZE::KAZE() - : extended(false) - , upright(false) - , threshold(0.001f) - , octaves(4) - , sublevels(4) - , diffusivity(DIFF_PM_G2) - { - } - KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves, - int _sublevels, int _diffusivity) + class KAZE_Impl : public KAZE + { + public: + KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves, + int _sublevels, int _diffusivity) : extended(_extended) , upright(_upright) , threshold(_threshold) , octaves(_octaves) , sublevels(_sublevels) , diffusivity(_diffusivity) - { - - } - KAZE::~KAZE() - { - - } - - // returns the 
descriptor size in bytes - int KAZE::descriptorSize() const - { - return extended ? 128 : 64; - } - - // returns the descriptor type - int KAZE::descriptorType() const - { - return CV_32F; - } - - // returns the default norm type - int KAZE::defaultNorm() const - { - return NORM_L2; - } - - void KAZE::operator()(InputArray image, InputArray mask, std::vector& keypoints) const - { - detectImpl(image, keypoints, mask); - } - - void KAZE::operator()(InputArray image, InputArray mask, - std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - cv::Mat& desc = descriptors.getMatRef(); - - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - - if (!useProvidedKeypoints) { - impl.Feature_Detection(keypoints); } - if (!mask.empty()) + virtual ~KAZE_Impl() {} + + // returns the descriptor size in bytes + int descriptorSize() const { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + return extended ? 128 : 64; } - impl.Feature_Description(keypoints, desc); - - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); - } - - void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const - { - Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Feature_Detection(keypoints); - - if (!mask.empty()) + // returns the descriptor type + int descriptorType() const { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + return CV_32F; } - } - void KAZE::computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); + // returns the default norm type + int defaultNorm() const + { + return NORM_L2; + } - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + void detectAndCompute(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); - cv::Mat& desc = descriptors.getMatRef(); + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; + KAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + options.extended = extended; 
+ options.upright = upright; + options.dthreshold = threshold; + options.omax = octaves; + options.nsublevels = sublevels; + options.diffusivity = diffusivity; + + KAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + + if (!useProvidedKeypoints) + { + impl.Feature_Detection(keypoints); + } + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + + if( descriptors.needed() ) + { + Mat& desc = descriptors.getMatRef(); + impl.Feature_Description(keypoints, desc); + + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); + } + } + + bool extended; + bool upright; + float threshold; + int octaves; + int sublevels; + int diffusivity; + }; - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Feature_Description(keypoints, desc); - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); - } } diff --git a/modules/features2d/src/kaze/AKAZEConfig.h b/modules/features2d/src/kaze/AKAZEConfig.h index e2ba51c53..2ea21f370 100644 --- a/modules/features2d/src/kaze/AKAZEConfig.h +++ b/modules/features2d/src/kaze/AKAZEConfig.h @@ -8,23 +8,8 @@ #ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ #define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ -/* ************************************************************************* */ -// OpenCV -#include "../precomp.hpp" -#include - -/* ************************************************************************* */ -/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right -const float gauss25[7][7] = { - { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f }, - { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f }, - { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f }, - { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f }, - { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f }, - { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f }, - { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } -}; - +namespace cv +{ /* ************************************************************************* */ /// AKAZE configuration options structure struct AKAZEOptions { @@ -75,4 +60,6 @@ struct AKAZEOptions { int kcontrast_nbins; ///< Number of bins for the contrast factor histogram }; +} + #endif diff --git a/modules/features2d/src/kaze/AKAZEFeatures.cpp b/modules/features2d/src/kaze/AKAZEFeatures.cpp index 72569dad9..59e260fc4 100644 --- a/modules/features2d/src/kaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/kaze/AKAZEFeatures.cpp @@ -6,6 +6,7 @@ * @author Pablo F. 
Alcantarilla, Jesus Nuevo */ +#include "../precomp.hpp" #include "AKAZEFeatures.h" #include "fed.h" #include "nldiffusion_functions.h" @@ -14,9 +15,9 @@ #include // Namespaces +namespace cv +{ using namespace std; -using namespace cv; -using namespace cv::details::kaze; /* ************************************************************************* */ /** @@ -29,7 +30,7 @@ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { ncycles_ = 0; reordering_ = true; - if (options_.descriptor_size > 0 && options_.descriptor >= cv::DESCRIPTOR_MLDB_UPRIGHT) { + if (options_.descriptor_size > 0 && options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, options_.descriptor_pattern_size, options_.descriptor_channels); } @@ -264,10 +265,10 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) vector kpts_aux; // Set maximum size - if (options_.descriptor == cv::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_MLDB) { + if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) { smax = 10.0f*sqrtf(2.0f); } - else if (options_.descriptor == cv::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_KAZE) { + else if (options_.descriptor == AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_KAZE) { smax = 12.0f*sqrtf(2.0f); } @@ -712,7 +713,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat } // Allocate memory for the matrix with the descriptors - if (options_.descriptor < cv::DESCRIPTOR_MLDB_UPRIGHT) { + if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); } else { @@ -729,17 +730,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat switch (options_.descriptor) { - case cv::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation + case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case cv::DESCRIPTOR_KAZE: + case AKAZE::DESCRIPTOR_KAZE: { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case cv::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation + case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -747,7 +748,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); } break; - case cv::DESCRIPTOR_MLDB: + case AKAZE::DESCRIPTOR_MLDB: { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -765,7 +766,20 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) { +void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) +{ + /* ************************************************************************* */ + /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right + static const float gauss25[7][7] = + { + { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f }, + { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f }, + { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f }, + { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f }, + { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f }, + { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f }, + { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } + }; int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; @@ -1702,3 +1716,6 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int sampleList = samples.rowRange(0, count).clone(); comparisons = comps.rowRange(0, nbits).clone(); } + +} +} diff --git a/modules/features2d/src/kaze/KAZEConfig.h b/modules/features2d/src/kaze/KAZEConfig.h index 546ee3657..5f2f8dcec 100644 --- a/modules/features2d/src/kaze/KAZEConfig.h +++ b/modules/features2d/src/kaze/KAZEConfig.h @@ -12,12 +12,14 @@ #include "../precomp.hpp" #include +namespace cv +{ //************************************************************************************* struct KAZEOptions { KAZEOptions() - : diffusivity(cv::DIFF_PM_G2) + : diffusivity(KAZE::DIFF_PM_G2) , soffset(1.60f) , omax(4) @@ -49,4 +51,6 @@ struct KAZEOptions { bool extended; }; +} + #endif diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index b62f94831..98c830788 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -17,43 +17,48 @@ #include "fed.h" #include "TEvolution.h" +namespace cv +{ + /* ************************************************************************* */ // KAZE Class Declaration class KAZEFeatures { private: - /// Parameters of the Nonlinear diffusion class - KAZEOptions options_; ///< Configuration options for KAZE - std::vector evolution_; ///< Vector of nonlinear diffusion evolution + /// Parameters of the Nonlinear diffusion class + KAZEOptions options_; ///< Configuration options for KAZE + std::vector evolution_; ///< Vector of nonlinear diffusion evolution - /// Vector of keypoint vectors for finding extrema in multiple threads + /// Vector of keypoint vectors for finding extrema in multiple threads std::vector > kpts_par_; - /// FED parameters - int ncycles_; ///< Number of cycles - bool reordering_; ///< Flag for reordering time steps - std::vector > tsteps_; ///< Vector of FED dynamic time steps - std::vector nsteps_; ///< Vector of number of steps per cycle + /// FED parameters + int ncycles_; ///< Number of cycles + bool reordering_; ///< Flag for reordering time steps + std::vector > tsteps_; ///< Vector of FED dynamic time steps + std::vector nsteps_; ///< Vector of number of steps per cycle public: - /// Constructor + /// 
Constructor KAZEFeatures(KAZEOptions& options); - /// Public methods for KAZE interface + /// Public methods for KAZE interface void Allocate_Memory_Evolution(void); int Create_Nonlinear_Scale_Space(const cv::Mat& img); void Feature_Detection(std::vector& kpts); void Feature_Description(std::vector& kpts, cv::Mat& desc); static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_, const KAZEOptions& options); - /// Feature Detection Methods + /// Feature Detection Methods void Compute_KContrast(const cv::Mat& img, const float& kper); void Compute_Multiscale_Derivatives(void); void Compute_Detector_Response(void); - void Determinant_Hessian(std::vector& kpts); + void Determinant_Hessian(std::vector& kpts); void Do_Subpixel_Refinement(std::vector& kpts); }; +} + #endif diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index 4e74adc9e..af91fb6bb 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -645,38 +645,70 @@ static inline float getScale(int level, int firstLevel, double scaleFactor) return (float)std::pow(scaleFactor, (double)(level - firstLevel)); } -/** Constructor - * @param detector_params parameters to use - */ -ORB::ORB(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold, - int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) : - nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels), - edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K), - scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold) -{} +class ORB_Impl : public ORB +{ +public: + explicit ORB_Impl(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold, + int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) : + nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels), + edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K), + scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold) + {} -int ORB::descriptorSize() const + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + // returns the default norm type + int defaultNorm() const; + + // Compute the ORB_Impl features and descriptors on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; + + // Compute the ORB_Impl features and descriptors on an image + void operator()( InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; + void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + + int nfeatures; + double scaleFactor; + int nlevels; + int edgeThreshold; + int firstLevel; + int WTA_K; + int scoreType; + int patchSize; + int fastThreshold; +}; + +int ORB_Impl::descriptorSize() const { return kBytes; } -int ORB::descriptorType() const +int ORB_Impl::descriptorType() const { return CV_8U; } -int ORB::defaultNorm() const +int ORB_Impl::defaultNorm() const { return NORM_HAMMING; } -/** Compute the ORB features and descriptors on an image +/** Compute the ORB_Impl features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply * @param keypoints the resulting keypoints 
*/ -void ORB::operator()(InputArray image, InputArray mask, std::vector& keypoints) const +void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector& keypoints) const { (*this)(image, mask, keypoints, noArray(), false); } @@ -716,7 +748,7 @@ static void uploadORBKeypoints(const std::vector& src, } -/** Compute the ORB keypoints on an image +/** Compute the ORB_Impl keypoints on an image * @param image_pyramid the image pyramid to compute the features and descriptors on * @param mask_pyramid the masks to apply at every level * @param keypoints the resulting keypoints, clustered per level @@ -788,7 +820,7 @@ static void computeKeyPoints(const Mat& imagePyramid, KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold); // Keep more points than necessary as FAST does not give amazing corners - KeyPointsFilter::retainBest(keypoints, scoreType == ORB::HARRIS_SCORE ? 2 * featuresNum : featuresNum); + KeyPointsFilter::retainBest(keypoints, scoreType == ORB_Impl::HARRIS_SCORE ? 2 * featuresNum : featuresNum); nkeypoints = (int)keypoints.size(); counters[level] = nkeypoints; @@ -814,7 +846,7 @@ static void computeKeyPoints(const Mat& imagePyramid, UMat ukeypoints, uresponses(1, nkeypoints, CV_32F); // Select best features using the Harris cornerness (better scoring than FAST) - if( scoreType == ORB::HARRIS_SCORE ) + if( scoreType == ORB_Impl::HARRIS_SCORE ) { if( useOCL ) { @@ -886,7 +918,7 @@ static void computeKeyPoints(const Mat& imagePyramid, } -/** Compute the ORB features and descriptors on an image +/** Compute the ORB_Impl features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply * @param keypoints the resulting keypoints @@ -894,7 +926,7 @@ static void computeKeyPoints(const Mat& imagePyramid, * @param do_keypoints if true, the keypoints are computed, otherwise used as an input * @param do_descriptors if true, also computes the descriptors */ -void ORB::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, +void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, OutputArray _descriptors, bool useProvidedKeypoints ) const { CV_Assert(patchSize >= 2); @@ -1121,12 +1153,12 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector } } -void ORB::detectImpl( InputArray image, std::vector& keypoints, InputArray mask) const +void ORB_Impl::detectImpl( InputArray image, std::vector& keypoints, InputArray mask) const { (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false); } -void ORB::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const +void ORB_Impl::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const { (*this)(image, Mat(), keypoints, descriptors, true); } From 5e667ee53ab2bb2051cea3fe39a843f2805dc964 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Wed, 15 Oct 2014 22:49:17 +0400 Subject: [PATCH 02/11] OpenCV with the refactored features2d compiles! 
contrib is broken for now; the tests are not tried yet --- .../features2d/include/opencv2/features2d.hpp | 30 +- modules/features2d/perf/opencl/perf_orb.cpp | 14 +- modules/features2d/perf/perf_orb.cpp | 14 +- modules/features2d/src/akaze.cpp | 281 ++- modules/features2d/src/blobdetector.cpp | 20 +- modules/features2d/src/brisk.cpp | 129 +- modules/features2d/src/feature2d.cpp | 159 ++ modules/features2d/src/features2d_init.cpp | 199 -- .../src/{detectors.cpp => gftt.cpp} | 0 modules/features2d/src/kaze.cpp | 8 +- modules/features2d/src/kaze/AKAZEConfig.h | 4 +- modules/features2d/src/kaze/AKAZEFeatures.cpp | 203 +- modules/features2d/src/kaze/AKAZEFeatures.h | 8 +- modules/features2d/src/kaze/KAZEFeatures.cpp | 216 +- modules/features2d/src/kaze/KAZEFeatures.h | 4 +- modules/features2d/src/kaze/TEvolution.h | 19 +- .../src/kaze/nldiffusion_functions.cpp | 976 +++++---- .../src/kaze/nldiffusion_functions.h | 48 +- modules/features2d/src/mser.cpp | 1803 +++++++---------- modules/features2d/src/orb.cpp | 46 +- .../test/test_descriptors_regression.cpp | 21 +- .../test/test_detectors_regression.cpp | 16 +- modules/features2d/test/test_keypoints.cpp | 19 +- modules/features2d/test/test_mser.cpp | 4 + modules/features2d/test/test_orb.cpp | 4 +- modules/stitching/src/matchers.cpp | 8 +- modules/videostab/src/global_motion.cpp | 2 +- 27 files changed, 1939 insertions(+), 2316 deletions(-) create mode 100644 modules/features2d/src/feature2d.cpp delete mode 100644 modules/features2d/src/features2d_init.cpp rename modules/features2d/src/{detectors.cpp => gftt.cpp} (100%) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 4262f1c56..e9222918b 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -110,6 +110,10 @@ public: CV_OUT std::vector& keypoints, InputArray mask=noArray() ); + virtual void detect( InputArrayOfArrays images, + std::vector >& keypoints, + InputArrayOfArrays masks=noArray() ); + /* * Compute the descriptors for a set of keypoints in an image. * image The image. @@ -120,6 +124,10 @@ public: CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ); + virtual void compute( InputArrayOfArrays images, + std::vector >& keypoints, + OutputArrayOfArrays descriptors ); + /* Detects keypoints and computes the descriptors */ CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask, CV_OUT std::vector& keypoints, @@ -146,7 +154,7 @@ public: CV_WRAP static Ptr create(int thresh=30, int octaves=3, float patternScale=1.0f); // custom setup CV_WRAP static Ptr create(const std::vector &radiusList, const std::vector &numberList, - float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); + float dMax=5.85f, float dMin=8.2f, const std::vector& indexChange=std::vector()); }; /*! @@ -174,6 +182,13 @@ public: class CV_EXPORTS_W MSER : public Feature2D { public: + enum + { + DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003, + MAX_EVOLUTION=10004, AREA_THRESHOLD=10005, + MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007 + }; + //! the full constructor CV_WRAP static Ptr create( int _delta=5, int _min_area=60, int _max_area=14400, double _max_variation=0.25, double _min_diversity=.2, @@ -181,7 +196,7 @@ public: double _min_margin=0.003, int _edge_blur_size=5 ); CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, - OutputArray stats=noArray() ) const = 0; + OutputArray stats=noArray() ) = 0; }; //! 
detects corners using FAST algorithm by E. Rosten @@ -199,13 +214,16 @@ public: TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 }; - CV_WRAP static Ptr create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 ); + CV_WRAP static Ptr create( int threshold=10, + bool nonmaxSuppression=true, + int type=FastFeatureDetector::TYPE_9_16 ); }; class CV_EXPORTS_W GFTTDetector : public Feature2D { public: + enum { USE_HARRIS_DETECTOR=10000 }; CV_WRAP static Ptr create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); }; @@ -282,7 +300,7 @@ public: DESCRIPTOR_MLDB = 5 }; - CV_WRAP static Ptr create(int descriptor_type=DESCRIPTOR_MLDB, + CV_WRAP static Ptr create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB, int descriptor_size = 0, int descriptor_channels = 3, float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2); @@ -535,8 +553,6 @@ public: virtual bool isMaskSupported() const { return true; } virtual Ptr clone( bool emptyTrainData=false ) const; - - AlgorithmInfo* info() const; protected: virtual void knnMatchImpl( InputArray queryDescriptors, std::vector >& matches, int k, InputArrayOfArrays masks=noArray(), bool compactResult=false ); @@ -569,8 +585,6 @@ public: virtual bool isMaskSupported() const; virtual Ptr clone( bool emptyTrainData=false ) const; - - AlgorithmInfo* info() const; protected: static void convertToDMatches( const DescriptorCollection& descriptors, const Mat& indices, const Mat& distances, diff --git a/modules/features2d/perf/opencl/perf_orb.cpp b/modules/features2d/perf/opencl/perf_orb.cpp index c551dee88..e9aadf50f 100644 --- a/modules/features2d/perf/opencl/perf_orb.cpp +++ b/modules/features2d/perf/opencl/perf_orb.cpp @@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES) mframe.copyTo(frame); declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = ORB::create(1500, 1.3f, 1); vector points; - OCL_TEST_CYCLE() detector(frame, mask, points); + OCL_TEST_CYCLE() detector->detect(frame, points, mask); std::sort(points.begin(), points.end(), comparators::KeypointGreater()); SANITY_CHECK_KEYPOINTS(points, 1e-5); @@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES) declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = ORB::create(1500, 1.3f, 1); vector points; - detector(frame, mask, points); + detector->detect(frame, points, mask); std::sort(points.begin(), points.end(), comparators::KeypointGreater()); UMat descriptors; - OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true); + OCL_TEST_CYCLE() detector->compute(frame, points, descriptors); SANITY_CHECK(descriptors); } @@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES) mframe.copyTo(frame); declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = ORB::create(1500, 1.3f, 1); vector points; UMat descriptors; - OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false); + OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false); ::perf::sort(points, descriptors); SANITY_CHECK_KEYPOINTS(points, 1e-5); diff --git a/modules/features2d/perf/perf_orb.cpp b/modules/features2d/perf/perf_orb.cpp index 1c181c45e..96cfc3eb3 100644 --- a/modules/features2d/perf/perf_orb.cpp +++ b/modules/features2d/perf/perf_orb.cpp @@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES)) Mat mask; declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = 
ORB::create(1500, 1.3f, 1); vector points; - TEST_CYCLE() detector(frame, mask, points); + TEST_CYCLE() detector->detect(frame, points, mask); sort(points.begin(), points.end(), comparators::KeypointGreater()); SANITY_CHECK_KEYPOINTS(points, 1e-5); @@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES)) Mat mask; declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = ORB::create(1500, 1.3f, 1); vector points; - detector(frame, mask, points); + detector->detect(frame, points, mask); sort(points.begin(), points.end(), comparators::KeypointGreater()); Mat descriptors; - TEST_CYCLE() detector(frame, mask, points, descriptors, true); + TEST_CYCLE() detector->compute(frame, points, descriptors); SANITY_CHECK(descriptors); } @@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES)) Mat mask; declare.in(frame); - ORB detector(1500, 1.3f, 1); + Ptr detector = ORB::create(1500, 1.3f, 1); vector points; Mat descriptors; - TEST_CYCLE() detector(frame, mask, points, descriptors, false); + TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false); perf::sort(points, descriptors); SANITY_CHECK_KEYPOINTS(points, 1e-5); diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index d875b4644..b72b9402a 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -52,22 +52,15 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd #include "kaze/AKAZEFeatures.h" #include -using namespace std; namespace cv { - AKAZE::AKAZE() - : descriptor(DESCRIPTOR_MLDB) - , descriptor_channels(3) - , descriptor_size(0) - , threshold(0.001f) - , octaves(4) - , sublevels(4) - , diffusivity(DIFF_PM_G2) - { - } + using namespace std; - AKAZE::AKAZE(int _descriptor_type, int _descriptor_size, int _descriptor_channels, + class AKAZE_Impl : public AKAZE + { + public: + AKAZE_Impl(int _descriptor_type, int _descriptor_size, int _descriptor_channels, float _threshold, int _octaves, int _sublevels, int _diffusivity) : descriptor(_descriptor_type) , descriptor_channels(_descriptor_channels) @@ -76,181 +69,139 @@ namespace cv , octaves(_octaves) , sublevels(_sublevels) , diffusivity(_diffusivity) - { - - } - - AKAZE::~AKAZE() - { - - } - - // returns the descriptor size in bytes - int AKAZE::descriptorSize() const - { - switch (descriptor) { - case cv::DESCRIPTOR_KAZE: - case cv::DESCRIPTOR_KAZE_UPRIGHT: - return 64; - - case cv::DESCRIPTOR_MLDB: - case cv::DESCRIPTOR_MLDB_UPRIGHT: - // We use the full length binary descriptor -> 486 bits - if (descriptor_size == 0) - { - int t = (6 + 36 + 120) * descriptor_channels; - return (int)ceil(t / 8.); - } - else - { - // We use the random bit selection length binary descriptor - return (int)ceil(descriptor_size / 8.); - } - - default: - return -1; } - } - // returns the descriptor type - int AKAZE::descriptorType() const - { - switch (descriptor) + virtual ~AKAZE_Impl() { - case cv::DESCRIPTOR_KAZE: - case cv::DESCRIPTOR_KAZE_UPRIGHT: - return CV_32F; - case cv::DESCRIPTOR_MLDB: - case cv::DESCRIPTOR_MLDB_UPRIGHT: - return CV_8U; + } + + // returns the descriptor size in bytes + int descriptorSize() const + { + switch (descriptor) + { + case DESCRIPTOR_KAZE: + case DESCRIPTOR_KAZE_UPRIGHT: + return 64; + + case DESCRIPTOR_MLDB: + case DESCRIPTOR_MLDB_UPRIGHT: + // We use the full length binary descriptor -> 486 bits + if (descriptor_size == 0) + { + int t = (6 + 36 + 120) * descriptor_channels; + return (int)ceil(t / 8.); + } + else + { + // We 
use the random bit selection length binary descriptor + return (int)ceil(descriptor_size / 8.); + } default: return -1; + } } - } - // returns the default norm type - int AKAZE::defaultNorm() const - { - switch (descriptor) + // returns the descriptor type + int descriptorType() const { - case cv::DESCRIPTOR_KAZE: - case cv::DESCRIPTOR_KAZE_UPRIGHT: - return cv::NORM_L2; + switch (descriptor) + { + case DESCRIPTOR_KAZE: + case DESCRIPTOR_KAZE_UPRIGHT: + return CV_32F; - case cv::DESCRIPTOR_MLDB: - case cv::DESCRIPTOR_MLDB_UPRIGHT: - return cv::NORM_HAMMING; + case DESCRIPTOR_MLDB: + case DESCRIPTOR_MLDB_UPRIGHT: + return CV_8U; - default: - return -1; + default: + return -1; + } } - } - - void AKAZE::operator()(InputArray image, InputArray mask, - std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - cv::Mat& desc = descriptors.getMatRef(); - - AKAZEOptions options; - options.descriptor = descriptor; - options.descriptor_channels = descriptor_channels; - options.descriptor_size = descriptor_size; - options.img_width = img.cols; - options.img_height = img.rows; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - AKAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - - if (!useProvidedKeypoints) + // returns the default norm type + int defaultNorm() const { - impl.Feature_Detection(keypoints); + switch (descriptor) + { + case DESCRIPTOR_KAZE: + case DESCRIPTOR_KAZE_UPRIGHT: + return NORM_L2; + + case DESCRIPTOR_MLDB: + case DESCRIPTOR_MLDB_UPRIGHT: + return NORM_HAMMING; + + default: + return -1; + } } - if (!mask.empty()) + void detectAndCompute(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + AKAZEOptions options; + options.descriptor = descriptor; + options.descriptor_channels = descriptor_channels; + options.descriptor_size = descriptor_size; + options.img_width = img.cols; + options.img_height = img.rows; + options.dthreshold = threshold; + options.omax = octaves; + options.nsublevels = sublevels; + options.diffusivity = diffusivity; + + AKAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + + if (!useProvidedKeypoints) + { + impl.Feature_Detection(keypoints); + } + + if (!mask.empty()) + { + KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + + if( descriptors.needed() ) + { + Mat& desc = descriptors.getMatRef(); + impl.Compute_Descriptors(keypoints, desc); + + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); + } } - impl.Compute_Descriptors(keypoints, desc); + int descriptor; + int descriptor_channels; + int descriptor_size; + float threshold; + int octaves; + int sublevels; + int diffusivity; + }; - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); - } - - void AKAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const + Ptr AKAZE::create(int descriptor_type, + int 
descriptor_size, int descriptor_channels, + float threshold, int octaves, + int sublevels, int diffusivity) { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - AKAZEOptions options; - options.descriptor = descriptor; - options.descriptor_channels = descriptor_channels; - options.descriptor_size = descriptor_size; - options.img_width = img.cols; - options.img_height = img.rows; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - AKAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Feature_Detection(keypoints); - - if (!mask.empty()) - { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); - } - } - - void AKAZE::computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - cv::Mat& desc = descriptors.getMatRef(); - - AKAZEOptions options; - options.descriptor = descriptor; - options.descriptor_channels = descriptor_channels; - options.descriptor_size = descriptor_size; - options.img_width = img.cols; - options.img_height = img.rows; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - AKAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Compute_Descriptors(keypoints, desc); - - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); + return makePtr(descriptor_type, descriptor_size, descriptor_channels, + threshold, octaves, sublevels, diffusivity); } } diff --git a/modules/features2d/src/blobdetector.cpp b/modules/features2d/src/blobdetector.cpp index b9511e9ac..daf1d3644 100644 --- a/modules/features2d/src/blobdetector.cpp +++ b/modules/features2d/src/blobdetector.cpp @@ -75,11 +75,10 @@ protected: double confidence; }; - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + virtual void detect( InputArray image, std::vector& keypoints, InputArray mask=noArray() ); virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector
&centers) const; Params params; - AlgorithmInfo* info() const; }; /* @@ -173,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const fs << "maxConvexity" << maxConvexity; } -SimpleBlobDetector::SimpleBlobDetector(const SimpleBlobDetector::Params &parameters) : +SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters) : params(parameters) { } -void SimpleBlobDetector::read( const cv::FileNode& fn ) +void SimpleBlobDetectorImpl::read( const cv::FileNode& fn ) { params.read(fn); } -void SimpleBlobDetector::write( cv::FileStorage& fs ) const +void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const { params.write(fs); } -void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector
&centers) const +void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector
¢ers) const { Mat image = _image.getMat(), binaryImage = _binaryImage.getMat(); (void)image; @@ -302,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s #endif } -void SimpleBlobDetector::detectImpl(InputArray image, std::vector& keypoints, InputArray) const +void SimpleBlobDetectorImpl::detect(InputArray image, std::vector& keypoints, InputArray) { //TODO: support mask keypoints.clear(); @@ -365,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector& keypoints.push_back(kpt); } } + +Ptr SimpleBlobDetector::create(const SimpleBlobDetector::Params& params) +{ + return makePtr(params); +} + +} diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index 3163de70e..1facb3eac 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -42,9 +42,7 @@ the IEEE International Conference on Computer Vision (ICCV2011). */ -#include -#include -#include +#include "precomp.hpp" #include #include @@ -53,7 +51,6 @@ namespace cv { - class BRISK_Impl : public BRISK { public: @@ -62,18 +59,37 @@ public: explicit BRISK_Impl(const std::vector &radiusList, const std::vector &numberList, float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); + virtual ~BRISK_Impl(); + + int descriptorSize() const + { + return strings_; + } + + int descriptorType() const + { + return CV_8U; + } + + int defaultNorm() const + { + return NORM_HAMMING; + } + // call this to generate the kernel: // circle of radius r (pixels), with n points; // short pairings with dMax, long pairings with dMin - void generateKernel(std::vector &radiusList, - std::vector &numberList, float dMax=5.85f, float dMin=8.2f, - std::vector indexChange=std::vector()); + void generateKernel(const std::vector &radiusList, + const std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + const std::vector &indexChange=std::vector()); + + void detectAndCompute( InputArray image, InputArray mask, + CV_OUT std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints ); protected: - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector& keypoints) const; void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector& keypoints, OutputArray descriptors, bool doDescriptors, bool doOrientation, @@ -256,16 +272,16 @@ protected: static const float basicSize_; }; -const float BRISK::basicSize_ = 12.0f; -const unsigned int BRISK::scales_ = 64; -const float BRISK::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted... -const unsigned int BRISK::n_rot_ = 1024; // discretization of the rotation look-up +const float BRISK_Impl::basicSize_ = 12.0f; +const unsigned int BRISK_Impl::scales_ = 64; +const float BRISK_Impl::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted... 
+const unsigned int BRISK_Impl::n_rot_ = 1024; // discretization of the rotation look-up const float BriskScaleSpace::safetyFactor_ = 1.0f; const float BriskScaleSpace::basicSize_ = 12.0f; // constructors -BRISK::BRISK(int thresh, int octaves_in, float patternScale) +BRISK_Impl::BRISK_Impl(int thresh, int octaves_in, float patternScale) { threshold = thresh; octaves = octaves_in; @@ -291,10 +307,12 @@ BRISK::BRISK(int thresh, int octaves_in, float patternScale) nList[4] = 20; generateKernel(rList, nList, (float)(5.85 * patternScale), (float)(8.2 * patternScale)); - } -BRISK::BRISK(std::vector &radiusList, std::vector &numberList, float dMax, float dMin, - std::vector indexChange) + +BRISK_Impl::BRISK_Impl(const std::vector &radiusList, + const std::vector &numberList, + float dMax, float dMin, + const std::vector indexChange) { generateKernel(radiusList, numberList, dMax, dMin, indexChange); threshold = 20; @@ -302,10 +320,12 @@ BRISK::BRISK(std::vector &radiusList, std::vector &numberList, float } void -BRISK::generateKernel(std::vector &radiusList, std::vector &numberList, float dMax, - float dMin, std::vector indexChange) +BRISK_Impl::generateKernel(const std::vector &radiusList, + const std::vector &numberList, + float dMax, float dMin, + const std::vector& _indexChange) { - + std::vector indexChange = _indexChange; dMax_ = dMax; dMin_ = dMin; @@ -427,7 +447,7 @@ BRISK::generateKernel(std::vector &radiusList, std::vector &numberLi // simple alternative: inline int -BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x, +BRISK_Impl::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x, const float key_y, const unsigned int scale, const unsigned int rot, const unsigned int point) const { @@ -594,8 +614,8 @@ RoiPredicate(const float minX, const float minY, const float maxX, const float m // computes the descriptor void -BRISK::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, - OutputArray _descriptors, bool useProvidedKeypoints) const +BRISK_Impl::detectAndCompute( InputArray _image, InputArray _mask, std::vector& keypoints, + OutputArray _descriptors, bool useProvidedKeypoints) { bool doOrientation=true; if (useProvidedKeypoints) @@ -609,7 +629,7 @@ BRISK::operator()( InputArray _image, InputArray _mask, std::vector& k } void -BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector& keypoints, +BRISK_Impl::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector& keypoints, OutputArray _descriptors, bool doDescriptors, bool doOrientation, bool useProvidedKeypoints) const { @@ -775,25 +795,8 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s delete[] _values; } -int -BRISK::descriptorSize() const -{ - return strings_; -} -int -BRISK::descriptorType() const -{ - return CV_8U; -} - -int -BRISK::defaultNorm() const -{ - return NORM_HAMMING; -} - -BRISK::~BRISK() +BRISK_Impl::~BRISK_Impl() { delete[] patternPoints_; delete[] shortPairs_; @@ -803,14 +806,7 @@ BRISK::~BRISK() } void -BRISK::operator()(InputArray image, InputArray mask, std::vector& keypoints) const -{ - computeKeypointsNoOrientation(image, mask, keypoints); - computeDescriptorsAndOrOrientation(image, mask, keypoints, cv::noArray(), false, true, true); -} - -void -BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::vector& keypoints) const +BRISK_Impl::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, 
std::vector& keypoints) const { Mat image = _image.getMat(), mask = _mask.getMat(); if( image.type() != CV_8UC1 ) @@ -821,20 +817,7 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v briskScaleSpace.getKeypoints(threshold, keypoints); // remove invalid points - removeInvalidPoints(mask, keypoints); -} - - -void -BRISK::detectImpl( InputArray image, std::vector& keypoints, InputArray mask) const -{ - (*this)(image.getMat(), mask.getMat(), keypoints); -} - -void - BRISK::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const -{ - (*this)(image, Mat(), keypoints, descriptors, true); + KeyPointsFilter::runByPixelsMask(keypoints, mask); } // construct telling the octaves number: @@ -2084,7 +2067,7 @@ BriskLayer::BriskLayer(const cv::Mat& img_in, float scale_in, float offset_in) scale_ = scale_in; offset_ = offset_in; // create an agast detector - fast_9_16_ = makePtr(1, true, FastFeatureDetector::TYPE_9_16); + fast_9_16_ = FastFeatureDetector::create(1, true, FastFeatureDetector::TYPE_9_16); makeOffsets(pixel_5_8_, (int)img_.step, 8); makeOffsets(pixel_9_16_, (int)img_.step, 16); } @@ -2106,7 +2089,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode) offset_ = 0.5f * scale_ - 0.5f; } scores_ = cv::Mat::zeros(img_.rows, img_.cols, CV_8U); - fast_9_16_ = makePtr(1, false, FastFeatureDetector::TYPE_9_16); + fast_9_16_ = FastFeatureDetector::create(1, false, FastFeatureDetector::TYPE_9_16); makeOffsets(pixel_5_8_, (int)img_.step, 8); makeOffsets(pixel_9_16_, (int)img_.step, 16); } @@ -2318,4 +2301,16 @@ BriskLayer::twothirdsample(const cv::Mat& srcimg, cv::Mat& dstimg) resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA); } +Ptr BRISK::create(int thresh, int octaves, float patternScale) +{ + return makePtr(thresh, octaves, patternScale); +} + +// custom setup +Ptr BRISK::create(const std::vector &radiusList, const std::vector &numberList, + float dMax, float dMin, const std::vector& indexChange) +{ + return makePtr(radiusList, numberList, dMax, dMin, indexChange); +} + } diff --git a/modules/features2d/src/feature2d.cpp b/modules/features2d/src/feature2d.cpp new file mode 100644 index 000000000..039b33d35 --- /dev/null +++ b/modules/features2d/src/feature2d.cpp @@ -0,0 +1,159 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +using std::vector; + +Feature2D::~Feature2D() {} + +/* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + */ +void Feature2D::detect( InputArray image, + std::vector& keypoints, + InputArray mask ) +{ + detectAndCompute(image, mask, keypoints, noArray(), false); +} + + +void Feature2D::detect( InputArrayOfArrays _images, + std::vector >& keypoints, + InputArrayOfArrays _masks ) +{ + vector images, masks; + + _images.getMatVector(images); + size_t i, nimages = images.size(); + + if( !_masks.empty() ) + { + _masks.getMatVector(masks); + CV_Assert(masks.size() == nimages); + } + + keypoints.resize(nimages); + + for( i = 0; i < nimages; i++ ) + { + detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] ); + } +} + +/* + * Compute the descriptors for a set of keypoints in an image. + * image The image. + * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. + * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. + */ +void Feature2D::compute( InputArray image, + std::vector& keypoints, + OutputArray descriptors ) +{ + detectAndCompute(image, noArray(), keypoints, descriptors, true); +} + +void Feature2D::compute( InputArrayOfArrays _images, + std::vector >& keypoints, + OutputArrayOfArrays _descriptors ) +{ + if( !_descriptors.needed() ) + return; + + vector images; + + _images.getMatVector(images); + size_t i, nimages = images.size(); + + CV_Assert( keypoints.size() == nimages ); + CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT ); + + vector& descriptors = *(vector*)_descriptors.getObj(); + descriptors.resize(nimages); + + for( i = 0; i < nimages; i++ ) + { + compute(images[i], keypoints[i], descriptors[i]); + } +} + + +/* Detects keypoints and computes the descriptors */ +void Feature2D::detectAndCompute( InputArray, InputArray, + std::vector&, + OutputArray, + bool ) +{ + CV_Error(Error::StsNotImplemented, ""); +} + +int Feature2D::descriptorSize() const +{ + return 0; +} + +int Feature2D::descriptorType() const +{ + return CV_32F; +} + +int Feature2D::defaultNorm() const +{ + int tp = descriptorType(); + return tp == CV_8U ? 
NORM_HAMMING : NORM_L2; +} + +// Return true if detector object is empty +bool Feature2D::empty() const +{ + return true; +} + +} diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp deleted file mode 100644 index b23a8899b..000000000 --- a/modules/features2d/src/features2d_init.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -#if 0 - -using namespace cv; - -Ptr Feature2D::create( const String& feature2DType ) -{ - return Algorithm::create("Feature2D." + feature2DType); -} - -/////////////////////// AlgorithmInfo for various detector & descriptors //////////////////////////// - -/* NOTE!!! - All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d(). - Otherwise, linker may throw away some seemingly unused stuff. 
-*/ - -CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK", - obj.info()->addParam(obj, "thres", obj.threshold); - obj.info()->addParam(obj, "octaves", obj.octaves)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST", - obj.info()->addParam(obj, "threshold", obj.threshold); - obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression); - obj.info()->addParam(obj, "type", obj.type)) - - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(MSER, "Feature2D.MSER", - obj.info()->addParam(obj, "delta", obj.delta); - obj.info()->addParam(obj, "minArea", obj.minArea); - obj.info()->addParam(obj, "maxArea", obj.maxArea); - obj.info()->addParam(obj, "maxVariation", obj.maxVariation); - obj.info()->addParam(obj, "minDiversity", obj.minDiversity); - obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution); - obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold); - obj.info()->addParam(obj, "minMargin", obj.minMargin); - obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(ORB, "Feature2D.ORB", - obj.info()->addParam(obj, "nFeatures", obj.nfeatures); - obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor); - obj.info()->addParam(obj, "nLevels", obj.nlevels); - obj.info()->addParam(obj, "firstLevel", obj.firstLevel); - obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold); - obj.info()->addParam(obj, "patchSize", obj.patchSize); - obj.info()->addParam(obj, "WTA_K", obj.WTA_K); - obj.info()->addParam(obj, "scoreType", obj.scoreType); - obj.info()->addParam(obj, "fastThreshold", obj.fastThreshold)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT", - obj.info()->addParam(obj, "nfeatures", obj.nfeatures); - obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel); - obj.info()->addParam(obj, "minDistance", obj.minDistance); - obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector); - obj.info()->addParam(obj, "k", obj.k)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE", - obj.info()->addParam(obj, "upright", obj.upright); - obj.info()->addParam(obj, "extended", obj.extended); - obj.info()->addParam(obj, "threshold", obj.threshold); - obj.info()->addParam(obj, "octaves", obj.octaves); - obj.info()->addParam(obj, "sublevels", obj.sublevels); - obj.info()->addParam(obj, "diffusivity", obj.diffusivity)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(AKAZE, "Feature2D.AKAZE", - obj.info()->addParam(obj, "descriptor", obj.descriptor); - obj.info()->addParam(obj, "descriptor_channels", obj.descriptor_channels); - obj.info()->addParam(obj, "descriptor_size", obj.descriptor_size); - obj.info()->addParam(obj, "threshold", obj.threshold); - obj.info()->addParam(obj, "octaves", obj.octaves); - obj.info()->addParam(obj, "sublevels", obj.sublevels); - obj.info()->addParam(obj, "diffusivity", obj.diffusivity)) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - - - 
-CV_INIT_ALGORITHM(SimpleBlobDetector, "Feature2D.SimpleBlob", - obj.info()->addParam(obj, "thresholdStep", obj.params.thresholdStep); - obj.info()->addParam(obj, "minThreshold", obj.params.minThreshold); - obj.info()->addParam(obj, "maxThreshold", obj.params.maxThreshold); - obj.info()->addParam_(obj, "minRepeatability", (sizeof(size_t) == sizeof(uint64))?Param::UINT64 : Param::UNSIGNED_INT, &obj.params.minRepeatability, false, 0, 0); - obj.info()->addParam(obj, "minDistBetweenBlobs", obj.params.minDistBetweenBlobs); - obj.info()->addParam(obj, "filterByColor", obj.params.filterByColor); - obj.info()->addParam(obj, "blobColor", obj.params.blobColor); - obj.info()->addParam(obj, "filterByArea", obj.params.filterByArea); - obj.info()->addParam(obj, "maxArea", obj.params.maxArea); - obj.info()->addParam(obj, "filterByCircularity", obj.params.filterByCircularity); - obj.info()->addParam(obj, "maxCircularity", obj.params.maxCircularity); - obj.info()->addParam(obj, "filterByInertia", obj.params.filterByInertia); - obj.info()->addParam(obj, "maxInertiaRatio", obj.params.maxInertiaRatio); - obj.info()->addParam(obj, "filterByConvexity", obj.params.filterByConvexity); - obj.info()->addParam(obj, "maxConvexity", obj.params.maxConvexity); - ) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -class CV_EXPORTS HarrisDetector : public GFTTDetector -{ -public: - HarrisDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, - int blockSize=3, bool useHarrisDetector=true, double k=0.04 ); - AlgorithmInfo* info() const; -}; - -inline HarrisDetector::HarrisDetector( int _maxCorners, double _qualityLevel, double _minDistance, - int _blockSize, bool _useHarrisDetector, double _k ) - : GFTTDetector( _maxCorners, _qualityLevel, _minDistance, _blockSize, _useHarrisDetector, _k ) {} - -CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS", - obj.info()->addParam(obj, "nfeatures", obj.nfeatures); - obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel); - obj.info()->addParam(obj, "minDistance", obj.minDistance); - obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector); - obj.info()->addParam(obj, "k", obj.k)) - -//////////////////////////////////////////////////////////////////////////////////////////////////////////// - -CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher", - obj.info()->addParam(obj, "normType", obj.normType); - obj.info()->addParam(obj, "crossCheck", obj.crossCheck)) - -CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",) - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -bool cv::initModule_features2d(void) -{ - bool all = true; - all &= !BRISK_info_auto.name().empty(); - all &= !FastFeatureDetector_info_auto.name().empty(); - all &= !MSER_info_auto.name().empty(); - all &= !ORB_info_auto.name().empty(); - all &= !GFTTDetector_info_auto.name().empty(); - all &= !KAZE_info_auto.name().empty(); - all &= !AKAZE_info_auto.name().empty(); - all &= !HarrisDetector_info_auto.name().empty(); - all &= !BFMatcher_info_auto.name().empty(); - all &= !FlannBasedMatcher_info_auto.name().empty(); - - return all; -} - -#endif diff --git a/modules/features2d/src/detectors.cpp b/modules/features2d/src/gftt.cpp similarity index 100% rename from modules/features2d/src/detectors.cpp rename to modules/features2d/src/gftt.cpp diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 
d16e988f6..83eeecc4e 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -140,5 +140,11 @@ namespace cv int diffusivity; }; - + Ptr KAZE::create(bool extended, bool upright, + float threshold, + int octaves, int sublevels, + int diffusivity) + { + return makePtr(extended, upright, threshold, octaves, sublevels, diffusivity); + } } diff --git a/modules/features2d/src/kaze/AKAZEConfig.h b/modules/features2d/src/kaze/AKAZEConfig.h index 2ea21f370..806d25c80 100644 --- a/modules/features2d/src/kaze/AKAZEConfig.h +++ b/modules/features2d/src/kaze/AKAZEConfig.h @@ -22,12 +22,12 @@ struct AKAZEOptions { , soffset(1.6f) , derivative_factor(1.5f) , sderivatives(1.0) - , diffusivity(cv::DIFF_PM_G2) + , diffusivity(KAZE::DIFF_PM_G2) , dthreshold(0.001f) , min_dthreshold(0.00001f) - , descriptor(cv::DESCRIPTOR_MLDB) + , descriptor(AKAZE::DESCRIPTOR_MLDB) , descriptor_size(0) , descriptor_channels(3) , descriptor_pattern_size(10) diff --git a/modules/features2d/src/kaze/AKAZEFeatures.cpp b/modules/features2d/src/kaze/AKAZEFeatures.cpp index 59e260fc4..798858403 100644 --- a/modules/features2d/src/kaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/kaze/AKAZEFeatures.cpp @@ -61,14 +61,14 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { for (int j = 0; j < options_.nsublevels; j++) { TEvolution step; - step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lyy = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lsmooth = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lx = Mat::zeros(level_height, level_width, CV_32F); + step.Ly = Mat::zeros(level_height, level_width, CV_32F); + step.Lxx = Mat::zeros(level_height, level_width, CV_32F); + step.Lxy = Mat::zeros(level_height, level_width, CV_32F); + step.Lyy = Mat::zeros(level_height, level_width, CV_32F); + step.Lt = Mat::zeros(level_height, level_width, CV_32F); + step.Ldet = Mat::zeros(level_height, level_width, CV_32F); + step.Lsmooth = Mat::zeros(level_height, level_width, CV_32F); step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); step.sigma_size = fRound(step.esigma); step.etime = 0.5f*(step.esigma*step.esigma); @@ -97,7 +97,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully, -1 otherwise */ -int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) +int AKAZEFeatures::Create_Nonlinear_Scale_Space(const Mat& img) { CV_Assert(evolution_.size() > 0); @@ -107,8 +107,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); // Allocate memory for the flow and step images - cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); - cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); // First compute the kcontrast factor options_.kcontrast = compute_k_percentile(img, 
options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0); @@ -121,8 +121,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) options_.kcontrast = options_.kcontrast*0.75f; // Allocate memory for the resized flow and step images - Lflow = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); - Lstep = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); + Lflow = Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); + Lstep = Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F); } else { evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); @@ -136,16 +136,16 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) // Compute the conductivity equation switch (options_.diffusivity) { - case cv::DIFF_PM_G1: + case KAZE::DIFF_PM_G1: pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); break; - case cv::DIFF_PM_G2: + case KAZE::DIFF_PM_G2: pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); break; - case cv::DIFF_WEICKERT: + case KAZE::DIFF_WEICKERT: weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); break; - case cv::DIFF_CHARBONNIER: + case KAZE::DIFF_CHARBONNIER: charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); break; default: @@ -155,7 +155,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) // Perform FED n inner steps for (int j = 0; j < nsteps_[i - 1]; j++) { - cv::details::kaze::nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]); + nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]); } } @@ -167,7 +167,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Feature_Detection(std::vector& kpts) +void AKAZEFeatures::Feature_Detection(std::vector& kpts) { kpts.clear(); Compute_Determinant_Hessian_Response(); @@ -176,7 +176,7 @@ void AKAZEFeatures::Feature_Detection(std::vector& kpts) } /* ************************************************************************* */ -class MultiscaleDerivativesAKAZEInvoker : public cv::ParallelLoopBody +class MultiscaleDerivativesAKAZEInvoker : public ParallelLoopBody { public: explicit MultiscaleDerivativesAKAZEInvoker(std::vector& ev, const AKAZEOptions& opt) @@ -185,7 +185,7 @@ public: { } - void operator()(const cv::Range& range) const + void operator()(const Range& range) const { std::vector& evolution = *evolution_; @@ -219,7 +219,7 @@ private: */ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { - cv::parallel_for_(cv::Range(0, (int)evolution_.size()), + parallel_for_(Range(0, (int)evolution_.size()), MultiscaleDerivativesAKAZEInvoker(evolution_, options_)); } @@ -253,7 +253,7 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { * @brief This method finds extrema in the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) +void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { float value = 0.0; @@ -261,8 +261,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) int npoints = 0, id_repeated = 0; int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; bool is_extremum = false, is_repeated = false, is_out = false; - cv::KeyPoint point; - vector kpts_aux; + KeyPoint point; + vector kpts_aux; // 
Set maximum size if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) { @@ -365,7 +365,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) for (size_t i = 0; i < kpts_aux.size(); i++) { is_repeated = false; - const cv::KeyPoint& pt = kpts_aux[i]; + const KeyPoint& pt = kpts_aux[i]; for (size_t j = i + 1; j < kpts_aux.size(); j++) { // Compare response with the upper scale @@ -392,7 +392,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) +void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { float Dx = 0.0, Dy = 0.0, ratio = 0.0; float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; @@ -433,7 +433,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) b(0) = -Dx; b(1) = -Dy; - cv::solve(A, b, dst, DECOMP_LU); + solve(A, b, dst, DECOMP_LU); if (fabs(dst(0)) <= 1.0f && fabs(dst(1)) <= 1.0f) { kpts[i].pt.x = x + dst(0); @@ -456,10 +456,10 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) /* ************************************************************************* */ -class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody +class SURF_Descriptor_Upright_64_Invoker : public ParallelLoopBody { public: - SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -474,18 +474,18 @@ public: } } - void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_SURF_Descriptor_Upright_64(const KeyPoint& kpt, float* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; }; -class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +class SURF_Descriptor_64_Invoker : public ParallelLoopBody { public: - SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + SURF_Descriptor_64_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -501,18 +501,18 @@ public: } } - void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_SURF_Descriptor_64(const KeyPoint& kpt, float* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; }; -class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody +class MSURF_Upright_Descriptor_64_Invoker : public ParallelLoopBody { public: - MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -527,18 +527,18 @@ public: } } - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; }; -class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +class MSURF_Descriptor_64_Invoker : public 
ParallelLoopBody { public: - MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) + MSURF_Descriptor_64_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -554,18 +554,18 @@ public: } } - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Descriptor_64(const KeyPoint& kpt, float* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; }; -class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +class Upright_MLDB_Full_Descriptor_Invoker : public ParallelLoopBody { public: - Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -581,24 +581,24 @@ public: } } - void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; AKAZEOptions* options_; }; -class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +class Upright_MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody { public: - Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, + Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + Mat& desc, std::vector& evolution, AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) + Mat descriptorSamples, + Mat descriptorBits) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -616,22 +616,22 @@ public: } } - void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; AKAZEOptions* options_; - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. - cv::Mat descriptorBits_; + Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ Mat descriptorBits_; }; -class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +class MLDB_Full_Descriptor_Invoker : public ParallelLoopBody { public: - MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + MLDB_Full_Descriptor_Invoker(std::vector& kpts, Mat& desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -648,28 +648,28 @@ public: } } - void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc) const; void MLDB_Fill_Values(float* values, int sample_step, int level, float xf, float yf, float co, float si, float scale) const; void MLDB_Binary_Comparisons(float* values, unsigned char* desc, int count, int& dpos) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; AKAZEOptions* options_; }; -class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +class MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody { public: - MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, + MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + Mat& desc, std::vector& evolution, AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) + Mat descriptorSamples, + Mat descriptorBits) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) @@ -688,16 +688,16 @@ public: } } - void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc) const; private: - std::vector* keypoints_; - cv::Mat* descriptors_; + std::vector* keypoints_; + Mat* descriptors_; std::vector* evolution_; AKAZEOptions* options_; - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. - cv::Mat descriptorBits_; + Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ Mat descriptorBits_; }; /** @@ -705,7 +705,7 @@ private: * @param kpts Vector of detected keypoints * @param desc Matrix to store the descriptors */ -void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) +void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, Mat& desc) { for(size_t i = 0; i < kpts.size(); i++) { @@ -714,17 +714,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat // Allocate memory for the matrix with the descriptors if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { - desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); + desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } else { // We use the full length binary descriptor -> 486 bits if (options_.descriptor_size == 0) { int t = (6 + 36 + 120)*options_.descriptor_channels; - desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1); + desc = Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1); } else { // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); + desc = Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); } } @@ -732,28 +732,28 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat { case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); + parallel_for_(Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; case AKAZE::DESCRIPTOR_KAZE: { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); + parallel_for_(Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else - cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); } break; case AKAZE::DESCRIPTOR_MLDB: { if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + parallel_for_(Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + parallel_for_(Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); } break; } @@ -766,7 +766,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) +void AKAZEFeatures::Compute_Main_Orientation(KeyPoint& kpt, const std::vector& evolution_) { /* ************************************************************************* */ /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right @@ -854,7 +854,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { +void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -977,7 +977,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { +void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1101,7 +1101,7 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp * @param kpt Input keypoint * @param desc Descriptor vector */ -void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { +void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc) const { float di = 0.0, dx = 0.0, dy = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; @@ -1114,9 +1114,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons const std::vector& evolution = *evolution_; // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); + Mat values_1 = Mat::zeros(4, options.descriptor_channels, CV_32FC1); + Mat values_2 = Mat::zeros(9, options.descriptor_channels, CV_32FC1); + Mat values_3 = Mat::zeros(16, options.descriptor_channels, CV_32FC1); // Get the information from the keypoint ratio = (float)(1 << kpt.octave); @@ -1395,7 +1395,7 @@ void MLDB_Full_Descriptor_Invoker::MLDB_Binary_Comparisons(float* values, unsign * @param kpt Input keypoint * @param desc Descriptor vector */ -void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { +void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc) const { const int max_channels = 3; CV_Assert(options_->descriptor_channels <= max_channels); @@ -1428,7 +1428,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& * @param kpt Input keypoint * @param desc Descriptor vector */ -void 
MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { +void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc) const { float di = 0.f, dx = 0.f, dy = 0.f; float rx = 0.f, ry = 0.f; @@ -1449,7 +1449,7 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi float si = sin(angle); // Allocate memory for the matrix of values - cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); + Mat values = Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); // Sample everything, but only do the comparisons vector steps(3); @@ -1522,7 +1522,7 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi * @param kpt Input keypoint * @param desc Descriptor vector */ -void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { +void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc) const { float di = 0.0f, dx = 0.0f, dy = 0.0f; float rx = 0.0f, ry = 0.0f; @@ -1540,7 +1540,7 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( float xf = kpt.pt.x / ratio; // Allocate memory for the matrix of values - Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); + Mat values = Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); vector steps(3); steps.at(0) = options.descriptor_pattern_size; @@ -1614,7 +1614,7 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( * @note The function keeps the 18 bits (3-channels by 6 comparisons) of the * coarser grid, since it provides the most robust estimations */ -void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, +void generateDescriptorSubsample(Mat& sampleList, Mat& comparisons, int nbits, int pattern_size, int nchannels) { int ssz = 0; @@ -1718,4 +1718,3 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int } } -} diff --git a/modules/features2d/src/kaze/AKAZEFeatures.h b/modules/features2d/src/kaze/AKAZEFeatures.h index 9119c97f2..14800bc37 100644 --- a/modules/features2d/src/kaze/AKAZEFeatures.h +++ b/modules/features2d/src/kaze/AKAZEFeatures.h @@ -11,10 +11,12 @@ /* ************************************************************************* */ // Includes -#include "../precomp.hpp" #include "AKAZEConfig.h" #include "TEvolution.h" +namespace cv +{ + /* ************************************************************************* */ // AKAZE Class Declaration class AKAZEFeatures { @@ -22,7 +24,7 @@ class AKAZEFeatures { private: AKAZEOptions options_; ///< Configuration options for AKAZE - std::vector evolution_; ///< Vector of nonlinear diffusion evolution + std::vector evolution_; ///< Vector of nonlinear diffusion evolution /// FED parameters int ncycles_; ///< Number of cycles @@ -59,4 +61,6 @@ public: void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, int pattern_size, int nchannels); +} + #endif diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 69e9a4b69..702a8a021 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -20,14 +20,15 @@ * @date Jan 21, 2012 * @author Pablo F. 
Alcantarilla */ - +#include "../precomp.hpp" #include "KAZEFeatures.h" #include "utils.h" +namespace cv +{ + // Namespaces using namespace std; -using namespace cv; -using namespace cv::details::kaze; /* ************************************************************************* */ /** @@ -52,19 +53,20 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) void KAZEFeatures::Allocate_Memory_Evolution(void) { // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= options_.omax - 1; i++) { - for (int j = 0; j <= options_.nsublevels - 1; j++) { - + for (int i = 0; i <= options_.omax - 1; i++) + { + for (int j = 0; j <= options_.nsublevels - 1; j++) + { TEvolution aux; - aux.Lx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Ly = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Lxx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Lxy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Lyy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Lt = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Lsmooth = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.Ldet = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F); - aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i); + aux.Lx = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Ly = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lxx = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lxy = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lyy = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lt = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Lsmooth = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.Ldet = Mat::zeros(options_.img_height, options_.img_width, CV_32F); + aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i); aux.etime = 0.5f*(aux.esigma*aux.esigma); aux.sigma_size = fRound(aux.esigma); aux.octave = i; @@ -74,7 +76,8 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { } // Allocate memory for the FED number of cycles and time steps - for (size_t i = 1; i < evolution_.size(); i++) { + for (size_t i = 1; i < evolution_.size(); i++) + { int naux = 0; vector tau; float ttime = 0.0; @@ -92,47 +95,43 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully. 
-1 otherwise */ -int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) +int KAZEFeatures::Create_Nonlinear_Scale_Space(const Mat &img) { CV_Assert(evolution_.size() > 0); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives); // Firstly compute the kcontrast factor Compute_KContrast(evolution_[0].Lt, options_.kcontrast_percentille); // Allocate memory for the flow and step images - cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); - cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); + Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F); // Now generate the rest of evolution levels - for (size_t i = 1; i < evolution_.size(); i++) { - + for (size_t i = 1; i < evolution_.size(); i++) + { evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); - gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives); + gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives); // Compute the Gaussian derivatives Lx and Ly Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT); Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT); // Compute the conductivity equation - if (options_.diffusivity == cv::DIFF_PM_G1) { - pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); - } - else if (options_.diffusivity == cv::DIFF_PM_G2) { - pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); - } - else if (options_.diffusivity == cv::DIFF_WEICKERT) { - weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); - } + if (options_.diffusivity == KAZE::DIFF_PM_G1) + pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + else if (options_.diffusivity == KAZE::DIFF_PM_G2) + pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); + else if (options_.diffusivity == KAZE::DIFF_WEICKERT) + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast); // Perform FED n inner steps - for (int j = 0; j < nsteps_[i - 1]; j++) { + for (int j = 0; j < nsteps_[i - 1]; j++) nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]); - } } return 0; @@ -144,7 +143,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) * @param img Input image * @param kpercentile Percentile of the gradient histogram */ -void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) +void KAZEFeatures::Compute_KContrast(const Mat &img, const float &kpercentile) { options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0); } @@ -181,7 +180,7 @@ void KAZEFeatures::Compute_Detector_Response(void) * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of keypoints */ -void KAZEFeatures::Feature_Detection(std::vector& kpts) +void KAZEFeatures::Feature_Detection(std::vector& kpts) { 
kpts.clear(); Compute_Detector_Response(); @@ -190,14 +189,14 @@ void KAZEFeatures::Feature_Detection(std::vector& kpts) } /* ************************************************************************* */ -class MultiscaleDerivativesKAZEInvoker : public cv::ParallelLoopBody +class MultiscaleDerivativesKAZEInvoker : public ParallelLoopBody { public: explicit MultiscaleDerivativesKAZEInvoker(std::vector& ev) : evolution_(&ev) { } - void operator()(const cv::Range& range) const + void operator()(const Range& range) const { std::vector& evolution = *evolution_; for (int i = range.start; i < range.end; i++) @@ -226,74 +225,79 @@ private: */ void KAZEFeatures::Compute_Multiscale_Derivatives(void) { - cv::parallel_for_(cv::Range(0, (int)evolution_.size()), + parallel_for_(Range(0, (int)evolution_.size()), MultiscaleDerivativesKAZEInvoker(evolution_)); } /* ************************************************************************* */ -class FindExtremumKAZEInvoker : public cv::ParallelLoopBody +class FindExtremumKAZEInvoker : public ParallelLoopBody { public: - explicit FindExtremumKAZEInvoker(std::vector& ev, std::vector >& kpts_par, + explicit FindExtremumKAZEInvoker(std::vector& ev, std::vector >& kpts_par, const KAZEOptions& options) : evolution_(&ev), kpts_par_(&kpts_par), options_(options) { } - void operator()(const cv::Range& range) const + void operator()(const Range& range) const { std::vector& evolution = *evolution_; - std::vector >& kpts_par = *kpts_par_; + std::vector >& kpts_par = *kpts_par_; for (int i = range.start; i < range.end; i++) { float value = 0.0; bool is_extremum = false; - for (int ix = 1; ix < options_.img_height - 1; ix++) { - for (int jx = 1; jx < options_.img_width - 1; jx++) { + for (int ix = 1; ix < options_.img_height - 1; ix++) + { + for (int jx = 1; jx < options_.img_width - 1; jx++) + { + is_extremum = false; + value = *(evolution[i].Ldet.ptr(ix)+jx); - is_extremum = false; - value = *(evolution[i].Ldet.ptr(ix)+jx); - - // Filter the points with the detector threshold - if (value > options_.dthreshold) { - if (value >= *(evolution[i].Ldet.ptr(ix)+jx - 1)) { - // First check on the same scale - if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1)) { - // Now check on the lower scale - if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0)) { - // Now check on the upper scale - if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0)) { - is_extremum = true; - } - } - } - } - } - - // Add the point of interest!! - if (is_extremum == true) { - cv::KeyPoint point; - point.pt.x = (float)jx; - point.pt.y = (float)ix; - point.response = fabs(value); - point.size = evolution[i].esigma; - point.octave = (int)evolution[i].octave; - point.class_id = i; - - // We use the angle field for the sublevel value - // Then, we will replace this angle field with the main orientation - point.angle = static_cast(evolution[i].sublevel); - kpts_par[i - 1].push_back(point); + // Filter the points with the detector threshold + if (value > options_.dthreshold) + { + if (value >= *(evolution[i].Ldet.ptr(ix)+jx - 1)) + { + // First check on the same scale + if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1)) + { + // Now check on the lower scale + if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0)) + { + // Now check on the upper scale + if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0)) + is_extremum = true; + } } + } } + + // Add the point of interest!! 
+ if (is_extremum) + { + KeyPoint point; + point.pt.x = (float)jx; + point.pt.y = (float)ix; + point.response = fabs(value); + point.size = evolution[i].esigma; + point.octave = (int)evolution[i].octave; + point.class_id = i; + + // We use the angle field for the sublevel value + // Then, we will replace this angle field with the main orientation + point.angle = static_cast(evolution[i].sublevel); + kpts_par[i - 1].push_back(point); + } + } } } } private: std::vector* evolution_; - std::vector >* kpts_par_; + std::vector >* kpts_par_; KAZEOptions options_; }; @@ -304,7 +308,7 @@ private: * @param kpts Vector of keypoints * @note We compute features for each of the nonlinear scale space level in a different processing thread */ -void KAZEFeatures::Determinant_Hessian(std::vector& kpts) +void KAZEFeatures::Determinant_Hessian(std::vector& kpts) { int level = 0; float dist = 0.0, smax = 3.0; @@ -325,12 +329,14 @@ void KAZEFeatures::Determinant_Hessian(std::vector& kpts) kpts_par_.push_back(aux); } - cv::parallel_for_(cv::Range(1, (int)evolution_.size()-1), - FindExtremumKAZEInvoker(evolution_, kpts_par_, options_)); + parallel_for_(Range(1, (int)evolution_.size()-1), + FindExtremumKAZEInvoker(evolution_, kpts_par_, options_)); // Now fill the vector of keypoints!!! - for (int i = 0; i < (int)kpts_par_.size(); i++) { - for (int j = 0; j < (int)kpts_par_[i].size(); j++) { + for (int i = 0; i < (int)kpts_par_.size(); i++) + { + for (int j = 0; j < (int)kpts_par_[i].size(); j++) + { level = i + 1; is_extremum = true; is_repeated = false; @@ -388,7 +394,7 @@ void KAZEFeatures::Determinant_Hessian(std::vector& kpts) * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints */ -void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { +void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { int step = 1; int x = 0, y = 0; @@ -482,10 +488,10 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { } /* ************************************************************************* */ -class KAZE_Descriptor_Invoker : public cv::ParallelLoopBody +class KAZE_Descriptor_Invoker : public ParallelLoopBody { public: - KAZE_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& options) + KAZE_Descriptor_Invoker(std::vector &kpts, Mat &desc, std::vector& evolution, const KAZEOptions& options) : kpts_(&kpts) , desc_(&desc) , evolution_(&evolution) @@ -497,10 +503,10 @@ public: { } - void operator() (const cv::Range& range) const + void operator() (const Range& range) const { - std::vector &kpts = *kpts_; - cv::Mat &desc = *desc_; + std::vector &kpts = *kpts_; + Mat &desc = *desc_; std::vector &evolution = *evolution_; for (int i = range.start; i < range.end; i++) @@ -526,13 +532,13 @@ public: } } private: - void Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_KAZE_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; - void Get_KAZE_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const; + void Get_KAZE_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const; + void Get_KAZE_Descriptor_64(const KeyPoint& kpt, float* desc) const; + void Get_KAZE_Upright_Descriptor_128(const KeyPoint& kpt, float* desc) const; + void Get_KAZE_Descriptor_128(const KeyPoint& kpt, float *desc) const; - std::vector * kpts_; - cv::Mat * desc_; + std::vector * kpts_; + Mat * desc_; 
std::vector * evolution_; KAZEOptions options_; }; @@ -543,7 +549,7 @@ private: * @param kpts Vector of keypoints * @param desc Matrix with the feature descriptors */ -void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) +void KAZEFeatures::Feature_Description(std::vector &kpts, Mat &desc) { for(size_t i = 0; i < kpts.size(); i++) { @@ -558,7 +564,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } - cv::parallel_for_(cv::Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_)); + parallel_for_(Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_)); } /* ************************************************************************* */ @@ -568,7 +574,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector& evolution_, const KAZEOptions& options) +void KAZEFeatures::Compute_Main_Orientation(KeyPoint &kpt, const std::vector& evolution_, const KAZEOptions& options) { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0; @@ -647,7 +653,7 @@ void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -775,7 +781,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -904,7 +910,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, fl * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const KeyPoint &kpt, float *desc) const { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1056,7 +1062,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const KeyPoint &kpt, float *desc) const { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx 
= 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1202,3 +1208,5 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, f desc[i] /= len; } } + +} diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 98c830788..934dfff29 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -22,8 +22,8 @@ namespace cv /* ************************************************************************* */ // KAZE Class Declaration -class KAZEFeatures { - +class KAZEFeatures +{ private: /// Parameters of the Nonlinear diffusion class diff --git a/modules/features2d/src/kaze/TEvolution.h b/modules/features2d/src/kaze/TEvolution.h index bc7654ef0..b033bce27 100644 --- a/modules/features2d/src/kaze/TEvolution.h +++ b/modules/features2d/src/kaze/TEvolution.h @@ -8,10 +8,13 @@ #ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__ #define __OPENCV_FEATURES_2D_TEVOLUTION_H__ +namespace cv +{ + /* ************************************************************************* */ /// KAZE/A-KAZE nonlinear diffusion filtering evolution -struct TEvolution { - +struct TEvolution +{ TEvolution() { etime = 0.0f; esigma = 0.0f; @@ -20,11 +23,11 @@ struct TEvolution { sigma_size = 0; } - cv::Mat Lx, Ly; ///< First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives - cv::Mat Lt; ///< Evolution image - cv::Mat Lsmooth; ///< Smoothed image - cv::Mat Ldet; ///< Detector response + Mat Lx, Ly; ///< First order spatial derivatives + Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives + Mat Lt; ///< Evolution image + Mat Lsmooth; ///< Smoothed image + Mat Ldet; ///< Detector response float etime; ///< Evolution time float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2 int octave; ///< Image octave @@ -32,4 +35,6 @@ struct TEvolution { int sigma_size; ///< Integer esigma. For computing the feature detector responses }; +} + #endif diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index 1c0d70a75..85ed5de91 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -22,502 +22,500 @@ * @author Pablo F. 
Alcantarilla */ +#include "../precomp.hpp" #include "nldiffusion_functions.h" #include // Namespaces -using namespace std; -using namespace cv; /* ************************************************************************* */ -namespace cv { - namespace details { - namespace kaze { - - /* ************************************************************************* */ - /** - * @brief This function smoothes an image with a Gaussian kernel - * @param src Input image - * @param dst Output image - * @param ksize_x Kernel size in X-direction (horizontal) - * @param ksize_y Kernel size in Y-direction (vertical) - * @param sigma Kernel standard deviation - */ - void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { - - int ksize_x_ = 0, ksize_y_ = 0; - - // Compute an appropriate kernel size according to the specified sigma - if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); - ksize_y_ = ksize_x_; - } - - // The kernel size must be and odd number - if ((ksize_x_ % 2) == 0) { - ksize_x_ += 1; - } - - if ((ksize_y_ % 2) == 0) { - ksize_y_ += 1; - } - - // Perform the Gaussian Smoothing with border replication - GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); - } - - /* ************************************************************************* */ - /** - * @brief This function computes image derivatives with Scharr kernel - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @note Scharr operator approximates better rotation invariance than - * other stencils such as Sobel. 
See Weickert and Scharr, - * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, - * Journal of Visual Communication and Image Representation 2002 - */ - void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { - Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); - } - - /* ************************************************************************* */ - /** - * @brief This function computes the Perona and Malik conductivity coefficient g1 - * g1 = exp(-|dL|^2/k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ - void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - - Size sz = Lx.size(); - float inv_k = 1.0f / (k*k); - for (int y = 0; y < sz.height; y++) { - - const float* Lx_row = Lx.ptr(y); - const float* Ly_row = Ly.ptr(y); - float* dst_row = dst.ptr(y); - - for (int x = 0; x < sz.width; x++) { - dst_row[x] = (-inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x])); - } - } - - exp(dst, dst); - } - - /* ************************************************************************* */ - /** - * @brief This function computes the Perona and Malik conductivity coefficient g2 - * g2 = 1 / (1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ - void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - - Size sz = Lx.size(); - dst.create(sz, Lx.type()); - float k2inv = 1.0f / (k * k); - - for(int y = 0; y < sz.height; y++) { - const float *Lx_row = Lx.ptr(y); - const float *Ly_row = Ly.ptr(y); - float* dst_row = dst.ptr(y); - for(int x = 0; x < sz.width; x++) { - dst_row[x] = 1.0f / (1.0f + ((Lx_row[x] * Lx_row[x] + Ly_row[x] * Ly_row[x]) * k2inv)); - } - } - } - /* ************************************************************************* */ - /** - * @brief This function computes Weickert conductivity coefficient gw - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ - void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - - Size sz = Lx.size(); - float inv_k = 1.0f / (k*k); - for (int y = 0; y < sz.height; y++) { - - const float* Lx_row = Lx.ptr(y); - const float* Ly_row = Ly.ptr(y); - float* dst_row = dst.ptr(y); - - for (int x = 0; x < sz.width; x++) { - float dL = inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]); - dst_row[x] = -3.315f/(dL*dL*dL*dL); - } - } - - exp(dst, dst); - dst = 1.0 - dst; - } - - - /* ************************************************************************* */ - /** - * @brief This function computes Charbonnier conductivity coefficient gc - * gc = 1 / sqrt(1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. 
Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ - void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - - Size sz = Lx.size(); - float inv_k = 1.0f / (k*k); - for (int y = 0; y < sz.height; y++) { - - const float* Lx_row = Lx.ptr(y); - const float* Ly_row = Ly.ptr(y); - float* dst_row = dst.ptr(y); - - for (int x = 0; x < sz.width; x++) { - float den = sqrt(1.0f+inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x])); - dst_row[x] = 1.0f / den; - } - } - } - - - /* ************************************************************************* */ - /** - * @brief This function computes a good empirical value for the k contrast factor - * given an input image, the percentile (0-1), the gradient scale and the number of - * bins in the histogram - * @param img Input image - * @param perc Percentile of the image gradient histogram (0-1) - * @param gscale Scale for computing the image gradient histogram - * @param nbins Number of histogram bins - * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel - * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel - * @return k contrast factor - */ - float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) { - - int nbin = 0, nelements = 0, nthreshold = 0, k = 0; - float kperc = 0.0, modg = 0.0; - float npoints = 0.0; - float hmax = 0.0; - - // Create the array for the histogram - std::vector hist(nbins, 0); - - // Create the matrices - Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F); - Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F); - Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F); - - // Perform the Gaussian convolution - gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); - - // Compute the Gaussian derivatives Lx and Ly - Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT); - Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT); - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - const float *lx = Lx.ptr(i); - const float *ly = Ly.ptr(i); - for (int j = 1; j < gaussian.cols - 1; j++) { - modg = lx[j]*lx[j] + ly[j]*ly[j]; - - // Get the maximum - if (modg > hmax) { - hmax = modg; - } - } - } - hmax = sqrt(hmax); - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - const float *lx = Lx.ptr(i); - const float *ly = Ly.ptr(i); - for (int j = 1; j < gaussian.cols - 1; j++) { - modg = lx[j]*lx[j] + ly[j]*ly[j]; - - // Find the correspondent bin - if (modg != 0.0) { - nbin = (int)floor(nbins*(sqrt(modg) / hmax)); - - if (nbin == nbins) { - nbin--; - } - - hist[nbin]++; - npoints++; - } - } - } - - // Now find the perc of the histogram percentile - nthreshold = (int)(npoints*perc); - - for (k = 0; nelements < nthreshold && k < nbins; k++) { - nelements = nelements + hist[k]; - } - - if (nelements < nthreshold) { - kperc = 0.03f; - } - else { - kperc = hmax*((float)(k) / (float)nbins); - } - - return kperc; - } - - /* ************************************************************************* */ - /** - * @brief This function computes Scharr image derivatives - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @param scale Scale factor for the derivative size - */ - 
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { - Mat kx, ky; - compute_derivative_kernels(kx, ky, xorder, yorder, scale); - sepFilter2D(src, dst, CV_32F, kx, ky); - } - - /* ************************************************************************* */ - /** - * @brief Compute derivative kernels for sizes different than 3 - * @param _kx Horizontal kernel ues - * @param _ky Vertical kernel values - * @param dx Derivative order in X-direction (horizontal) - * @param dy Derivative order in Y-direction (vertical) - * @param scale_ Scale factor or derivative size - */ - void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) { - - int ksize = 3 + 2 * (scale - 1); - - // The standard Scharr kernel - if (scale == 1) { - getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F); - return; - } - - _kx.create(ksize, 1, CV_32F, -1, true); - _ky.create(ksize, 1, CV_32F, -1, true); - Mat kx = _kx.getMat(); - Mat ky = _ky.getMat(); - - float w = 10.0f / 3.0f; - float norm = 1.0f / (2.0f*scale*(w + 2.0f)); - - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? dx : dy; - std::vector kerI(ksize, 0.0f); - - if (order == 0) { - kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm; - } - else if (order == 1) { - kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1; - } - - Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); - temp.copyTo(*kernel); - } - } - - class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody - { - public: - Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize) - : _Ld(&Ld) - , _c(&c) - , _Lstep(&Lstep) - , stepsize(_stepsize) - { - } - - virtual ~Nld_Step_Scalar_Invoker() - { - - } - - void operator()(const cv::Range& range) const - { - cv::Mat& Ld = *_Ld; - const cv::Mat& c = *_c; - cv::Mat& Lstep = *_Lstep; - - for (int i = range.start; i < range.end; i++) - { - const float *c_prev = c.ptr(i - 1); - const float *c_curr = c.ptr(i); - const float *c_next = c.ptr(i + 1); - const float *ld_prev = Ld.ptr(i - 1); - const float *ld_curr = Ld.ptr(i); - const float *ld_next = Ld.ptr(i + 1); - - float *dst = Lstep.ptr(i); - - for (int j = 1; j < Lstep.cols - 1; j++) - { - float xpos = (c_curr[j] + c_curr[j+1])*(ld_curr[j+1] - ld_curr[j]); - float xneg = (c_curr[j-1] + c_curr[j]) *(ld_curr[j] - ld_curr[j-1]); - float ypos = (c_curr[j] + c_next[j]) *(ld_next[j] - ld_curr[j]); - float yneg = (c_prev[j] + c_curr[j]) *(ld_curr[j] - ld_prev[j]); - dst[j] = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - } - } - private: - cv::Mat * _Ld; - const cv::Mat * _c; - cv::Mat * _Lstep; - float stepsize; - }; - - /* ************************************************************************* */ - /** - * @brief This function performs a scalar non-linear diffusion step - * @param Ld2 Output image in the evolution - * @param c Conductivity image - * @param Lstep Previous image in the evolution - * @param stepsize The step size in time units - * @note Forward Euler Scheme 3x3 stencil - * The function c is a scalar value that depends on the gradient norm - * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy - */ - void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { - - cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize), (double)Ld.total()/(1 << 16)); - - float xneg, xpos, yneg, ypos; - float* dst = Lstep.ptr(0); - const float* cprv = NULL; - const 
float* ccur = c.ptr(0); - const float* cnxt = c.ptr(1); - const float* ldprv = NULL; - const float* ldcur = Ld.ptr(0); - const float* ldnxt = Ld.ptr(1); - for (int j = 1; j < Lstep.cols - 1; j++) { - xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]); - xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]); - ypos = (ccur[j] + cnxt[j]) * (ldnxt[j] - ldcur[j]); - dst[j] = 0.5f*stepsize*(xpos - xneg + ypos); - } - - dst = Lstep.ptr(Lstep.rows - 1); - ccur = c.ptr(Lstep.rows - 1); - cprv = c.ptr(Lstep.rows - 2); - ldcur = Ld.ptr(Lstep.rows - 1); - ldprv = Ld.ptr(Lstep.rows - 2); - - for (int j = 1; j < Lstep.cols - 1; j++) { - xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]); - xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]); - yneg = (cprv[j] + ccur[j]) * (ldcur[j] - ldprv[j]); - dst[j] = 0.5f*stepsize*(xpos - xneg - yneg); - } - - ccur = c.ptr(1); - ldcur = Ld.ptr(1); - cprv = c.ptr(0); - ldprv = Ld.ptr(0); - - int r0 = Lstep.cols - 1; - int r1 = Lstep.cols - 2; - - for (int i = 1; i < Lstep.rows - 1; i++) { - cnxt = c.ptr(i + 1); - ldnxt = Ld.ptr(i + 1); - dst = Lstep.ptr(i); - - xpos = (ccur[0] + ccur[1]) * (ldcur[1] - ldcur[0]); - ypos = (ccur[0] + cnxt[0]) * (ldnxt[0] - ldcur[0]); - yneg = (cprv[0] + ccur[0]) * (ldcur[0] - ldprv[0]); - dst[0] = 0.5f*stepsize*(xpos + ypos - yneg); - - xneg = (ccur[r1] + ccur[r0]) * (ldcur[r0] - ldcur[r1]); - ypos = (ccur[r0] + cnxt[r0]) * (ldnxt[r0] - ldcur[r0]); - yneg = (cprv[r0] + ccur[r0]) * (ldcur[r0] - ldprv[r0]); - dst[r0] = 0.5f*stepsize*(-xneg + ypos - yneg); - - cprv = ccur; - ccur = cnxt; - ldprv = ldcur; - ldcur = ldnxt; - } - Ld += Lstep; - } - - /* ************************************************************************* */ - /** - * @brief This function downsamples the input image using OpenCV resize - * @param img Input image to be downsampled - * @param dst Output image with half of the resolution of the input image - */ - void halfsample_image(const cv::Mat& src, cv::Mat& dst) { - - // Make sure the destination image is of the right size - CV_Assert(src.cols / 2 == dst.cols); - CV_Assert(src.rows / 2 == dst.rows); - resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); - } - - /* ************************************************************************* */ - /** - * @brief This function checks if a given pixel is a maximum in a local neighbourhood - * @param img Input image where we will perform the maximum search - * @param dsize Half size of the neighbourhood - * @param value Response value at (x,y) position - * @param row Image row coordinate - * @param col Image column coordinate - * @param same_img Flag to indicate if the image value at (x,y) is in the input image - * @return 1->is maximum, 0->otherwise - */ - bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) { - - bool response = true; - - for (int i = row - dsize; i <= row + dsize; i++) { - for (int j = col - dsize; j <= col + dsize; j++) { - if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) { - if (same_img == true) { - if (i != row || j != col) { - if ((*(img.ptr(i)+j)) > value) { - response = false; - return response; - } - } - } - else { - if ((*(img.ptr(i)+j)) > value) { - response = false; - return response; - } - } - } - } - } - - return response; - } +namespace cv +{ +using namespace std; + +/* ************************************************************************* */ +/** + * @brief This function smoothes an image with a Gaussian kernel + * @param src Input image + * @param dst Output image 
+ * @param ksize_x Kernel size in X-direction (horizontal) + * @param ksize_y Kernel size in Y-direction (vertical) + * @param sigma Kernel standard deviation + */ +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { + + int ksize_x_ = 0, ksize_y_ = 0; + + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); + ksize_y_ = ksize_x_; + } + + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } + + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } + + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); +} + +/* ************************************************************************* */ +/** + * @brief This function computes image derivatives with Scharr kernel + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @note Scharr operator approximates better rotation invariance than + * other stencils such as Sobel. See Weickert and Scharr, + * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, + * Journal of Visual Communication and Image Representation 2002 + */ +void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { + Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); +} + +/* ************************************************************************* */ +/** + * @brief This function computes the Perona and Malik conductivity coefficient g1 + * g1 = exp(-|dL|^2/k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + + Size sz = Lx.size(); + float inv_k = 1.0f / (k*k); + for (int y = 0; y < sz.height; y++) { + + const float* Lx_row = Lx.ptr(y); + const float* Ly_row = Ly.ptr(y); + float* dst_row = dst.ptr(y); + + for (int x = 0; x < sz.width; x++) { + dst_row[x] = (-inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x])); + } + } + + exp(dst, dst); +} + +/* ************************************************************************* */ +/** + * @brief This function computes the Perona and Malik conductivity coefficient g2 + * g2 = 1 / (1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + + Size sz = Lx.size(); + dst.create(sz, Lx.type()); + float k2inv = 1.0f / (k * k); + + for(int y = 0; y < sz.height; y++) { + const float *Lx_row = Lx.ptr(y); + const float *Ly_row = Ly.ptr(y); + float* dst_row = dst.ptr(y); + for(int x = 0; x < sz.width; x++) { + dst_row[x] = 1.0f / (1.0f + ((Lx_row[x] * Lx_row[x] + Ly_row[x] * Ly_row[x]) * k2inv)); } } } +/* ************************************************************************* */ +/** + * @brief This function computes Weickert conductivity coefficient gw + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly 
First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + + Size sz = Lx.size(); + float inv_k = 1.0f / (k*k); + for (int y = 0; y < sz.height; y++) { + + const float* Lx_row = Lx.ptr(y); + const float* Ly_row = Ly.ptr(y); + float* dst_row = dst.ptr(y); + + for (int x = 0; x < sz.width; x++) { + float dL = inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]); + dst_row[x] = -3.315f/(dL*dL*dL*dL); + } + } + + exp(dst, dst); + dst = 1.0 - dst; +} + + +/* ************************************************************************* */ +/** +* @brief This function computes Charbonnier conductivity coefficient gc +* gc = 1 / sqrt(1 + dL^2 / k^2) +* @param Lx First order image derivative in X-direction (horizontal) +* @param Ly First order image derivative in Y-direction (vertical) +* @param dst Output image +* @param k Contrast factor parameter +* @note For more information check the following paper: J. Weickert +* Applications of nonlinear diffusion in image processing and computer vision, +* Proceedings of Algorithmy 2000 +*/ +void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + + Size sz = Lx.size(); + float inv_k = 1.0f / (k*k); + for (int y = 0; y < sz.height; y++) { + + const float* Lx_row = Lx.ptr(y); + const float* Ly_row = Ly.ptr(y); + float* dst_row = dst.ptr(y); + + for (int x = 0; x < sz.width; x++) { + float den = sqrt(1.0f+inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x])); + dst_row[x] = 1.0f / den; + } + } +} + + +/* ************************************************************************* */ +/** + * @brief This function computes a good empirical value for the k contrast factor + * given an input image, the percentile (0-1), the gradient scale and the number of + * bins in the histogram + * @param img Input image + * @param perc Percentile of the image gradient histogram (0-1) + * @param gscale Scale for computing the image gradient histogram + * @param nbins Number of histogram bins + * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel + * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel + * @return k contrast factor + */ +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) { + + int nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0; + float npoints = 0.0; + float hmax = 0.0; + + // Create the array for the histogram + std::vector hist(nbins, 0); + + // Create the matrices + Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F); + Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F); + Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F); + + // Perform the Gaussian convolution + gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); + + // Compute the Gaussian derivatives Lx and Ly + Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT); + Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT); + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + const float *lx = Lx.ptr(i); + const float *ly = Ly.ptr(i); + for (int j = 1; j < gaussian.cols - 1; j++) { + modg = lx[j]*lx[j] + 
ly[j]*ly[j]; + + // Get the maximum + if (modg > hmax) { + hmax = modg; + } + } + } + hmax = sqrt(hmax); + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + const float *lx = Lx.ptr(i); + const float *ly = Ly.ptr(i); + for (int j = 1; j < gaussian.cols - 1; j++) { + modg = lx[j]*lx[j] + ly[j]*ly[j]; + + // Find the correspondent bin + if (modg != 0.0) { + nbin = (int)floor(nbins*(sqrt(modg) / hmax)); + + if (nbin == nbins) { + nbin--; + } + + hist[nbin]++; + npoints++; + } + } + } + + // Now find the perc of the histogram percentile + nthreshold = (int)(npoints*perc); + + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } + + if (nelements < nthreshold) { + kperc = 0.03f; + } + else { + kperc = hmax*((float)(k) / (float)nbins); + } + + return kperc; +} + +/* ************************************************************************* */ +/** + * @brief This function computes Scharr image derivatives + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @param scale Scale factor for the derivative size + */ +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { + Mat kx, ky; + compute_derivative_kernels(kx, ky, xorder, yorder, scale); + sepFilter2D(src, dst, CV_32F, kx, ky); +} + +/* ************************************************************************* */ +/** + * @brief Compute derivative kernels for sizes different than 3 + * @param _kx Horizontal kernel ues + * @param _ky Vertical kernel values + * @param dx Derivative order in X-direction (horizontal) + * @param dy Derivative order in Y-direction (vertical) + * @param scale_ Scale factor or derivative size + */ +void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) { + + int ksize = 3 + 2 * (scale - 1); + + // The standard Scharr kernel + if (scale == 1) { + getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F); + return; + } + + _kx.create(ksize, 1, CV_32F, -1, true); + _ky.create(ksize, 1, CV_32F, -1, true); + Mat kx = _kx.getMat(); + Mat ky = _ky.getMat(); + + float w = 10.0f / 3.0f; + float norm = 1.0f / (2.0f*scale*(w + 2.0f)); + + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? 
dx : dy; + std::vector kerI(ksize, 0.0f); + + if (order == 0) { + kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm; + } + else if (order == 1) { + kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1; + } + + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } +} + +class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody +{ +public: + Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize) + : _Ld(&Ld) + , _c(&c) + , _Lstep(&Lstep) + , stepsize(_stepsize) + { + } + + virtual ~Nld_Step_Scalar_Invoker() + { + + } + + void operator()(const cv::Range& range) const + { + cv::Mat& Ld = *_Ld; + const cv::Mat& c = *_c; + cv::Mat& Lstep = *_Lstep; + + for (int i = range.start; i < range.end; i++) + { + const float *c_prev = c.ptr(i - 1); + const float *c_curr = c.ptr(i); + const float *c_next = c.ptr(i + 1); + const float *ld_prev = Ld.ptr(i - 1); + const float *ld_curr = Ld.ptr(i); + const float *ld_next = Ld.ptr(i + 1); + + float *dst = Lstep.ptr(i); + + for (int j = 1; j < Lstep.cols - 1; j++) + { + float xpos = (c_curr[j] + c_curr[j+1])*(ld_curr[j+1] - ld_curr[j]); + float xneg = (c_curr[j-1] + c_curr[j]) *(ld_curr[j] - ld_curr[j-1]); + float ypos = (c_curr[j] + c_next[j]) *(ld_next[j] - ld_curr[j]); + float yneg = (c_prev[j] + c_curr[j]) *(ld_curr[j] - ld_prev[j]); + dst[j] = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } + } + } +private: + cv::Mat * _Ld; + const cv::Mat * _c; + cv::Mat * _Lstep; + float stepsize; +}; + +/* ************************************************************************* */ +/** +* @brief This function performs a scalar non-linear diffusion step +* @param Ld2 Output image in the evolution +* @param c Conductivity image +* @param Lstep Previous image in the evolution +* @param stepsize The step size in time units +* @note Forward Euler Scheme 3x3 stencil +* The function c is a scalar value that depends on the gradient norm +* dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy +*/ +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { + + cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize), (double)Ld.total()/(1 << 16)); + + float xneg, xpos, yneg, ypos; + float* dst = Lstep.ptr(0); + const float* cprv = NULL; + const float* ccur = c.ptr(0); + const float* cnxt = c.ptr(1); + const float* ldprv = NULL; + const float* ldcur = Ld.ptr(0); + const float* ldnxt = Ld.ptr(1); + for (int j = 1; j < Lstep.cols - 1; j++) { + xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]); + xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]); + ypos = (ccur[j] + cnxt[j]) * (ldnxt[j] - ldcur[j]); + dst[j] = 0.5f*stepsize*(xpos - xneg + ypos); + } + + dst = Lstep.ptr(Lstep.rows - 1); + ccur = c.ptr(Lstep.rows - 1); + cprv = c.ptr(Lstep.rows - 2); + ldcur = Ld.ptr(Lstep.rows - 1); + ldprv = Ld.ptr(Lstep.rows - 2); + + for (int j = 1; j < Lstep.cols - 1; j++) { + xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]); + xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]); + yneg = (cprv[j] + ccur[j]) * (ldcur[j] - ldprv[j]); + dst[j] = 0.5f*stepsize*(xpos - xneg - yneg); + } + + ccur = c.ptr(1); + ldcur = Ld.ptr(1); + cprv = c.ptr(0); + ldprv = Ld.ptr(0); + + int r0 = Lstep.cols - 1; + int r1 = Lstep.cols - 2; + + for (int i = 1; i < Lstep.rows - 1; i++) { + cnxt = c.ptr(i + 1); + ldnxt = Ld.ptr(i + 1); + dst = Lstep.ptr(i); + + xpos = (ccur[0] + ccur[1]) * (ldcur[1] - ldcur[0]); + ypos = (ccur[0] + cnxt[0]) * (ldnxt[0] - 
ldcur[0]); + yneg = (cprv[0] + ccur[0]) * (ldcur[0] - ldprv[0]); + dst[0] = 0.5f*stepsize*(xpos + ypos - yneg); + + xneg = (ccur[r1] + ccur[r0]) * (ldcur[r0] - ldcur[r1]); + ypos = (ccur[r0] + cnxt[r0]) * (ldnxt[r0] - ldcur[r0]); + yneg = (cprv[r0] + ccur[r0]) * (ldcur[r0] - ldprv[r0]); + dst[r0] = 0.5f*stepsize*(-xneg + ypos - yneg); + + cprv = ccur; + ccur = cnxt; + ldprv = ldcur; + ldcur = ldnxt; + } + Ld += Lstep; +} + +/* ************************************************************************* */ +/** +* @brief This function downsamples the input image using OpenCV resize +* @param img Input image to be downsampled +* @param dst Output image with half of the resolution of the input image +*/ +void halfsample_image(const cv::Mat& src, cv::Mat& dst) { + + // Make sure the destination image is of the right size + CV_Assert(src.cols / 2 == dst.cols); + CV_Assert(src.rows / 2 == dst.rows); + resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); +} + +/* ************************************************************************* */ +/** + * @brief This function checks if a given pixel is a maximum in a local neighbourhood + * @param img Input image where we will perform the maximum search + * @param dsize Half size of the neighbourhood + * @param value Response value at (x,y) position + * @param row Image row coordinate + * @param col Image column coordinate + * @param same_img Flag to indicate if the image value at (x,y) is in the input image + * @return 1->is maximum, 0->otherwise + */ +bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) { + + bool response = true; + + for (int i = row - dsize; i <= row + dsize; i++) { + for (int j = col - dsize; j <= col + dsize; j++) { + if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) { + if (same_img == true) { + if (i != row || j != col) { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + else { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + } + } + + return response; +} + +} diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h index 6665e5427..9dc9fed19 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.h +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -11,43 +11,37 @@ #ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ #define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ -/* ************************************************************************* */ -// Includes -#include "../precomp.hpp" - /* ************************************************************************* */ // Declaration of functions -namespace cv { - namespace details { - namespace kaze { +namespace cv +{ - // Gaussian 2D convolution - void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); +// Gaussian 2D convolution +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); - // Diffusivity functions - void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); - void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); - void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); - void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +// Diffusivity functions +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +void pm_g2(const cv::Mat& Lx, const 
cv::Mat& Ly, cv::Mat& dst, float k); +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); - float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); - // Image derivatives - void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale); - void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale); - void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder); +// Image derivatives +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale); +void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale); +void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder); - // Nonlinear diffusion filtering scalar step - void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); +// Nonlinear diffusion filtering scalar step +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); - // For non-maxima suppresion - bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); +// For non-maxima suppresion +bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); + +// Image downsampling +void halfsample_image(const cv::Mat& src, cv::Mat& dst); - // Image downsampling - void halfsample_image(const cv::Mat& src, cv::Mat& dst); - } - } } #endif diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index 40c32d0a9..0deaa26f3 100644 --- a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -46,638 +46,564 @@ namespace cv { -const int TABLE_SIZE = 400; +using std::vector; -static double chitab3[]={0, 0.0150057, 0.0239478, 0.0315227, - 0.0383427, 0.0446605, 0.0506115, 0.0562786, - 0.0617174, 0.0669672, 0.0720573, 0.0770099, - 0.081843, 0.0865705, 0.0912043, 0.0957541, - 0.100228, 0.104633, 0.108976, 0.113261, - 0.117493, 0.121676, 0.125814, 0.12991, - 0.133967, 0.137987, 0.141974, 0.145929, - 0.149853, 0.15375, 0.15762, 0.161466, - 0.165287, 0.169087, 0.172866, 0.176625, - 0.180365, 0.184088, 0.187794, 0.191483, - 0.195158, 0.198819, 0.202466, 0.2061, - 0.209722, 0.213332, 0.216932, 0.220521, - 0.2241, 0.22767, 0.231231, 0.234783, - 0.238328, 0.241865, 0.245395, 0.248918, - 0.252435, 0.255947, 0.259452, 0.262952, - 0.266448, 0.269939, 0.273425, 0.276908, - 0.280386, 0.283862, 0.287334, 0.290803, - 0.29427, 0.297734, 0.301197, 0.304657, - 0.308115, 0.311573, 0.315028, 0.318483, - 0.321937, 0.32539, 0.328843, 0.332296, - 0.335749, 0.339201, 0.342654, 0.346108, - 0.349562, 0.353017, 0.356473, 0.35993, - 0.363389, 0.366849, 0.37031, 0.373774, - 0.377239, 0.380706, 0.384176, 0.387648, - 0.391123, 0.3946, 0.39808, 0.401563, - 0.405049, 0.408539, 0.412032, 0.415528, - 0.419028, 0.422531, 0.426039, 0.429551, - 0.433066, 0.436586, 0.440111, 0.44364, - 0.447173, 0.450712, 0.454255, 0.457803, - 0.461356, 0.464915, 0.468479, 0.472049, - 0.475624, 0.479205, 0.482792, 0.486384, - 0.489983, 0.493588, 0.4972, 0.500818, - 0.504442, 0.508073, 0.511711, 0.515356, - 0.519008, 0.522667, 0.526334, 0.530008, - 0.533689, 
0.537378, 0.541075, 0.54478, - 0.548492, 0.552213, 0.555942, 0.55968, - 0.563425, 0.56718, 0.570943, 0.574715, - 0.578497, 0.582287, 0.586086, 0.589895, - 0.593713, 0.597541, 0.601379, 0.605227, - 0.609084, 0.612952, 0.61683, 0.620718, - 0.624617, 0.628526, 0.632447, 0.636378, - 0.64032, 0.644274, 0.648239, 0.652215, - 0.656203, 0.660203, 0.664215, 0.668238, - 0.672274, 0.676323, 0.680384, 0.684457, - 0.688543, 0.692643, 0.696755, 0.700881, - 0.70502, 0.709172, 0.713339, 0.717519, - 0.721714, 0.725922, 0.730145, 0.734383, - 0.738636, 0.742903, 0.747185, 0.751483, - 0.755796, 0.760125, 0.76447, 0.768831, - 0.773208, 0.777601, 0.782011, 0.786438, - 0.790882, 0.795343, 0.799821, 0.804318, - 0.808831, 0.813363, 0.817913, 0.822482, - 0.827069, 0.831676, 0.836301, 0.840946, - 0.84561, 0.850295, 0.854999, 0.859724, - 0.864469, 0.869235, 0.874022, 0.878831, - 0.883661, 0.888513, 0.893387, 0.898284, - 0.903204, 0.908146, 0.913112, 0.918101, - 0.923114, 0.928152, 0.933214, 0.938301, - 0.943413, 0.94855, 0.953713, 0.958903, - 0.964119, 0.969361, 0.974631, 0.979929, - 0.985254, 0.990608, 0.99599, 1.0014, - 1.00684, 1.01231, 1.01781, 1.02335, - 1.02891, 1.0345, 1.04013, 1.04579, - 1.05148, 1.05721, 1.06296, 1.06876, - 1.07459, 1.08045, 1.08635, 1.09228, - 1.09826, 1.10427, 1.11032, 1.1164, - 1.12253, 1.1287, 1.1349, 1.14115, - 1.14744, 1.15377, 1.16015, 1.16656, - 1.17303, 1.17954, 1.18609, 1.19269, - 1.19934, 1.20603, 1.21278, 1.21958, - 1.22642, 1.23332, 1.24027, 1.24727, - 1.25433, 1.26144, 1.26861, 1.27584, - 1.28312, 1.29047, 1.29787, 1.30534, - 1.31287, 1.32046, 1.32812, 1.33585, - 1.34364, 1.3515, 1.35943, 1.36744, - 1.37551, 1.38367, 1.39189, 1.4002, - 1.40859, 1.41705, 1.42561, 1.43424, - 1.44296, 1.45177, 1.46068, 1.46967, - 1.47876, 1.48795, 1.49723, 1.50662, - 1.51611, 1.52571, 1.53541, 1.54523, - 1.55517, 1.56522, 1.57539, 1.58568, - 1.59611, 1.60666, 1.61735, 1.62817, - 1.63914, 1.65025, 1.66152, 1.67293, - 1.68451, 1.69625, 1.70815, 1.72023, - 1.73249, 1.74494, 1.75757, 1.77041, - 1.78344, 1.79669, 1.81016, 1.82385, - 1.83777, 1.85194, 1.86635, 1.88103, - 1.89598, 1.91121, 1.92674, 1.94257, - 1.95871, 1.97519, 1.99201, 2.0092, - 2.02676, 2.04471, 2.06309, 2.08189, - 2.10115, 2.12089, 2.14114, 2.16192, - 2.18326, 2.2052, 2.22777, 2.25101, - 2.27496, 2.29966, 2.32518, 2.35156, - 2.37886, 2.40717, 2.43655, 2.46709, - 2.49889, 2.53206, 2.56673, 2.60305, - 2.64117, 2.6813, 2.72367, 2.76854, - 2.81623, 2.86714, 2.92173, 2.98059, - 3.04446, 3.1143, 3.19135, 3.27731, - 3.37455, 3.48653, 3.61862, 3.77982, - 3.98692, 4.2776, 4.77167, 133.333 }; - -typedef struct LinkedPoint +class MSER_Impl : public MSER { - struct LinkedPoint* prev; - struct LinkedPoint* next; - Point pt; -} -LinkedPoint; - -// the history of region grown -typedef struct MSERGrowHistory -{ - struct MSERGrowHistory* shortcut; - struct MSERGrowHistory* child; - int stable; // when it ever stabled before, record the size - int val; - int size; -} -MSERGrowHistory; - -typedef struct MSERConnectedComp -{ - LinkedPoint* head; - LinkedPoint* tail; - MSERGrowHistory* history; - unsigned long grey_level; - int size; - int dvar; // the derivative of last var - float var; // the current variation (most time is the variation of one-step back) -} -MSERConnectedComp; - -// Linear Time MSER claims by using bsf can get performance gain, here is the implementation -// however it seems that will not do any good in real world test -inline void _bitset(unsigned long * a, unsigned long b) -{ - *a |= 1<size = 0; - comp->var = 0; - comp->dvar = 1; - 
comp->history = NULL; -} - -// add history of size to a connected component -static void -MSERNewHistory( MSERConnectedComp* comp, MSERGrowHistory* history ) -{ - history->child = history; - if ( NULL == comp->history ) +public: + struct Params { - history->shortcut = history; - history->stable = 0; - } else { - comp->history->child = history; - history->shortcut = comp->history->shortcut; - history->stable = comp->history->stable; - } - history->val = comp->grey_level; - history->size = comp->size; - comp->history = history; -} - -// merging two connected component -static void -MSERMergeComp( MSERConnectedComp* comp1, - MSERConnectedComp* comp2, - MSERConnectedComp* comp, - MSERGrowHistory* history ) -{ - LinkedPoint* head; - LinkedPoint* tail; - comp->grey_level = comp2->grey_level; - history->child = history; - // select the winner by size - if ( comp1->size >= comp2->size ) - { - if ( NULL == comp1->history ) + explicit Params( int _delta=5, double _maxVariation=0.25, + int _minArea=60, int _maxArea=14400, + double _minDiversity=.2, int _maxEvolution=200, + double _areaThreshold=1.01, + double _minMargin=0.003, int _edgeBlurSize=5 ) { - history->shortcut = history; - history->stable = 0; - } else { - comp1->history->child = history; - history->shortcut = comp1->history->shortcut; - history->stable = comp1->history->stable; + delta = _delta; + minArea = _minArea; + maxArea = _maxArea; + maxVariation = _maxVariation; + minDiversity = _minDiversity; + pass2Only = false; + maxEvolution = _maxEvolution; + areaThreshold = _areaThreshold; + minMargin = _minMargin; + edgeBlurSize = _edgeBlurSize; } - if ( NULL != comp2->history && comp2->history->stable > history->stable ) - history->stable = comp2->history->stable; - history->val = comp1->grey_level; - history->size = comp1->size; - // put comp1 to history - comp->var = comp1->var; - comp->dvar = comp1->dvar; - if ( comp1->size > 0 && comp2->size > 0 ) + int delta; + int minArea; + int maxArea; + double maxVariation; + double minDiversity; + bool pass2Only; + + int maxEvolution; + double areaThreshold; + double minMargin; + int edgeBlurSize; + }; + + explicit MSER_Impl(const Params& _params) : params(_params) {} + + virtual ~MSER_Impl() {} + + void set(int propId, double value) + { + if( propId == DELTA ) + params.delta = cvRound(value); + else if( propId == MIN_AREA ) + params.minArea = cvRound(value); + else if( propId == MAX_AREA ) + params.maxArea = cvRound(value); + else if( propId == PASS2_ONLY ) + params.pass2Only = value != 0; + else + CV_Error(CV_StsBadArg, "Unknown parameter id"); + } + + double get(int propId) const + { + double value = 0; + if( propId == DELTA ) + value = params.delta; + else if( propId == MIN_AREA ) + value = params.minArea; + else if( propId == MAX_AREA ) + value = params.maxArea; + else if( propId == PASS2_ONLY ) + value = params.pass2Only; + else + CV_Error(CV_StsBadArg, "Unknown parameter id"); + return value; + } + + enum { DIR_SHIFT = 29, NEXT_MASK = ((1<tail->next = comp2->head; - comp2->head->prev = comp1->tail; + return imgptr0[this - ptr0] ^ mask; } - head = ( comp1->size > 0 ) ? comp1->head : comp2->head; - tail = ( comp2->size > 0 ) ? comp2->tail : comp1->tail; - // always made the newly added in the last of the pixel list (comp1 ... 
comp2) - } else { - if ( NULL == comp2->history ) + int getNext() const { return (val & NEXT_MASK); } + void setNext(int next) { val = (val & ~NEXT_MASK) | next; } + + int getDir() const { return (int)((unsigned)val >> DIR_SHIFT); } + void setDir(int dir) { val = (val & NEXT_MASK) | (dir << DIR_SHIFT); } + bool isVisited() const { return (val & ~NEXT_MASK) != 0; } + + int val; + }; + typedef int PPixel; + + // the history of region grown + struct CompHistory + { + CompHistory() { shortcut = child = 0; stable = val = size = 0; } + CompHistory* shortcut; + CompHistory* child; + int stable; // when it ever stabled before, record the size + int val; + int size; + }; + + struct ConnectedComp + { + ConnectedComp() { - history->shortcut = history; - history->stable = 0; - } else { - comp2->history->child = history; - history->shortcut = comp2->history->shortcut; - history->stable = comp2->history->stable; + init(0); } - if ( NULL != comp1->history && comp1->history->stable > history->stable ) - history->stable = comp1->history->stable; - history->val = comp2->grey_level; - history->size = comp2->size; - // put comp2 to history - comp->var = comp2->var; - comp->dvar = comp2->dvar; - if ( comp1->size > 0 && comp2->size > 0 ) + + void init(int gray) { - comp2->tail->next = comp1->head; - comp1->head->prev = comp2->tail; + head = tail = 0; + history = 0; + size = 0; + grey_level = gray; + dvar = false; + var = 0; } - head = ( comp2->size > 0 ) ? comp2->head : comp1->head; - tail = ( comp1->size > 0 ) ? comp1->tail : comp2->tail; - // always made the newly added in the last of the pixel list (comp2 ... comp1) - } - comp->head = head; - comp->tail = tail; - comp->history = history; - comp->size = comp1->size + comp2->size; -} -static float -MSERVariationCalc( MSERConnectedComp* comp, int delta ) -{ - MSERGrowHistory* history = comp->history; - int val = comp->grey_level; - if ( NULL != history ) - { - MSERGrowHistory* shortcut = history->shortcut; - while ( shortcut != shortcut->shortcut && shortcut->val + delta > val ) - shortcut = shortcut->shortcut; - MSERGrowHistory* child = shortcut->child; - while ( child != child->child && child->val + delta <= val ) + // add history chunk to a connected component + void growHistory( CompHistory* h ) { - shortcut = child; - child = child->child; - } - // get the position of history where the shortcut->val <= delta+val and shortcut->child->val >= delta+val - history->shortcut = shortcut; - return (float)(comp->size-shortcut->size)/(float)shortcut->size; - // here is a small modification of MSER where cal ||R_{i}-R_{i-delta}||/||R_{i-delta}|| - // in standard MSER, cal ||R_{i+delta}-R_{i-delta}||/||R_{i}|| - // my calculation is simpler and much easier to implement - } - return 1.; -} - -static bool MSERStableCheck( MSERConnectedComp* comp, MSERParams params ) -{ - // tricky part: it actually check the stablity of one-step back - if ( comp->history == NULL || comp->history->size <= params.minArea || comp->history->size >= params.maxArea ) - return 0; - float div = (float)(comp->history->size-comp->history->stable)/(float)comp->history->size; - float var = MSERVariationCalc( comp, params.delta ); - int dvar = ( comp->var < var || (unsigned long)(comp->history->val + 1) < comp->grey_level ); - int stable = ( dvar && !comp->dvar && comp->var < params.maxVariation && div > params.minDiversity ); - comp->var = var; - comp->dvar = dvar; - if ( stable ) - comp->history->stable = comp->history->size; - return stable != 0; -} - -// add a pixel to the pixel list -static 
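The Pixel accessors above pack two fields into a single int: the offset of the next pixel in the region's linked list in the low bits, and the scan direction in the top three bits, so any non-zero direction also acts as the visited flag. A minimal standalone sketch of that layout, assuming NEXT_MASK equals (1 << DIR_SHIFT) - 1 and using no OpenCV types:

// Illustration of the assumed Pixel::val bit layout (low 29 bits = next offset,
// top 3 bits = direction 0..4; non-zero direction doubles as "visited").
#include <cassert>

int main()
{
    enum { DIR_SHIFT = 29, NEXT_MASK = (1 << DIR_SHIFT) - 1 };
    int val = 0;

    val = (val & NEXT_MASK) | (3 << DIR_SHIFT);      // setDir(3)
    val = (val & ~NEXT_MASK) | 12345;                // setNext(12345)

    assert(((unsigned)val >> DIR_SHIFT) == 3);       // getDir()
    assert((val & NEXT_MASK) == 12345);              // getNext()
    assert((val & ~NEXT_MASK) != 0);                 // isVisited()
    return 0;
}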
void accumulateMSERComp( MSERConnectedComp* comp, LinkedPoint* point ) -{ - if ( comp->size > 0 ) - { - point->prev = comp->tail; - comp->tail->next = point; - point->next = NULL; - } else { - point->prev = NULL; - point->next = NULL; - comp->head = point; - } - comp->tail = point; - comp->size++; -} - -// convert the point set to CvSeq -static CvContour* MSERToContour( MSERConnectedComp* comp, CvMemStorage* storage ) -{ - CvSeq* _contour = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage ); - CvContour* contour = (CvContour*)_contour; - cvSeqPushMulti( _contour, 0, comp->history->size ); - LinkedPoint* lpt = comp->head; - for ( int i = 0; i < comp->history->size; i++ ) - { - CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, _contour, i ); - pt->x = lpt->pt.x; - pt->y = lpt->pt.y; - lpt = lpt->next; - } - cvBoundingRect( contour ); - return contour; -} - -// to preprocess src image to following format -// 32-bit image -// > 0 is available, < 0 is visited -// 17~19 bits is the direction -// 8~11 bits is the bucket it falls to (for BitScanForward) -// 0~8 bits is the color -static int* preprocessMSER_8UC1( CvMat* img, - int*** heap_cur, - CvMat* src, - CvMat* mask ) -{ - int srccpt = src->step-src->cols; - int cpt_1 = img->cols-src->cols-1; - int* imgptr = img->data.i; - int* startptr; - - int level_size[256]; - for ( int i = 0; i < 256; i++ ) - level_size[i] = 0; - - for ( int i = 0; i < src->cols+2; i++ ) - { - *imgptr = -1; - imgptr++; - } - imgptr += cpt_1-1; - uchar* srcptr = src->data.ptr; - if ( mask ) - { - startptr = 0; - uchar* maskptr = mask->data.ptr; - for ( int i = 0; i < src->rows; i++ ) - { - *imgptr = -1; - imgptr++; - for ( int j = 0; j < src->cols; j++ ) + h->child = h; + if( !history ) { - if ( *maskptr ) + h->shortcut = h; + h->stable = 0; + } + else + { + history->child = h; + h->shortcut = history->shortcut; + h->stable = history->stable; + } + h->val = grey_level; + h->size = size; + history = h; + } + + // merging two connected components + static void + merge( const ConnectedComp* comp1, + const ConnectedComp* comp2, + ConnectedComp* comp, + CompHistory* h, + Pixel* pix0 ) + { + comp->grey_level = comp2->grey_level; + h->child = h; + // select the winner by size + if ( comp1->size < comp2->size ) + std::swap(comp1, comp2); + + if( !comp1->history ) + { + h->shortcut = h; + h->stable = 0; + } + else + { + comp1->history->child = h; + h->shortcut = comp1->history->shortcut; + h->stable = comp1->history->stable; + } + if( comp2->history && comp2->history->stable > h->stable ) + h->stable = comp2->history->stable; + h->val = comp1->grey_level; + h->size = comp1->size; + // put comp1 to history + comp->var = comp1->var; + comp->dvar = comp1->dvar; + if( comp1->size > 0 && comp2->size > 0 ) + pix0[comp1->tail].setNext(comp2->head); + PPixel head = comp1->size > 0 ? comp1->head : comp2->head; + PPixel tail = comp2->size > 0 ? comp2->tail : comp1->tail; + // always made the newly added in the last of the pixel list (comp1 ... 
comp2) + comp->head = head; + comp->tail = tail; + comp->history = h; + comp->size = comp1->size + comp2->size; + } + + float calcVariation( int delta ) const + { + if( !history ) + return 1.f; + int val = grey_level; + CompHistory* shortcut = history->shortcut; + while( shortcut != shortcut->shortcut && shortcut->val + delta > val ) + shortcut = shortcut->shortcut; + CompHistory* child = shortcut->child; + while( child != child->child && child->val + delta <= val ) + { + shortcut = child; + child = child->child; + } + // get the position of history where the shortcut->val <= delta+val and shortcut->child->val >= delta+val + history->shortcut = shortcut; + return (float)(size - shortcut->size)/(float)shortcut->size; + // here is a small modification of MSER where cal ||R_{i}-R_{i-delta}||/||R_{i-delta}|| + // in standard MSER, cal ||R_{i+delta}-R_{i-delta}||/||R_{i}|| + // my calculation is simpler and much easier to implement + } + + bool isStable(const Params& p) + { + // tricky part: it actually check the stablity of one-step back + if( !history || history->size <= p.minArea || history->size >= p.maxArea ) + return false; + float div = (float)(history->size - history->stable)/(float)history->size; + float _var = calcVariation( p.delta ); + bool _dvar = (var < _var) || (history->val + 1 < grey_level); + bool stable = _dvar && !dvar && _var < p.maxVariation && div > p.minDiversity; + var = _var; + dvar = _dvar; + if( stable ) + history->stable = history->size; + return stable; + } + + // convert the point set to CvSeq + Rect label( Mat& labels, int lval, const Pixel* pix0, int step ) const + { + int* lptr = labels.ptr(); + int lstep = labels.step/sizeof(lptr[0]); + int xmin = INT_MAX, ymin = INT_MAX, xmax = INT_MIN, ymax = INT_MIN; + + for( PPixel pix = head; pix != 0; pix = pix0[pix].getNext() ) + { + int y = pix/step; + int x = pix - y*step; + + xmin = std::min(xmin, x); + xmax = std::max(xmax, x); + ymin = std::min(ymin, y); + ymax = std::max(ymax, y); + + lptr[lstep*y + x] = lval; + } + + return Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1); + } + + PPixel head; + PPixel tail; + CompHistory* history; + int grey_level; + int size; + float var; // the current variation (most time is the variation of one-step back) + bool dvar; // the derivative of last var + }; + + int detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes ); + void detect( InputArray _src, vector& keypoints, InputArray _mask ); + + void preprocess1( const Mat& img, int* level_size ) + { + memset(level_size, 0, 256*sizeof(level_size[0])); + + int i, j, cols = img.cols, rows = img.rows; + int step = cols; + pixbuf.resize(step*rows); + heapbuf.resize(cols*rows + 256); + histbuf.resize(cols*rows); + Pixel borderpix; + borderpix.setDir(4); + + for( j = 0; j < step; j++ ) + { + pixbuf[j] = pixbuf[j + (rows-1)*step] = borderpix; + } + + for( i = 1; i < rows-1; i++ ) + { + const uchar* imgptr = img.ptr(i); + Pixel* pptr = &pixbuf[i*step]; + pptr[0] = pptr[cols-1] = borderpix; + for( j = 1; j < cols-1; j++ ) + { + int val = imgptr[j]; + level_size[val]++; + pptr[j].val = 0; + } + } + } + + void preprocess2( const Mat& img, int* level_size ) + { + int i; + + for( i = 0; i < 128; i++ ) + std::swap(level_size[i], level_size[255-i]); + + if( !params.pass2Only ) + { + int j, cols = img.cols, rows = img.rows; + int step = cols; + for( i = 1; i < rows-1; i++ ) + { + Pixel* pptr = &pixbuf[i*step + 1]; + for( j = 1; j < cols-1; j++ ) { - if ( !startptr ) - startptr = imgptr; - *srcptr = 0xff-*srcptr; - 
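As a worked example of calcVariation above: with the default delta of 5, if the component contained 200 pixels at the grey level recorded by its shortcut ancestor and contains 240 pixels now, the returned variation is (240 - 200) / 200 = 0.2, which stays below the default maxVariation of 0.25; isStable additionally requires the diversity (history->size - history->stable) / history->size to exceed minDiversity before the region is reported.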
level_size[*srcptr]++; - *imgptr = ((*srcptr>>5)<<8)|(*srcptr); - } else { - *imgptr = -1; + pptr[j].val = 0; } - imgptr++; - srcptr++; - maskptr++; } - *imgptr = -1; - imgptr += cpt_1; - srcptr += srccpt; - maskptr += srccpt; - } - } else { - startptr = imgptr+img->cols+1; - for ( int i = 0; i < src->rows; i++ ) - { - *imgptr = -1; - imgptr++; - for ( int j = 0; j < src->cols; j++ ) - { - *srcptr = 0xff-*srcptr; - level_size[*srcptr]++; - *imgptr = ((*srcptr>>5)<<8)|(*srcptr); - imgptr++; - srcptr++; - } - *imgptr = -1; - imgptr += cpt_1; - srcptr += srccpt; } } - for ( int i = 0; i < src->cols+2; i++ ) - { - *imgptr = -1; - imgptr++; - } - heap_cur[0][0] = 0; - for ( int i = 1; i < 256; i++ ) + void pass( const Mat& img, Mat& labels, int& lval, vector& bboxvec, + Size size, const int* level_size, int mask ) { - heap_cur[i] = heap_cur[i-1]+level_size[i-1]+1; - heap_cur[i][0] = 0; - } - return startptr; -} + CompHistory* histptr = &histbuf[0]; + int step = size.width; + Pixel *ptr0 = &pixbuf[0], *ptr = &ptr0[step+1]; + const uchar* imgptr0 = img.ptr(); + Pixel** heap[256]; + ConnectedComp comp[257]; + ConnectedComp* comptr = &comp[0]; -static void extractMSER_8UC1_Pass( int* ioptr, - int* imgptr, - int*** heap_cur, - LinkedPoint* ptsptr, - MSERGrowHistory* histptr, - MSERConnectedComp* comptr, - int step, - int stepmask, - int stepgap, - MSERParams params, - int color, - CvSeq* contours, - CvMemStorage* storage ) -{ - comptr->grey_level = 256; - comptr++; - comptr->grey_level = (*imgptr)&0xff; - initMSERComp( comptr ); - *imgptr |= 0x80000000; - heap_cur += (*imgptr)&0xff; - int dir[] = { 1, step, -1, -step }; -#ifdef __INTRIN_ENABLED__ - unsigned long heapbit[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; - unsigned long* bit_cur = heapbit+(((*imgptr)&0x700)>>8); -#endif - for ( ; ; ) - { - // take tour of all the 4 directions - while ( ((*imgptr)&0x70000) < 0x40000 ) + heap[0] = &heapbuf[0]; + heap[0][0] = 0; + + for( int i = 1; i < 256; i++ ) { - // get the neighbor - int* imgptr_nbr = imgptr+dir[((*imgptr)&0x70000)>>16]; - if ( *imgptr_nbr >= 0 ) // if the neighbor is not visited yet + heap[i] = heap[i-1] + level_size[i-1] + 1; + heap[i][0] = 0; + } + + comptr->grey_level = 256; + comptr++; + comptr->grey_level = ptr->getGray(ptr0, imgptr0, mask); + ptr->setDir(1); + int dir[] = { 0, 1, step, -1, -step }; + for( ;; ) + { + int curr_gray = ptr->getGray(ptr0, imgptr0, mask); + int nbr_idx = ptr->getDir(); + // take tour of all the 4 directions + for( ; nbr_idx <= 4; nbr_idx++ ) { - *imgptr_nbr |= 0x80000000; // mark it as visited - if ( ((*imgptr_nbr)&0xff) < ((*imgptr)&0xff) ) + // get the neighbor + Pixel* ptr_nbr = ptr + dir[nbr_idx]; + if( !ptr_nbr->isVisited() ) { - // when the value of neighbor smaller than current - // push current to boundary heap and make the neighbor to be the current one - // create an empty comp - (*heap_cur)++; - **heap_cur = imgptr; - *imgptr += 0x10000; - heap_cur += ((*imgptr_nbr)&0xff)-((*imgptr)&0xff); -#ifdef __INTRIN_ENABLED__ - _bitset( bit_cur, (*imgptr)&0x1f ); - bit_cur += (((*imgptr_nbr)&0x700)-((*imgptr)&0x700))>>8; -#endif - imgptr = imgptr_nbr; - comptr++; - initMSERComp( comptr ); - comptr->grey_level = (*imgptr)&0xff; - continue; - } else { + // set dir=1, next=0 + ptr_nbr->val = 1 << DIR_SHIFT; + int nbr_gray = ptr_nbr->getGray(ptr0, imgptr0, mask); + if( nbr_gray < curr_gray ) + { + // when the value of neighbor smaller than current + // push current to boundary heap and make the neighbor to be the current one + // create an empty comp + 
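The heap setup above carves heapbuf into 256 per-grey-level stacks whose sizes come from the level_size histogram, with one extra bottom slot per stack holding a 0 sentinel that the pop code uses to detect emptiness. A small standalone sketch of that partitioning with a hypothetical four-level histogram (plain C++, not OpenCV code):

// Partition one buffer into per-level stacks with a sentinel slot each,
// mirroring the heap[] initialisation above.
#include <cstdio>
#include <vector>

int main()
{
    const int levels = 4;
    int level_size[levels] = { 3, 0, 5, 2 };            // hypothetical counts
    int total = 3 + 0 + 5 + 2;
    std::vector<int*> heapbuf(total + levels, (int*)0);  // one sentinel per level

    int** heap[levels];
    heap[0] = &heapbuf[0];                              // slot 0 holds the 0 sentinel
    for( int i = 1; i < levels; i++ )
        heap[i] = heap[i-1] + level_size[i-1] + 1;      // skip stack i-1 plus its sentinel

    for( int i = 0; i < levels; i++ )                   // prints offsets 0, 4, 5, 11
        printf("stack %d starts at offset %d\n", i, (int)(heap[i] - &heapbuf[0]));
    return 0;
}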
*(++heap[curr_gray]) = ptr; + ptr->val = (nbr_idx+1) << DIR_SHIFT; + ptr = ptr_nbr; + comptr++; + comptr->init(nbr_gray); + curr_gray = nbr_gray; + nbr_idx = 0; + continue; + } // otherwise, push the neighbor to boundary heap - heap_cur[((*imgptr_nbr)&0xff)-((*imgptr)&0xff)]++; - *heap_cur[((*imgptr_nbr)&0xff)-((*imgptr)&0xff)] = imgptr_nbr; -#ifdef __INTRIN_ENABLED__ - _bitset( bit_cur+((((*imgptr_nbr)&0x700)-((*imgptr)&0x700))>>8), (*imgptr_nbr)&0x1f ); -#endif + *(++heap[nbr_gray]) = ptr_nbr; } } - *imgptr += 0x10000; - } - int imsk = (int)(imgptr-ioptr); - ptsptr->pt = cvPoint( imsk&stepmask, imsk>>stepgap ); - // get the current location - accumulateMSERComp( comptr, ptsptr ); - ptsptr++; - // get the next pixel from boundary heap - if ( **heap_cur ) - { - imgptr = **heap_cur; - (*heap_cur)--; -#ifdef __INTRIN_ENABLED__ - if ( !**heap_cur ) - _bitreset( bit_cur, (*imgptr)&0x1f ); -#endif - } else { -#ifdef __INTRIN_ENABLED__ - bool found_pixel = 0; - unsigned long pixel_val; - for ( int i = ((*imgptr)&0x700)>>8; i < 8; i++ ) + + // set dir = nbr_idx, next = 0 + ptr->val = nbr_idx << DIR_SHIFT; + int ptrofs = (int)(ptr - ptr0); + CV_Assert(ptrofs != 0); + + // add a pixel to the pixel list + if( comptr->tail ) + ptr0[comptr->tail].setNext(ptrofs); + else + comptr->head = ptrofs; + comptr->tail = ptrofs; + comptr->size++; + // get the next pixel from boundary heap + if( *heap[curr_gray] ) { - if ( _BitScanForward( &pixel_val, *bit_cur ) ) + ptr = *heap[curr_gray]; + heap[curr_gray]--; + } + else + { + for( curr_gray++; curr_gray < 256; curr_gray++ ) { - found_pixel = 1; - pixel_val += i<<5; - heap_cur += pixel_val-((*imgptr)&0xff); - break; + if( *heap[curr_gray] ) + break; } - bit_cur++; - } - if ( found_pixel ) -#else - heap_cur++; - unsigned long pixel_val = 0; - for ( unsigned long i = ((*imgptr)&0xff)+1; i < 256; i++ ) - { - if ( **heap_cur ) - { - pixel_val = i; + if( curr_gray >= 256 ) break; - } - heap_cur++; - } - if ( pixel_val ) -#endif - { - imgptr = **heap_cur; - (*heap_cur)--; -#ifdef __INTRIN_ENABLED__ - if ( !**heap_cur ) - _bitreset( bit_cur, pixel_val&0x1f ); -#endif - if ( pixel_val < comptr[-1].grey_level ) + + ptr = *heap[curr_gray]; + heap[curr_gray]--; + if( curr_gray < comptr[-1].grey_level ) { // check the stablity and push a new history, increase the grey level - if ( MSERStableCheck( comptr, params ) ) + if( comptr->isStable(params) ) { - CvContour* contour = MSERToContour( comptr, storage ); - contour->color = color; - cvSeqPush( contours, &contour ); + Rect box = comptr->label( labels, lval++, ptr0, step ); + bboxvec.push_back(box); } - MSERNewHistory( comptr, histptr ); - comptr[0].grey_level = pixel_val; - histptr++; - } else { + comptr->growHistory( histptr++ ); + comptr[0].grey_level = curr_gray; + } + else + { // keep merging top two comp in stack until the grey level >= pixel_val - for ( ; ; ) + for(;;) { comptr--; - MSERMergeComp( comptr+1, comptr, comptr, histptr ); - histptr++; - if ( pixel_val <= comptr[0].grey_level ) + ConnectedComp::merge(comptr+1, comptr, comptr, histptr++, ptr0); + if( curr_gray <= comptr[0].grey_level ) break; - if ( pixel_val < comptr[-1].grey_level ) + if( curr_gray < comptr[-1].grey_level ) { // check the stablity here otherwise it wouldn't be an ER - if ( MSERStableCheck( comptr, params ) ) + if( comptr->isStable(params) ) { - CvContour* contour = MSERToContour( comptr, storage ); - contour->color = color; - cvSeqPush( contours, &contour ); + Rect box = comptr->label( labels, lval++, ptr0, step ); + 
bboxvec.push_back(box); } - MSERNewHistory( comptr, histptr ); - comptr[0].grey_level = pixel_val; - histptr++; + comptr->growHistory( histptr++ ); + comptr[0].grey_level = curr_gray; break; } } } - } else - break; + } } } -} -static void extractMSER_8UC1( CvMat* src, - CvMat* mask, - CvSeq* contours, - CvMemStorage* storage, - MSERParams params ) + Mat tempsrc; + vector pixbuf; + vector heapbuf; + vector histbuf; + + Params params; +}; + +/* + +TODO: +the color MSER has not been completely refactored yet. We leave it mostly as-is, +with just enough changes to convert C structures to C++ ones and +add support for color images into MSER_Impl::detectAndLabel. +*/ + +const int TABLE_SIZE = 400; + +static const float chitab3[]= { - int step = 8; - int stepgap = 3; - while ( step < src->step+2 ) - { - step <<= 1; - stepgap++; - } - int stepmask = step-1; + 0.f, 0.0150057f, 0.0239478f, 0.0315227f, + 0.0383427f, 0.0446605f, 0.0506115f, 0.0562786f, + 0.0617174f, 0.0669672f, 0.0720573f, 0.0770099f, + 0.081843f, 0.0865705f, 0.0912043f, 0.0957541f, + 0.100228f, 0.104633f, 0.108976f, 0.113261f, + 0.117493f, 0.121676f, 0.125814f, 0.12991f, + 0.133967f, 0.137987f, 0.141974f, 0.145929f, + 0.149853f, 0.15375f, 0.15762f, 0.161466f, + 0.165287f, 0.169087f, 0.172866f, 0.176625f, + 0.180365f, 0.184088f, 0.187794f, 0.191483f, + 0.195158f, 0.198819f, 0.202466f, 0.2061f, + 0.209722f, 0.213332f, 0.216932f, 0.220521f, + 0.2241f, 0.22767f, 0.231231f, 0.234783f, + 0.238328f, 0.241865f, 0.245395f, 0.248918f, + 0.252435f, 0.255947f, 0.259452f, 0.262952f, + 0.266448f, 0.269939f, 0.273425f, 0.276908f, + 0.280386f, 0.283862f, 0.287334f, 0.290803f, + 0.29427f, 0.297734f, 0.301197f, 0.304657f, + 0.308115f, 0.311573f, 0.315028f, 0.318483f, + 0.321937f, 0.32539f, 0.328843f, 0.332296f, + 0.335749f, 0.339201f, 0.342654f, 0.346108f, + 0.349562f, 0.353017f, 0.356473f, 0.35993f, + 0.363389f, 0.366849f, 0.37031f, 0.373774f, + 0.377239f, 0.380706f, 0.384176f, 0.387648f, + 0.391123f, 0.3946f, 0.39808f, 0.401563f, + 0.405049f, 0.408539f, 0.412032f, 0.415528f, + 0.419028f, 0.422531f, 0.426039f, 0.429551f, + 0.433066f, 0.436586f, 0.440111f, 0.44364f, + 0.447173f, 0.450712f, 0.454255f, 0.457803f, + 0.461356f, 0.464915f, 0.468479f, 0.472049f, + 0.475624f, 0.479205f, 0.482792f, 0.486384f, + 0.489983f, 0.493588f, 0.4972f, 0.500818f, + 0.504442f, 0.508073f, 0.511711f, 0.515356f, + 0.519008f, 0.522667f, 0.526334f, 0.530008f, + 0.533689f, 0.537378f, 0.541075f, 0.54478f, + 0.548492f, 0.552213f, 0.555942f, 0.55968f, + 0.563425f, 0.56718f, 0.570943f, 0.574715f, + 0.578497f, 0.582287f, 0.586086f, 0.589895f, + 0.593713f, 0.597541f, 0.601379f, 0.605227f, + 0.609084f, 0.612952f, 0.61683f, 0.620718f, + 0.624617f, 0.628526f, 0.632447f, 0.636378f, + 0.64032f, 0.644274f, 0.648239f, 0.652215f, + 0.656203f, 0.660203f, 0.664215f, 0.668238f, + 0.672274f, 0.676323f, 0.680384f, 0.684457f, + 0.688543f, 0.692643f, 0.696755f, 0.700881f, + 0.70502f, 0.709172f, 0.713339f, 0.717519f, + 0.721714f, 0.725922f, 0.730145f, 0.734383f, + 0.738636f, 0.742903f, 0.747185f, 0.751483f, + 0.755796f, 0.760125f, 0.76447f, 0.768831f, + 0.773208f, 0.777601f, 0.782011f, 0.786438f, + 0.790882f, 0.795343f, 0.799821f, 0.804318f, + 0.808831f, 0.813363f, 0.817913f, 0.822482f, + 0.827069f, 0.831676f, 0.836301f, 0.840946f, + 0.84561f, 0.850295f, 0.854999f, 0.859724f, + 0.864469f, 0.869235f, 0.874022f, 0.878831f, + 0.883661f, 0.888513f, 0.893387f, 0.898284f, + 0.903204f, 0.908146f, 0.913112f, 0.918101f, + 0.923114f, 0.928152f, 0.933214f, 0.938301f, + 0.943413f, 0.94855f, 0.953713f, 
0.958903f, + 0.964119f, 0.969361f, 0.974631f, 0.979929f, + 0.985254f, 0.990608f, 0.99599f, 1.0014f, + 1.00684f, 1.01231f, 1.01781f, 1.02335f, + 1.02891f, 1.0345f, 1.04013f, 1.04579f, + 1.05148f, 1.05721f, 1.06296f, 1.06876f, + 1.07459f, 1.08045f, 1.08635f, 1.09228f, + 1.09826f, 1.10427f, 1.11032f, 1.1164f, + 1.12253f, 1.1287f, 1.1349f, 1.14115f, + 1.14744f, 1.15377f, 1.16015f, 1.16656f, + 1.17303f, 1.17954f, 1.18609f, 1.19269f, + 1.19934f, 1.20603f, 1.21278f, 1.21958f, + 1.22642f, 1.23332f, 1.24027f, 1.24727f, + 1.25433f, 1.26144f, 1.26861f, 1.27584f, + 1.28312f, 1.29047f, 1.29787f, 1.30534f, + 1.31287f, 1.32046f, 1.32812f, 1.33585f, + 1.34364f, 1.3515f, 1.35943f, 1.36744f, + 1.37551f, 1.38367f, 1.39189f, 1.4002f, + 1.40859f, 1.41705f, 1.42561f, 1.43424f, + 1.44296f, 1.45177f, 1.46068f, 1.46967f, + 1.47876f, 1.48795f, 1.49723f, 1.50662f, + 1.51611f, 1.52571f, 1.53541f, 1.54523f, + 1.55517f, 1.56522f, 1.57539f, 1.58568f, + 1.59611f, 1.60666f, 1.61735f, 1.62817f, + 1.63914f, 1.65025f, 1.66152f, 1.67293f, + 1.68451f, 1.69625f, 1.70815f, 1.72023f, + 1.73249f, 1.74494f, 1.75757f, 1.77041f, + 1.78344f, 1.79669f, 1.81016f, 1.82385f, + 1.83777f, 1.85194f, 1.86635f, 1.88103f, + 1.89598f, 1.91121f, 1.92674f, 1.94257f, + 1.95871f, 1.97519f, 1.99201f, 2.0092f, + 2.02676f, 2.04471f, 2.06309f, 2.08189f, + 2.10115f, 2.12089f, 2.14114f, 2.16192f, + 2.18326f, 2.2052f, 2.22777f, 2.25101f, + 2.27496f, 2.29966f, 2.32518f, 2.35156f, + 2.37886f, 2.40717f, 2.43655f, 2.46709f, + 2.49889f, 2.53206f, 2.56673f, 2.60305f, + 2.64117f, 2.6813f, 2.72367f, 2.76854f, + 2.81623f, 2.86714f, 2.92173f, 2.98059f, + 3.04446f, 3.1143f, 3.19135f, 3.27731f, + 3.37455f, 3.48653f, 3.61862f, 3.77982f, + 3.98692f, 4.2776f, 4.77167f, 133.333f +}; - // to speedup the process, make the width to be 2^N - CvMat* img = cvCreateMat( src->rows+2, step, CV_32SC1 ); - int* ioptr = img->data.i+step+1; - int* imgptr; - - // pre-allocate boundary heap - int** heap = (int**)cvAlloc( (src->rows*src->cols+256)*sizeof(heap[0]) ); - int** heap_start[256]; - heap_start[0] = heap; - - // pre-allocate linked point and grow history - LinkedPoint* pts = (LinkedPoint*)cvAlloc( src->rows*src->cols*sizeof(pts[0]) ); - MSERGrowHistory* history = (MSERGrowHistory*)cvAlloc( src->rows*src->cols*sizeof(history[0]) ); - MSERConnectedComp comp[257]; - - // darker to brighter (MSER-) - imgptr = preprocessMSER_8UC1( img, heap_start, src, mask ); - extractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, -1, contours, storage ); - // brighter to darker (MSER+) - imgptr = preprocessMSER_8UC1( img, heap_start, src, mask ); - extractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, 1, contours, storage ); - - // clean up - cvFree( &history ); - cvFree( &heap ); - cvFree( &pts ); - cvReleaseMat( &img ); -} struct MSCRNode; @@ -691,6 +617,56 @@ struct TempMSCR struct MSCRNode { + // the stable mscr should be: + // bigger than minArea and smaller than maxArea + // differ from its ancestor more than minDiversity + bool isStable( const MSER_Impl::Params& params ) const + { + if( size <= params.minArea || size >= params.maxArea ) + return 0; + if( gmsr == NULL ) + return 1; + double div = (double)(size - gmsr->size)/(double)size; + return div > params.minDiversity; + } + + void init( int _index ) + { + gmsr = tmsr = NULL; + reinit = 0xffff; + rank = 0; + sizei = size = 1; + prev = next = shortcut = this; + index = _index; + } + + // to find the root of one region + MSCRNode* findRoot() + 
{ + MSCRNode* x = this; + MSCRNode* _prev = x; + MSCRNode* _next; + for(;;) + { + _next = x->shortcut; + x->shortcut = _prev; + if( _next == x ) + break; + _prev = x; + x = _next; + } + MSCRNode* root = x; + for(;;) + { + _prev = x->shortcut; + x->shortcut = root; + if( _prev == x ) + break; + x = _prev; + } + return root; + } + MSCRNode* shortcut; // to make the finding of root less painful MSCRNode* prev; @@ -711,348 +687,86 @@ struct MSCRNode struct MSCREdge { - double chi; + double init(float _chi, MSCRNode* _left, MSCRNode* _right) + { + chi = _chi; + left = _left; + right = _right; + return chi; + } + float chi; MSCRNode* left; MSCRNode* right; }; -static double ChiSquaredDistance( uchar* x, uchar* y ) +static float ChiSquaredDistance( const uchar* x, const uchar* y ) { - return (double)((x[0]-y[0])*(x[0]-y[0]))/(double)(x[0]+y[0]+1e-10)+ - (double)((x[1]-y[1])*(x[1]-y[1]))/(double)(x[1]+y[1]+1e-10)+ - (double)((x[2]-y[2])*(x[2]-y[2]))/(double)(x[2]+y[2]+1e-10); + return (float)((x[0]-y[0])*(x[0]-y[0]))/(float)(x[0]+y[0]+FLT_EPSILON)+ + (float)((x[1]-y[1])*(x[1]-y[1]))/(float)(x[1]+y[1]+FLT_EPSILON)+ + (float)((x[2]-y[2])*(x[2]-y[2]))/(float)(x[2]+y[2]+FLT_EPSILON); } -static void initMSCRNode( MSCRNode* node ) -{ - node->gmsr = node->tmsr = NULL; - node->reinit = 0xffff; - node->rank = 0; - node->sizei = node->size = 1; - node->prev = node->next = node->shortcut = node; -} // the preprocess to get the edge list with proper gaussian blur -static int preprocessMSER_8UC3( MSCRNode* node, - MSCREdge* edge, - double* total, - CvMat* src, - CvMat* mask, - CvMat* dx, - CvMat* dy, - int Ne, - int edgeBlurSize ) +static int preprocessMSER_8UC3( MSCRNode* node, MSCREdge* edge, + double& total, const Mat& src, + Mat& dx, Mat& dy, int Ne, int edgeBlurSize ) { - int srccpt = src->step-src->cols*3; - uchar* srcptr = src->data.ptr; - uchar* lastptr = src->data.ptr+3; - double* dxptr = dx->data.db; - for ( int i = 0; i < src->rows; i++ ) + int nch = src.channels(); + int i, j, nrows = src.rows, ncols = src.cols; + float* dxptr = 0; + float* dyptr = 0; + for( i = 0; i < nrows; i++ ) { - for ( int j = 0; j < src->cols-1; j++ ) + const uchar* srcptr = src.ptr(i); + const uchar* nextsrc = src.ptr(std::min(i+1, nrows-1)); + dxptr = dx.ptr(i); + dyptr = dy.ptr(i); + + for( j = 0; j < ncols-1; j++ ) { - *dxptr = ChiSquaredDistance( srcptr, lastptr ); - dxptr++; - srcptr += 3; - lastptr += 3; + dxptr[j] = ChiSquaredDistance( srcptr + j*nch, srcptr + (j+1)*nch ); + dyptr[j] = ChiSquaredDistance( srcptr + j*nch, nextsrc + j*nch ); } - srcptr += srccpt+3; - lastptr += srccpt+3; - } - srcptr = src->data.ptr; - lastptr = src->data.ptr+src->step; - double* dyptr = dy->data.db; - for ( int i = 0; i < src->rows-1; i++ ) - { - for ( int j = 0; j < src->cols; j++ ) - { - *dyptr = ChiSquaredDistance( srcptr, lastptr ); - dyptr++; - srcptr += 3; - lastptr += 3; - } - srcptr += srccpt; - lastptr += srccpt; + dyptr[ncols-1] = ChiSquaredDistance( srcptr + (ncols-1)*nch, nextsrc + (ncols-1)*nch ); } + // get dx and dy and blur it - if ( edgeBlurSize >= 1 ) + if( edgeBlurSize >= 1 ) { - cvSmooth( dx, dx, CV_GAUSSIAN, edgeBlurSize, edgeBlurSize ); - cvSmooth( dy, dy, CV_GAUSSIAN, edgeBlurSize, edgeBlurSize ); + GaussianBlur(dx, dx, Size(edgeBlurSize, edgeBlurSize), 0); + GaussianBlur(dy, dy, Size(edgeBlurSize, edgeBlurSize), 0); } - dxptr = dx->data.db; - dyptr = dy->data.db; + dxptr = dx.ptr(); + dyptr = dy.ptr(); // assian dx, dy to proper edge list and initialize mscr node // the nasty code here intended to avoid extra 
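ChiSquaredDistance above is the symmetric per-channel chi-squared distance used to weight the horizontal (dx) and vertical (dy) edges between neighbouring colour pixels; the FLT_EPSILON term only guards against division by zero. A standalone check with made-up BGR triples:

// Same formula as ChiSquaredDistance above, kept standalone for illustration.
#include <cfloat>
#include <cstdio>

static float chi2(const unsigned char* x, const unsigned char* y)
{
    float s = 0.f;
    for( int c = 0; c < 3; c++ )
        s += (float)((x[c]-y[c])*(x[c]-y[c])) / (float)(x[c]+y[c]+FLT_EPSILON);
    return s;
}

int main()
{
    unsigned char a[] = { 10, 40, 200 }, b[] = { 30, 40, 100 };
    // (20*20)/40 + 0 + (100*100)/300 = 10 + 33.33... ~= 43.3
    printf("%.2f\n", chi2(a, b));
    return 0;
}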
loops - if ( mask ) + MSCRNode* nodeptr = node; + for( j = 0; j < ncols-1; j++ ) { - Ne = 0; - int maskcpt = mask->step-mask->cols+1; - uchar* maskptr = mask->data.ptr; - MSCRNode* nodeptr = node; - initMSCRNode( nodeptr ); - nodeptr->index = 0; - *total += edge->chi = *dxptr; - if ( maskptr[0] && maskptr[1] ) - { - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - dxptr++; - nodeptr++; - maskptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i; - if ( maskptr[0] && maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - dxptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = src->cols-1; - nodeptr++; - maskptr += maskcpt; - for ( int i = 1; i < src->rows-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i<<16; - if ( maskptr[0] ) - { - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - } - dyptr++; - dxptr++; - nodeptr++; - maskptr++; - for ( int j = 1; j < src->cols-1; j++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|j; - if ( maskptr[0] ) - { - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - } - dyptr++; - dxptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|(src->cols-1); - if ( maskptr[0] && maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - dyptr++; - nodeptr++; - maskptr += maskcpt; - } - initMSCRNode( nodeptr ); - nodeptr->index = (src->rows-1)<<16; - if ( maskptr[0] ) - { - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - } - dxptr++; - dyptr++; - nodeptr++; - maskptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|i; - if ( maskptr[0] ) - { - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - } - dxptr++; - dyptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); - if ( maskptr[0] && maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - Ne++; - } - } else { - MSCRNode* nodeptr = node; - initMSCRNode( nodeptr ); - nodeptr->index = 0; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr 
); - nodeptr->index = src->cols-1; - nodeptr++; - for ( int i = 1; i < src->rows-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i<<16; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - for ( int j = 1; j < src->cols-1; j++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|j; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|(src->cols-1); - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (src->rows-1)<<16; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|i; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; + nodeptr[j].init(j); + total += edge[j].init(dxptr[j], nodeptr+j, nodeptr+j+1); } + dxptr += ncols - 1; + edge += ncols - 1; + nodeptr[ncols-1].init(ncols - 1); + nodeptr += ncols; + for( i = 1; i < nrows; i++ ) + { + for( j = 0; j < ncols-1; j++ ) + { + nodeptr[j].init( (i<<16)|j ); + total += edge[j*2].init(dyptr[j], nodeptr + j - ncols, nodeptr + j); + total += edge[j*2+1].init(dxptr[j], nodeptr + j, nodeptr + j + 1); + } + nodeptr[ncols-1].init((i<<16)|(ncols - 1)); + total += edge[(ncols-1)*2].init(dyptr[ncols-1], nodeptr - 1, nodeptr + ncols-1); + dxptr += ncols-1; + dyptr += ncols; + edge += 2*ncols - 1; + nodeptr += ncols; + } + return Ne; } @@ -1062,63 +776,34 @@ public: bool operator()(const MSCREdge& a, const MSCREdge& b) const { return a.chi < b.chi; } }; -// to find the root of one region -static MSCRNode* findMSCR( MSCRNode* x ) -{ - MSCRNode* prev = x; - MSCRNode* next; - for ( ; ; ) - { - next = x->shortcut; - x->shortcut = prev; - if ( next == x ) break; - prev= x; - x = next; - } - MSCRNode* root = x; - for ( ; ; ) - { - prev = x->shortcut; - x->shortcut = root; - if ( prev == x ) break; - x = prev; - } - return root; -} - -// the stable mscr should be: -// bigger than minArea and smaller than maxArea -// differ from its ancestor more than minDiversity -static bool MSCRStableCheck( MSCRNode* x, MSERParams params ) -{ - if ( x->size <= params.minArea || x->size >= params.maxArea ) - return 0; - if ( x->gmsr == NULL ) - return 1; - double div = (double)(x->size-x->gmsr->size)/(double)x->size; - return div > params.minDiversity; -} static void -extractMSER_8UC3( CvMat* src, - CvMat* mask, - CvSeq* contours, - CvMemStorage* storage, - MSERParams params ) +extractMSER_8uC3( const Mat& src, Mat& labels, + vector& bboxvec, + const MSER_Impl::Params& params ) { - MSCRNode* map = 
(MSCRNode*)cvAlloc( src->cols*src->rows*sizeof(map[0]) ); - int Ne = src->cols*src->rows*2-src->cols-src->rows; - MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) ); - TempMSCR* mscr = (TempMSCR*)cvAlloc( src->cols*src->rows*sizeof(mscr[0]) ); + int npixels = src.cols*src.rows; + int currlabel = 0; + int* lptr = labels.ptr(); + int lstep = (int)(labels.step/sizeof(int)); + + vector mapvec(npixels); + MSCRNode* map = &mapvec[0]; + int Ne = npixels*2 - src.cols - src.rows; + vector edgevec(Ne+1); + MSCREdge* edge = &edgevec[0]; + vector mscrvec(npixels); + TempMSCR* mscr = &mscrvec[0]; double emean = 0; - CvMat* dx = cvCreateMat( src->rows, src->cols-1, CV_64FC1 ); - CvMat* dy = cvCreateMat( src->rows-1, src->cols, CV_64FC1 ); - Ne = preprocessMSER_8UC3( map, edge, &emean, src, mask, dx, dy, Ne, params.edgeBlurSize ); + Mat dx( src.rows, src.cols-1, CV_32FC1 ); + Mat dy( src.rows, src.cols, CV_32FC1 ); + Ne = preprocessMSER_8UC3( map, edge, emean, src, dx, dy, Ne, params.edgeBlurSize ); emean = emean / (double)Ne; std::sort(edge, edge + Ne, LessThanEdge()); MSCREdge* edge_ub = edge+Ne; MSCREdge* edgeptr = edge; TempMSCR* mscrptr = mscr; + // the evolution process for ( int i = 0; i < params.maxEvolution; i++ ) { @@ -1127,23 +812,24 @@ extractMSER_8UC3( CvMat* src, double reminder = k-ti; double thres = emean*(chitab3[ti]*(1-reminder)+chitab3[ti+1]*reminder); // to process all the edges in the list that chi < thres - while ( edgeptr < edge_ub && edgeptr->chi < thres ) + while( edgeptr < edge_ub && edgeptr->chi < thres ) { - MSCRNode* lr = findMSCR( edgeptr->left ); - MSCRNode* rr = findMSCR( edgeptr->right ); + MSCRNode* lr = edgeptr->left->findRoot(); + MSCRNode* rr = edgeptr->right->findRoot(); // get the region root (who is responsible) if ( lr != rr ) { + MSCRNode* tmp; // rank idea take from: N-tree Disjoint-Set Forests for Maximally Stable Extremal Regions if ( rr->rank > lr->rank ) { - MSCRNode* tmp; CV_SWAP( lr, rr, tmp ); - } else if ( lr->rank == rr->rank ) { + } + else if ( lr->rank == rr->rank ) + { // at the same rank, we will compare the size - if ( lr->size > rr->size ) + if( lr->size > rr->size ) { - MSCRNode* tmp; CV_SWAP( lr, rr, tmp ); } lr->rank++; @@ -1175,7 +861,7 @@ extractMSER_8UC3( CvMat* src, if ( s < lr->s ) { // skip the first one and check stablity - if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) ) + if ( i > lr->reinit+1 && lr->isStable( params ) ) { if ( lr->tmsr == NULL ) { @@ -1196,113 +882,124 @@ extractMSER_8UC3( CvMat* src, if ( edgeptr >= edge_ub ) break; } - for ( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ ) - // to prune area with margin less than minMargin - if ( ptr->m > params.minMargin ) - { - CvSeq* _contour = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage ); - cvSeqPushMulti( _contour, 0, ptr->size ); - MSCRNode* lpt = ptr->head; - for ( int i = 0; i < ptr->size; i++ ) - { - CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, _contour, i ); - pt->x = (lpt->index)&0xffff; - pt->y = (lpt->index)>>16; - lpt = lpt->next; - } - CvContour* contour = (CvContour*)_contour; - cvBoundingRect( contour ); - contour->color = 0; - cvSeqPush( contours, &contour ); - } - cvReleaseMat( &dx ); - cvReleaseMat( &dy ); - cvFree( &mscr ); - cvFree( &edge ); - cvFree( &map ); -} -static void -extractMSER( CvArr* _img, - CvArr* _mask, - CvSeq** _contours, - CvMemStorage* storage, - MSERParams params ) -{ - CvMat srchdr, *src = cvGetMat( _img, &srchdr ); - CvMat maskhdr, *mask = _mask ? 
cvGetMat( _mask, &maskhdr ) : 0; - CvSeq* contours = 0; - - CV_Assert(src != 0); - CV_Assert(CV_MAT_TYPE(src->type) == CV_8UC1 || CV_MAT_TYPE(src->type) == CV_8UC3); - CV_Assert(mask == 0 || (CV_ARE_SIZES_EQ(src, mask) && CV_MAT_TYPE(mask->type) == CV_8UC1)); - CV_Assert(storage != 0); - - contours = *_contours = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSeq*), storage ); - - // choose different method for different image type - // for grey image, it is: Linear Time Maximally Stable Extremal Regions - // for color image, it is: Maximally Stable Colour Regions for Recognition and Matching - switch ( CV_MAT_TYPE(src->type) ) + for( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ ) { - case CV_8UC1: - extractMSER_8UC1( src, mask, contours, storage, params ); - break; - case CV_8UC3: - extractMSER_8UC3( src, mask, contours, storage, params ); - break; + // to prune area with margin less than minMargin + if( ptr->m > params.minMargin ) + { + int xmin = INT_MAX, ymin = INT_MAX, xmax = INT_MIN, ymax = INT_MIN; + currlabel++; + MSCRNode* lpt = ptr->head; + for( int i = 0; i < ptr->size; i++ ) + { + int x = (lpt->index)&0xffff; + int y = (lpt->index)>>16; + lpt = lpt->next; + + xmin = std::min(xmin, x); + xmax = std::max(xmax, x); + ymin = std::min(ymin, y); + ymax = std::max(ymax, y); + + lptr[lstep*y + x] = currlabel; + } + bboxvec.push_back(Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1)); + } } } +int MSER_Impl::detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes ) +{ + Mat src = _src.getMat(); + size_t npix = src.total(); + vector bboxvec; -MSER::MSER( int _delta, int _min_area, int _max_area, + if( npix == 0 ) + { + _labels.release(); + return 0; + } + + Size size = src.size(); + _labels.create( size, CV_32S ); + Mat labels = _labels.getMat(); + labels.setTo(Scalar::all(0)); + + if( src.type() == CV_8U ) + { + int level_size[256]; + int lval = 1; + + if( !src.isContinuous() ) + { + src.copyTo(tempsrc); + src = tempsrc; + } + + // darker to brighter (MSER+) + preprocess1( src, level_size ); + if( !params.pass2Only ) + pass( src, labels, lval, bboxvec, size, level_size, 0 ); + // brighter to darker (MSER-) + preprocess2( src, level_size ); + pass( src, labels, lval, bboxvec, size, level_size, 255 ); + } + else + { + CV_Assert( src.type() == CV_8UC3 || src.type() == CV_8UC4 ); + extractMSER_8uC3( src, labels, bboxvec, params ); + } + if( _bboxes.needed() ) + Mat(bboxvec).copyTo(_bboxes); + return (int)bboxvec.size(); +} + +void MSER_Impl::detect( InputArray _image, vector& keypoints, InputArray _mask ) +{ + vector bboxes; + vector reg; + Mat labels, mask = _mask.getMat(); + + int i, x, y, ncomps = detectAndLabel(_image, labels, bboxes); + CV_Assert( ncomps == (int)bboxes.size() ); + + keypoints.clear(); + for( i = 0; i < ncomps; i++ ) + { + Rect r = bboxes[i]; + reg.reserve(r.area()); + reg.clear(); + + for( y = r.y; y < r.y + r.height; y++ ) + { + const int* lptr = labels.ptr(y); + for( x = r.x; x < r.x + r.width; x++ ) + { + if( lptr[x] == i+1 ) + reg.push_back(Point(x, y)); + } + } + // TODO check transformation from MSER region to KeyPoint + RotatedRect rect = fitEllipse(Mat(reg)); + float diam = std::sqrt(rect.size.height*rect.size.width); + + if( diam > std::numeric_limits::epsilon() && r.contains(rect.center) && + (mask.empty() || mask.at(cvRound(rect.center.y), cvRound(rect.center.x)) != 0) ) + keypoints.push_back( KeyPoint(rect.center, diam) ); + } +} + +Ptr MSER::create( int _delta, int _min_area, int _max_area, double _max_variation, double _min_diversity, int 
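In detect() above, each labelled region becomes a single KeyPoint whose size is the geometric mean of the fitted ellipse's side lengths: for example, a 30 x 40 RotatedRect gives sqrt(30 * 40) which is roughly 34.6, and the point is kept only if the ellipse centre lies inside the region's bounding box and, when a mask is supplied, falls on a non-zero mask pixel.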
_max_evolution, double _area_threshold, double _min_margin, int _edge_blur_size ) - : delta(_delta), minArea(_min_area), maxArea(_max_area), - maxVariation(_max_variation), minDiversity(_min_diversity), - maxEvolution(_max_evolution), areaThreshold(_area_threshold), - minMargin(_min_margin), edgeBlurSize(_edge_blur_size) { -} - -void MSER::operator()( InputArray image, std::vector >& dstcontours, InputArray mask ) const -{ - CvMat _image = image.getMat(), _mask, *pmask = 0; - if( !mask.empty() ) - pmask = &(_mask = mask.getMat()); - MemStorage storage(cvCreateMemStorage(0)); - Seq contours; - extractMSER( &_image, pmask, &contours.seq, storage, - MSERParams(delta, minArea, maxArea, maxVariation, minDiversity, - maxEvolution, areaThreshold, minMargin, edgeBlurSize)); - SeqIterator it = contours.begin(); - size_t i, ncontours = contours.size(); - dstcontours.resize(ncontours); - for( i = 0; i < ncontours; i++, ++it ) - Seq(*it).copyTo(dstcontours[i]); -} - - -void MserFeatureDetector::detectImpl( InputArray _image, std::vector& keypoints, InputArray _mask ) const -{ - Mat image = _image.getMat(), mask = _mask.getMat(); - std::vector > msers; - - (*this)(image, msers, mask); - - std::vector >::const_iterator contour_it = msers.begin(); - Rect r(0, 0, image.cols, image.rows); - for( ; contour_it != msers.end(); ++contour_it ) - { - // TODO check transformation from MSER region to KeyPoint - RotatedRect rect = fitEllipse(Mat(*contour_it)); - float diam = std::sqrt(rect.size.height*rect.size.width); - - if( diam > std::numeric_limits::epsilon() && r.contains(rect.center) ) - keypoints.push_back( KeyPoint(rect.center, diam) ); - } - + return makePtr( + MSER_Impl::Params(_delta, _min_area, _max_area, + _max_variation, _min_diversity, + _max_evolution, _area_threshold, + _min_margin, _edge_blur_size)); } } diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index af91fb6bb..d7b8fce40 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -664,19 +664,11 @@ public: int defaultNorm() const; // Compute the ORB_Impl features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the ORB_Impl features and descriptors on an image - void operator()( InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints=false ) const; - - AlgorithmInfo* info() const; + void detectAndCompute( InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ); protected: - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - int nfeatures; double scaleFactor; int nlevels; @@ -703,17 +695,6 @@ int ORB_Impl::defaultNorm() const return NORM_HAMMING; } -/** Compute the ORB_Impl features and descriptors on an image - * @param img the image to compute the features and descriptors on - * @param mask the mask to apply - * @param keypoints the resulting keypoints - */ -void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector& keypoints) const -{ - (*this)(image, mask, keypoints, noArray(), false); -} - - static void uploadORBKeypoints(const std::vector& src, std::vector& buf, OutputArray dst) { size_t i, n = src.size(); @@ -813,8 +794,10 @@ static void computeKeyPoints(const Mat& imagePyramid, Mat mask = maskPyramid.empty() ? 
Mat() : maskPyramid(layerInfo[level]); // Detect FAST features, 20 is a good threshold - FastFeatureDetector fd(fastThreshold, true); - fd.detect(img, keypoints, mask); + { + Ptr fd = FastFeatureDetector::create(fastThreshold, true); + fd->detect(img, keypoints, mask); + } // Remove keypoints very close to the border KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold); @@ -926,8 +909,9 @@ static void computeKeyPoints(const Mat& imagePyramid, * @param do_keypoints if true, the keypoints are computed, otherwise used as an input * @param do_descriptors if true, also computes the descriptors */ -void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, - OutputArray _descriptors, bool useProvidedKeypoints ) const +void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask, + std::vector& keypoints, + OutputArray _descriptors, bool useProvidedKeypoints ) { CV_Assert(patchSize >= 2); @@ -1153,15 +1137,11 @@ void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, InputArray mask) const +Ptr ORB::create(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, + int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold) { - (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false); + return makePtr(nfeatures, scaleFactor, nlevels, edgeThreshold, + firstLevel, WTA_K, scoreType, patchSize, fastThreshold); } -void ORB_Impl::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const -{ - (*this)(image, Mat(), keypoints, descriptors, true); -} - - } diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index c09a466b8..baffeeb3f 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -314,31 +314,34 @@ private: TEST( Features2d_DescriptorExtractor_BRISK, regression ) { - CV_DescriptorExtractorTest test( "descriptor-brisk", (CV_DescriptorExtractorTest::DistanceType)2.f, - DescriptorExtractor::create("BRISK") ); + CV_DescriptorExtractorTest test( "descriptor-brisk", + (CV_DescriptorExtractorTest::DistanceType)2.f, + BRISK::create() ); test.safe_run(); } TEST( Features2d_DescriptorExtractor_ORB, regression ) { // TODO adjust the parameters below - CV_DescriptorExtractorTest test( "descriptor-orb", (CV_DescriptorExtractorTest::DistanceType)12.f, - DescriptorExtractor::create("ORB") ); + CV_DescriptorExtractorTest test( "descriptor-orb", + (CV_DescriptorExtractorTest::DistanceType)12.f, + ORB::create() ); test.safe_run(); } TEST( Features2d_DescriptorExtractor_KAZE, regression ) { CV_DescriptorExtractorTest< L2 > test( "descriptor-kaze", 0.03f, - DescriptorExtractor::create("KAZE"), - L2(), FeatureDetector::create("KAZE")); + KAZE::create(), + L2() ); test.safe_run(); } TEST( Features2d_DescriptorExtractor_AKAZE, regression ) { - CV_DescriptorExtractorTest test( "descriptor-akaze", (CV_DescriptorExtractorTest::DistanceType)12.f, - DescriptorExtractor::create("AKAZE"), - Hamming(), FeatureDetector::create("AKAZE")); + CV_DescriptorExtractorTest test( "descriptor-akaze", + (CV_DescriptorExtractorTest::DistanceType)12.f, + AKAZE::create(), + Hamming(), AKAZE::create()); test.safe_run(); } diff --git a/modules/features2d/test/test_detectors_regression.cpp b/modules/features2d/test/test_detectors_regression.cpp index 1a7d09a85..a3ca0a9f3 100644 --- a/modules/features2d/test/test_detectors_regression.cpp +++ 
b/modules/features2d/test/test_detectors_regression.cpp
@@ -249,48 +249,48 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
 TEST( Features2d_Detector_BRISK, regression )
 {
-    CV_FeatureDetectorTest test( "detector-brisk", FeatureDetector::create("BRISK") );
+    CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_FAST, regression )
 {
-    CV_FeatureDetectorTest test( "detector-fast", FeatureDetector::create("FAST") );
+    CV_FeatureDetectorTest test( "detector-fast", FastFeatureDetector::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_GFTT, regression )
 {
-    CV_FeatureDetectorTest test( "detector-gftt", FeatureDetector::create("GFTT") );
+    CV_FeatureDetectorTest test( "detector-gftt", GFTTDetector::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_Harris, regression )
 {
-    CV_FeatureDetectorTest test( "detector-harris", FeatureDetector::create("HARRIS") );
+    CV_FeatureDetectorTest test( "detector-harris", GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
     test.safe_run();
 }
 
 TEST( Features2d_Detector_MSER, DISABLED_regression )
 {
-    CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
+    CV_FeatureDetectorTest test( "detector-mser", MSER::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_ORB, regression )
 {
-    CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
+    CV_FeatureDetectorTest test( "detector-orb", ORB::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_KAZE, regression )
 {
-    CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") );
+    CV_FeatureDetectorTest test( "detector-kaze", KAZE::create() );
     test.safe_run();
 }
 
 TEST( Features2d_Detector_AKAZE, regression )
 {
-    CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
+    CV_FeatureDetectorTest test( "detector-akaze", AKAZE::create() );
     test.safe_run();
 }
diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp
index 349c03ed4..561c3cc6e 100644
--- a/modules/features2d/test/test_keypoints.cpp
+++ b/modules/features2d/test/test_keypoints.cpp
@@ -61,7 +61,6 @@ public:
 protected:
     virtual void run(int)
     {
-        cv::initModule_features2d();
         CV_Assert(detector);
 
         string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
@@ -121,51 +120,51 @@ protected:
 TEST(Features2d_Detector_Keypoints_BRISK, validation)
 {
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.BRISK"));
+    CV_FeatureDetectorKeypointsTest test(BRISK::create());
     test.safe_run();
 }
 
 TEST(Features2d_Detector_Keypoints_FAST, validation)
 {
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.FAST"));
+    CV_FeatureDetectorKeypointsTest test(FastFeatureDetector::create());
     test.safe_run();
 }
 
 TEST(Features2d_Detector_Keypoints_HARRIS, validation)
 {
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.HARRIS"));
+    CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
     test.safe_run();
 }
 
 TEST(Features2d_Detector_Keypoints_GFTT, validation)
 {
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.GFTT"));
+    CV_FeatureDetectorKeypointsTest test(GFTTDetector::create());
     test.safe_run();
 }
 
 TEST(Features2d_Detector_Keypoints_MSER, validation)
 {
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.MSER"));
+    CV_FeatureDetectorKeypointsTest test(MSER::create());
     test.safe_run();
 }
 
 TEST(Features2d_Detector_Keypoints_ORB, validation)
{ - CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.ORB")); + CV_FeatureDetectorKeypointsTest test(ORB::create()); test.safe_run(); } TEST(Features2d_Detector_Keypoints_KAZE, validation) { - CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.KAZE")); + CV_FeatureDetectorKeypointsTest test(KAZE::create()); test.safe_run(); } TEST(Features2d_Detector_Keypoints_AKAZE, validation) { - CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr(new cv::AKAZE(cv::DESCRIPTOR_KAZE))); + CV_FeatureDetectorKeypointsTest test_kaze(AKAZE::create(AKAZE::DESCRIPTOR_KAZE)); test_kaze.safe_run(); - CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr(new cv::AKAZE(cv::DESCRIPTOR_MLDB))); + CV_FeatureDetectorKeypointsTest test_mldb(AKAZE::create(AKAZE::DESCRIPTOR_MLDB)); test_mldb.safe_run(); } diff --git a/modules/features2d/test/test_mser.cpp b/modules/features2d/test/test_mser.cpp index 6b1b4ef63..29572870a 100644 --- a/modules/features2d/test/test_mser.cpp +++ b/modules/features2d/test/test_mser.cpp @@ -43,6 +43,8 @@ #include "test_precomp.hpp" #include "opencv2/imgproc/imgproc_c.h" +#if 0 + #include #include using namespace std; @@ -205,3 +207,5 @@ void CV_MserTest::run(int) } TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); } + +#endif diff --git a/modules/features2d/test/test_orb.cpp b/modules/features2d/test/test_orb.cpp index 45e76906d..47335b474 100644 --- a/modules/features2d/test/test_orb.cpp +++ b/modules/features2d/test/test_orb.cpp @@ -47,10 +47,10 @@ using namespace cv; TEST(Features2D_ORB, _1996) { - Ptr fd = FeatureDetector::create("ORB"); + Ptr fd = ORB::create(); fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible fd->set("fastThreshold", 20);//more features than the default - Ptr de = DescriptorExtractor::create("ORB"); + Ptr de = fd; Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png"); ASSERT_FALSE(image.empty()); diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp index adb6fab40..f1dec76c4 100644 --- a/modules/stitching/src/matchers.cpp +++ b/modules/stitching/src/matchers.cpp @@ -367,7 +367,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features) else { UMat descriptors; - (*surf)(gray_image, Mat(), features.keypoints, descriptors); + surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors); features.descriptors = descriptors.reshape(1, (int)features.keypoints.size()); } } @@ -375,7 +375,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features) OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels) { grid_size = _grid_size; - orb = makePtr(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels); + orb = ORB::create(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels); } void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) @@ -395,7 +395,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) } if (grid_size.area() == 1) - (*orb)(gray_image, Mat(), features.keypoints, features.descriptors); + orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors); else { features.keypoints.clear(); @@ -425,7 +425,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) // << " gray_image_part.dims=" << gray_image_part.dims << ", " // << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n"); - 
(*orb)(gray_image_part, UMat(), points, descriptors); + orb->detectAndCompute(gray_image_part, UMat(), points, descriptors); features.keypoints.reserve(features.keypoints.size() + points.size()); for (std::vector::iterator kp = points.begin(); kp != points.end(); ++kp) diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index 560d7b947..4875bef23 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -671,7 +671,7 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok) KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr estimator) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator) { - setDetector(makePtr()); + setDetector(GFTTDetector::create()); setOpticalFlowEstimator(makePtr()); setOutlierRejector(makePtr()); } From d017faa06ce975d661ff9082b54aec1e86dbc278 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 16:34:22 +0400 Subject: [PATCH 03/11] now all the samples and opencv_contrib compile! --- .../features2d/include/opencv2/features2d.hpp | 4 + modules/features2d/src/mser.cpp | 32 ++++ modules/stitching/src/matchers.cpp | 2 - .../src/RobustMatcher.h | 18 +- .../src/main_detection.cpp | 173 +++++++++--------- .../src/main_registration.cpp | 107 +++++------ .../tutorial_code/features2D/AKAZE_match.cpp | 6 +- .../AKAZE_tracking/planar_tracking.cpp | 8 +- samples/cpp/videostab.cpp | 4 +- 9 files changed, 197 insertions(+), 157 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index e9222918b..ddf5d66aa 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -197,6 +197,10 @@ public: CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, OutputArray stats=noArray() ) = 0; + + CV_WRAP virtual void detectAndStore( InputArray image, + std::vector >& msers, + OutputArray stats=noArray() ) = 0; }; //! detects corners using FAST algorithm by E. 
Rosten diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index 0deaa26f3..c9db56703 100644 --- a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -301,6 +301,9 @@ public: }; int detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes ); + void detectAndStore( InputArray image, + std::vector >& msers, + OutputArray stats ); void detect( InputArray _src, vector& keypoints, InputArray _mask ); void preprocess1( const Mat& img, int* level_size ) @@ -955,6 +958,35 @@ int MSER_Impl::detectAndLabel( InputArray _src, OutputArray _labels, OutputArray return (int)bboxvec.size(); } +void MSER_Impl::detectAndStore( InputArray image, + std::vector >& msers, + OutputArray stats ) +{ + vector bboxvec; + Mat labels; + int i, x, y, nregs = detectAndLabel(image, labels, bboxvec); + + msers.resize(nregs); + for( i = 0; i < nregs; i++ ) + { + Rect r = bboxvec[i]; + vector& msers_i = msers[i]; + msers_i.clear(); + for( y = r.y; y < r.y + r.height; y++ ) + { + const int* lptr = labels.ptr(y); + for( x = r.x; x < r.x + r.width; x++ ) + { + if( lptr[x] == i+1 ) + msers_i.push_back(Point(x, y)); + } + } + } + + if( stats.needed() ) + Mat(bboxvec).copyTo(stats); +} + void MSER_Impl::detect( InputArray _image, vector& keypoints, InputArray _mask ) { vector bboxes; diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp index f1dec76c4..9279df70e 100644 --- a/modules/stitching/src/matchers.cpp +++ b/modules/stitching/src/matchers.cpp @@ -48,8 +48,6 @@ using namespace cv::cuda; #ifdef HAVE_OPENCV_XFEATURES2D #include "opencv2/xfeatures2d.hpp" - -static bool makeUseOfXfeatures2d = xfeatures2d::initModule_xfeatures2d(); #endif namespace { diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h index 1a77b92d7..a215c2b67 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h @@ -19,23 +19,23 @@ public: RobustMatcher() : ratio_(0.8f) { // ORB is the default feature - detector_ = new cv::OrbFeatureDetector(); - extractor_ = new cv::OrbDescriptorExtractor(); + detector_ = cv::ORB::create(); + extractor_ = cv::ORB::create(); // BruteFroce matcher with Norm Hamming is the default matcher - matcher_ = new cv::BFMatcher(cv::NORM_HAMMING, false); + matcher_ = cv::makePtr(cv::NORM_HAMMING, false); } virtual ~RobustMatcher(); // Set the feature detector - void setFeatureDetector(cv::FeatureDetector * detect) { detector_ = detect; } + void setFeatureDetector(const cv::Ptr& detect) { detector_ = detect; } // Set the descriptor extractor - void setDescriptorExtractor(cv::DescriptorExtractor * desc) { extractor_ = desc; } + void setDescriptorExtractor(const cv::Ptr& desc) { extractor_ = desc; } // Set the matcher - void setDescriptorMatcher(cv::DescriptorMatcher * match) { matcher_ = match; } + void setDescriptorMatcher(const cv::Ptr& match) { matcher_ = match; } // Compute the keypoints of an image void computeKeyPoints( const cv::Mat& image, std::vector& keypoints); @@ -69,11 +69,11 @@ public: private: // pointer to the feature point detector object - cv::FeatureDetector * detector_; + cv::Ptr detector_; // pointer to the feature descriptor extractor object - cv::DescriptorExtractor * extractor_; + cv::Ptr extractor_; // pointer to the matcher object - cv::DescriptorMatcher * matcher_; + cv::Ptr 
matcher_; // max ratio between 1st and 2nd NN float ratio_; }; diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp index f759e4910..6de590e68 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp @@ -18,11 +18,14 @@ /** GLOBAL VARIABLES **/ -std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial +using namespace cv; +using namespace std; -std::string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video -std::string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors -std::string ply_read_path = tutorial_path + "Data/box.ply"; // mesh +string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial + +string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video +string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors +string ply_read_path = tutorial_path + "Data/box.ply"; // mesh // Intrinsic camera parameters: UVC WEBCAM double f = 55; // focal length in mm @@ -35,15 +38,15 @@ double params_WEBCAM[] = { width*f/sx, // fx height/2}; // cy // Some basic colors -cv::Scalar red(0, 0, 255); -cv::Scalar green(0,255,0); -cv::Scalar blue(255,0,0); -cv::Scalar yellow(0,255,255); +Scalar red(0, 0, 255); +Scalar green(0,255,0); +Scalar blue(255,0,0); +Scalar yellow(0,255,255); // Robust Matcher parameters int numKeyPoints = 2000; // number of detected keypoints -float ratio = 0.70f; // ratio test +float ratioTest = 0.70f; // ratio test bool fast_match = true; // fastRobustMatch() or robustMatch() // RANSAC parameters @@ -55,16 +58,16 @@ double confidence = 0.95; // ransac successful confidence. int minInliersKalman = 30; // Kalman threshold updating // PnP parameters -int pnpMethod = cv::SOLVEPNP_ITERATIVE; +int pnpMethod = SOLVEPNP_ITERATIVE; /** Functions headers **/ void help(); -void initKalmanFilter( cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt); -void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurements, - cv::Mat &translation_estimated, cv::Mat &rotation_estimated ); -void fillMeasurements( cv::Mat &measurements, - const cv::Mat &translation_measured, const cv::Mat &rotation_measured); +void initKalmanFilter( KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt); +void updateKalmanFilter( KalmanFilter &KF, Mat &measurements, + Mat &translation_estimated, Mat &rotation_estimated ); +void fillMeasurements( Mat &measurements, + const Mat &translation_measured, const Mat &rotation_measured); /** Main program **/ @@ -73,7 +76,7 @@ int main(int argc, char *argv[]) help(); - const cv::String keys = + const String keys = "{help h | | print this message }" "{video v | | path to recorded video }" "{model | | path to yml model }" @@ -87,7 +90,7 @@ int main(int argc, char *argv[]) "{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS}" "{fast f |true | use of robust fast match }" ; - cv::CommandLineParser parser(argc, argv, keys); + CommandLineParser parser(argc, argv, keys); if (parser.has("help")) { @@ -96,11 +99,11 @@ int main(int argc, char *argv[]) } else { - video_read_path = parser.get("video").size() > 0 ? 
parser.get("video") : video_read_path; - yml_read_path = parser.get("model").size() > 0 ? parser.get("model") : yml_read_path; - ply_read_path = parser.get("mesh").size() > 0 ? parser.get("mesh") : ply_read_path; + video_read_path = parser.get("video").size() > 0 ? parser.get("video") : video_read_path; + yml_read_path = parser.get("model").size() > 0 ? parser.get("model") : yml_read_path; + ply_read_path = parser.get("mesh").size() > 0 ? parser.get("mesh") : ply_read_path; numKeyPoints = !parser.has("keypoints") ? parser.get("keypoints") : numKeyPoints; - ratio = !parser.has("ratio") ? parser.get("ratio") : ratio; + ratioTest = !parser.has("ratio") ? parser.get("ratio") : ratioTest; fast_match = !parser.has("fast") ? parser.get("fast") : fast_match; iterationsCount = !parser.has("iterations") ? parser.get("iterations") : iterationsCount; reprojectionError = !parser.has("error") ? parser.get("error") : reprojectionError; @@ -120,45 +123,45 @@ int main(int argc, char *argv[]) RobustMatcher rmatcher; // instantiate RobustMatcher - cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instatiate ORB feature detector - cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instatiate ORB descriptor extractor + Ptr orb = ORB::create(); - rmatcher.setFeatureDetector(detector); // set feature detector - rmatcher.setDescriptorExtractor(extractor); // set descriptor extractor + rmatcher.setFeatureDetector(orb); // set feature detector + rmatcher.setDescriptorExtractor(orb); // set descriptor extractor - cv::Ptr indexParams = cv::makePtr(6, 12, 1); // instantiate LSH index parameters - cv::Ptr searchParams = cv::makePtr(50); // instantiate flann search parameters + Ptr indexParams = makePtr(6, 12, 1); // instantiate LSH index parameters + Ptr searchParams = makePtr(50); // instantiate flann search parameters - cv::DescriptorMatcher * matcher = new cv::FlannBasedMatcher(indexParams, searchParams); // instantiate FlannBased matcher + // instantiate FlannBased matcher + Ptr matcher = makePtr(indexParams, searchParams); rmatcher.setDescriptorMatcher(matcher); // set matcher - rmatcher.setRatio(ratio); // set ratio test parameter + rmatcher.setRatio(ratioTest); // set ratio test parameter - cv::KalmanFilter KF; // instantiate Kalman Filter + KalmanFilter KF; // instantiate Kalman Filter int nStates = 18; // the number of states int nMeasurements = 6; // the number of measured states int nInputs = 0; // the number of control actions double dt = 0.125; // time between measurements (1/FPS) initKalmanFilter(KF, nStates, nMeasurements, nInputs, dt); // init function - cv::Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(cv::Scalar(0)); + Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(Scalar(0)); bool good_measurement = false; // Get the MODEL INFO - std::vector list_points3d_model = model.get_points3d(); // list with model 3D coordinates - cv::Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate + vector list_points3d_model = model.get_points3d(); // list with model 3D coordinates + Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate // Create & Open Window - cv::namedWindow("REAL TIME DEMO", cv::WINDOW_KEEPRATIO); + namedWindow("REAL TIME DEMO", WINDOW_KEEPRATIO); - cv::VideoCapture cap; // instantiate VideoCapture + VideoCapture cap; // instantiate VideoCapture cap.open(video_read_path); // open a recorded video if(!cap.isOpened()) // check if we 
succeeded { - std::cout << "Could not open the camera device" << std::endl; + cout << "Could not open the camera device" << endl; return -1; } @@ -175,9 +178,9 @@ int main(int argc, char *argv[]) // start the clock time(&start); - cv::Mat frame, frame_vis; + Mat frame, frame_vis; - while(cap.read(frame) && cv::waitKey(30) != 27) // capture frame until ESC is pressed + while(cap.read(frame) && waitKey(30) != 27) // capture frame until ESC is pressed { frame_vis = frame.clone(); // refresh visualisation frame @@ -185,8 +188,8 @@ int main(int argc, char *argv[]) // -- Step 1: Robust matching between model descriptors and scene descriptors - std::vector good_matches; // to obtain the 3D points of the model - std::vector keypoints_scene; // to obtain the 2D points of the scene + vector good_matches; // to obtain the 3D points of the model + vector keypoints_scene; // to obtain the 2D points of the scene if(fast_match) @@ -201,13 +204,13 @@ int main(int argc, char *argv[]) // -- Step 2: Find out the 2D/3D correspondences - std::vector list_points3d_model_match; // container for the model 3D coordinates found in the scene - std::vector list_points2d_scene_match; // container for the model 2D coordinates found in the scene + vector list_points3d_model_match; // container for the model 3D coordinates found in the scene + vector list_points2d_scene_match; // container for the model 2D coordinates found in the scene for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index) { - cv::Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model - cv::Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene + Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model + Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene list_points3d_model_match.push_back(point3d_model); // add 3D point list_points2d_scene_match.push_back(point2d_scene); // add 2D point } @@ -216,8 +219,8 @@ int main(int argc, char *argv[]) draw2DPoints(frame_vis, list_points2d_scene_match, red); - cv::Mat inliers_idx; - std::vector list_points2d_inliers; + Mat inliers_idx; + vector list_points2d_inliers; if(good_matches.size() > 0) // None matches, then RANSAC crashes { @@ -231,7 +234,7 @@ int main(int argc, char *argv[]) for(int inliers_index = 0; inliers_index < inliers_idx.rows; ++inliers_index) { int n = inliers_idx.at(inliers_index); // i-inlier - cv::Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D + Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D list_points2d_inliers.push_back(point2d); // add i-inlier to list } @@ -248,11 +251,11 @@ int main(int argc, char *argv[]) { // Get the measured translation - cv::Mat translation_measured(3, 1, CV_64F); + Mat translation_measured(3, 1, CV_64F); translation_measured = pnp_detection.get_t_matrix(); // Get the measured rotation - cv::Mat rotation_measured(3, 3, CV_64F); + Mat rotation_measured(3, 3, CV_64F); rotation_measured = pnp_detection.get_R_matrix(); // fill the measurements vector @@ -263,8 +266,8 @@ int main(int argc, char *argv[]) } // Instantiate estimated translation and rotation - cv::Mat translation_estimated(3, 1, CV_64F); - cv::Mat rotation_estimated(3, 3, CV_64F); + Mat translation_estimated(3, 1, CV_64F); + Mat rotation_estimated(3, 3, CV_64F); // update the Kalman filter with good measurements 
updateKalmanFilter( KF, measurements, @@ -288,11 +291,11 @@ int main(int argc, char *argv[]) } float l = 5; - std::vector pose_points2d; - pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,0))); // axis center - pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(l,0,0))); // axis x - pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,l,0))); // axis y - pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,l))); // axis z + vector pose_points2d; + pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,0))); // axis center + pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(l,0,0))); // axis x + pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,l,0))); // axis y + pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,l))); // axis z draw3DCoordinateAxes(frame_vis, pose_points2d); // draw axes // FRAME RATE @@ -316,49 +319,49 @@ int main(int argc, char *argv[]) // Draw some debug text int inliers_int = inliers_idx.rows; int outliers_int = (int)good_matches.size() - inliers_int; - std::string inliers_str = IntToString(inliers_int); - std::string outliers_str = IntToString(outliers_int); - std::string n = IntToString((int)good_matches.size()); - std::string text = "Found " + inliers_str + " of " + n + " matches"; - std::string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str; + string inliers_str = IntToString(inliers_int); + string outliers_str = IntToString(outliers_int); + string n = IntToString((int)good_matches.size()); + string text = "Found " + inliers_str + " of " + n + " matches"; + string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str; drawText(frame_vis, text, green); drawText2(frame_vis, text2, red); - cv::imshow("REAL TIME DEMO", frame_vis); + imshow("REAL TIME DEMO", frame_vis); } // Close and Destroy Window - cv::destroyWindow("REAL TIME DEMO"); + destroyWindow("REAL TIME DEMO"); - std::cout << "GOODBYE ..." << std::endl; + cout << "GOODBYE ..." << endl; } /**********************************************************************************************************/ void help() { -std::cout -<< "--------------------------------------------------------------------------" << std::endl +cout +<< "--------------------------------------------------------------------------" << endl << "This program shows how to detect an object given its 3D textured model. You can choose to " -<< "use a recorded video or the webcam." << std::endl -<< "Usage:" << std::endl -<< "./cpp-tutorial-pnp_detection -help" << std::endl -<< "Keys:" << std::endl -<< "'esc' - to quit." << std::endl -<< "--------------------------------------------------------------------------" << std::endl -<< std::endl; +<< "use a recorded video or the webcam." << endl +<< "Usage:" << endl +<< "./cpp-tutorial-pnp_detection -help" << endl +<< "Keys:" << endl +<< "'esc' - to quit." 
<< endl +<< "--------------------------------------------------------------------------" << endl +<< endl; } /**********************************************************************************************************/ -void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt) +void initKalmanFilter(KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt) { KF.init(nStates, nMeasurements, nInputs, CV_64F); // init Kalman Filter - cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-5)); // set process noise - cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-2)); // set measurement noise - cv::setIdentity(KF.errorCovPost, cv::Scalar::all(1)); // error covariance + setIdentity(KF.processNoiseCov, Scalar::all(1e-5)); // set process noise + setIdentity(KF.measurementNoiseCov, Scalar::all(1e-2)); // set measurement noise + setIdentity(KF.errorCovPost, Scalar::all(1)); // error covariance /** DYNAMIC MODEL **/ @@ -424,15 +427,15 @@ void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int } /**********************************************************************************************************/ -void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement, - cv::Mat &translation_estimated, cv::Mat &rotation_estimated ) +void updateKalmanFilter( KalmanFilter &KF, Mat &measurement, + Mat &translation_estimated, Mat &rotation_estimated ) { // First predict, to update the internal statePre variable - cv::Mat prediction = KF.predict(); + Mat prediction = KF.predict(); // The "correct" phase that is going to use the predicted value and our measurement - cv::Mat estimated = KF.correct(measurement); + Mat estimated = KF.correct(measurement); // Estimated translation translation_estimated.at(0) = estimated.at(0); @@ -440,7 +443,7 @@ void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement, translation_estimated.at(2) = estimated.at(2); // Estimated euler angles - cv::Mat eulers_estimated(3, 1, CV_64F); + Mat eulers_estimated(3, 1, CV_64F); eulers_estimated.at(0) = estimated.at(9); eulers_estimated.at(1) = estimated.at(10); eulers_estimated.at(2) = estimated.at(11); @@ -451,11 +454,11 @@ void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement, } /**********************************************************************************************************/ -void fillMeasurements( cv::Mat &measurements, - const cv::Mat &translation_measured, const cv::Mat &rotation_measured) +void fillMeasurements( Mat &measurements, + const Mat &translation_measured, const Mat &rotation_measured) { // Convert rotation matrix to euler angles - cv::Mat measured_eulers(3, 1, CV_64F); + Mat measured_eulers(3, 1, CV_64F); measured_eulers = rot2euler(rotation_measured); // Set measurement to predict diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp index baea3a55f..da775a063 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp @@ -13,13 +13,16 @@ #include "ModelRegistration.h" #include "Utils.h" +using namespace cv; +using namespace std; + /** GLOBAL VARIABLES **/ -std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial +string tutorial_path = 
"../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial -std::string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register -std::string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh -std::string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file +string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register +string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh +string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file // Boolean the know if the registration it's done bool end_registration = false; @@ -39,10 +42,10 @@ int n = 8; int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4 // Some basic colors -cv::Scalar red(0, 0, 255); -cv::Scalar green(0,255,0); -cv::Scalar blue(255,0,0); -cv::Scalar yellow(0,255,255); +Scalar red(0, 0, 255); +Scalar green(0,255,0); +Scalar blue(255,0,0); +Scalar yellow(0,255,255); /* * CREATE MODEL REGISTRATION OBJECT @@ -61,13 +64,13 @@ void help(); // Mouse events for model registration static void onMouseModelRegistration( int event, int x, int y, int, void* ) { - if ( event == cv::EVENT_LBUTTONUP ) + if ( event == EVENT_LBUTTONUP ) { int n_regist = registration.getNumRegist(); int n_vertex = pts[n_regist]; - cv::Point2f point_2d = cv::Point2f((float)x,(float)y); - cv::Point3f point_3d = mesh.getVertex(n_vertex-1); + Point2f point_2d = Point2f((float)x,(float)y); + Point3f point_3d = mesh.getVertex(n_vertex-1); bool is_registrable = registration.is_registrable(); if (is_registrable) @@ -92,23 +95,23 @@ int main() //Instantiate robust matcher: detector, extractor, matcher RobustMatcher rmatcher; - cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); + Ptr detector = ORB::create(numKeyPoints); rmatcher.setFeatureDetector(detector); /** GROUND TRUTH OF THE FIRST IMAGE **/ // Create & Open Window - cv::namedWindow("MODEL REGISTRATION", cv::WINDOW_KEEPRATIO); + namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO); // Set up the mouse events - cv::setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 ); + setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 ); // Open the image to register - cv::Mat img_in = cv::imread(img_path, cv::IMREAD_COLOR); - cv::Mat img_vis = img_in.clone(); + Mat img_in = imread(img_path, IMREAD_COLOR); + Mat img_vis = img_in.clone(); if (!img_in.data) { - std::cout << "Could not open or find the image" << std::endl; + cout << "Could not open or find the image" << endl; return -1; } @@ -116,18 +119,18 @@ int main() int num_registrations = n; registration.setNumMax(num_registrations); - std::cout << "Click the box corners ..." << std::endl; - std::cout << "Waiting ..." << std::endl; + cout << "Click the box corners ..." << endl; + cout << "Waiting ..." 
<< endl; // Loop until all the points are registered - while ( cv::waitKey(30) < 0 ) + while ( waitKey(30) < 0 ) { // Refresh debug image img_vis = img_in.clone(); // Current registered points - std::vector list_points2d = registration.get_points2d(); - std::vector list_points3d = registration.get_points3d(); + vector list_points2d = registration.get_points2d(); + vector list_points3d = registration.get_points3d(); // Draw current registered points drawPoints(img_vis, list_points2d, list_points3d, red); @@ -139,7 +142,7 @@ int main() // Draw debug text int n_regist = registration.getNumRegist(); int n_vertex = pts[n_regist]; - cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1); + Point3f current_poin3d = mesh.getVertex(n_vertex-1); drawQuestion(img_vis, current_poin3d, green); drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red); @@ -153,43 +156,43 @@ int main() } // Show the image - cv::imshow("MODEL REGISTRATION", img_vis); + imshow("MODEL REGISTRATION", img_vis); } /** COMPUTE CAMERA POSE **/ - std::cout << "COMPUTING POSE ..." << std::endl; + cout << "COMPUTING POSE ..." << endl; // The list of registered points - std::vector list_points2d = registration.get_points2d(); - std::vector list_points3d = registration.get_points3d(); + vector list_points2d = registration.get_points2d(); + vector list_points3d = registration.get_points3d(); // Estimate pose given the registered points - bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, cv::SOLVEPNP_ITERATIVE); + bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE); if ( is_correspondence ) { - std::cout << "Correspondence found" << std::endl; + cout << "Correspondence found" << endl; // Compute all the 2D points of the mesh to verify the algorithm and draw it - std::vector list_points2d_mesh = pnp_registration.verify_points(&mesh); + vector list_points2d_mesh = pnp_registration.verify_points(&mesh); draw2DPoints(img_vis, list_points2d_mesh, green); } else { - std::cout << "Correspondence not found" << std::endl << std::endl; + cout << "Correspondence not found" << endl << endl; } // Show the image - cv::imshow("MODEL REGISTRATION", img_vis); + imshow("MODEL REGISTRATION", img_vis); // Show image until ESC pressed - cv::waitKey(0); + waitKey(0); /** COMPUTE 3D of the image Keypoints **/ // Containers for keypoints and descriptors of the model - std::vector keypoints_model; - cv::Mat descriptors; + vector keypoints_model; + Mat descriptors; // Compute keypoints and descriptors rmatcher.computeKeyPoints(img_in, keypoints_model); @@ -197,8 +200,8 @@ int main() // Check if keypoints are on the surface of the registration image and add to the model for (unsigned int i = 0; i < keypoints_model.size(); ++i) { - cv::Point2f point2d(keypoints_model[i].pt); - cv::Point3f point3d; + Point2f point2d(keypoints_model[i].pt); + Point3f point3d; bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d); if (on_surface) { @@ -219,12 +222,12 @@ int main() img_vis = img_in.clone(); // The list of the points2d of the model - std::vector list_points_in = model.get_points2d_in(); - std::vector list_points_out = model.get_points2d_out(); + vector list_points_in = model.get_points2d_in(); + vector list_points_out = model.get_points2d_out(); // Draw some debug text - std::string num = IntToString((int)list_points_in.size()); - std::string text = "There are " + num + " inliers"; + string num = 
IntToString((int)list_points_in.size()); + string text = "There are " + num + " inliers"; drawText(img_vis, text, green); // Draw some debug text @@ -240,26 +243,26 @@ int main() draw2DPoints(img_vis, list_points_out, red); // Show the image - cv::imshow("MODEL REGISTRATION", img_vis); + imshow("MODEL REGISTRATION", img_vis); // Wait until ESC pressed - cv::waitKey(0); + waitKey(0); // Close and Destroy Window - cv::destroyWindow("MODEL REGISTRATION"); + destroyWindow("MODEL REGISTRATION"); - std::cout << "GOODBYE" << std::endl; + cout << "GOODBYE" << endl; } /**********************************************************************************************************/ void help() { - std::cout - << "--------------------------------------------------------------------------" << std::endl - << "This program shows how to create your 3D textured model. " << std::endl - << "Usage:" << std::endl - << "./cpp-tutorial-pnp_registration" << std::endl - << "--------------------------------------------------------------------------" << std::endl - << std::endl; + cout + << "--------------------------------------------------------------------------" << endl + << "This program shows how to create your 3D textured model. " << endl + << "Usage:" << endl + << "./cpp-tutorial-pnp_registration" << endl + << "--------------------------------------------------------------------------" << endl + << endl; } diff --git a/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp b/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp index 894627eda..5662cc9f2 100755 --- a/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp +++ b/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp @@ -22,9 +22,9 @@ int main(void) vector kpts1, kpts2; Mat desc1, desc2; - AKAZE akaze; - akaze(img1, noArray(), kpts1, desc1); - akaze(img2, noArray(), kpts2, desc2); + Ptr akaze = AKAZE::create(); + akaze->detectAndCompute(img1, noArray(), kpts1, desc1); + akaze->detectAndCompute(img2, noArray(), kpts2, desc2); BFMatcher matcher(NORM_HAMMING); vector< vector > nn_matches; diff --git a/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp b/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp index ddeae5c64..0c8cfdc3d 100755 --- a/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp +++ b/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp @@ -41,7 +41,7 @@ protected: void Tracker::setFirstFrame(const Mat frame, vector bb, string title, Stats& stats) { first_frame = frame.clone(); - (*detector)(first_frame, noArray(), first_kp, first_desc); + detector->detectAndCompute(first_frame, noArray(), first_kp, first_desc); stats.keypoints = (int)first_kp.size(); drawBoundingBox(first_frame, bb); putText(first_frame, title, Point(0, 60), FONT_HERSHEY_PLAIN, 5, Scalar::all(0), 4); @@ -52,7 +52,7 @@ Mat Tracker::process(const Mat frame, Stats& stats) { vector kp; Mat desc; - (*detector)(frame, noArray(), kp, desc); + detector->detectAndCompute(frame, noArray(), kp, desc); stats.keypoints = (int)kp.size(); vector< vector > matches; @@ -135,9 +135,9 @@ int main(int argc, char **argv) return 1; } fs["bounding_box"] >> bb; - Ptr akaze = Feature2D::create("AKAZE"); + Ptr akaze = AKAZE::create(); akaze->set("threshold", akaze_thresh); - Ptr orb = Feature2D::create("ORB"); + Ptr orb = ORB::create(); Ptr matcher = DescriptorMatcher::create("BruteForce-Hamming"); Tracker akaze_tracker(akaze, matcher); Tracker orb_tracker(orb, matcher); diff --git a/samples/cpp/videostab.cpp 
b/samples/cpp/videostab.cpp index 261badd45..703acba41 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -227,7 +227,7 @@ public: #endif Ptr kbest = makePtr(est); - kbest->setDetector(makePtr(argi(prefix + "nkps"))); + kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps"))); kbest->setOutlierRejector(outlierRejector); return kbest; } @@ -268,7 +268,7 @@ public: #endif Ptr kbest = makePtr(est); - kbest->setDetector(makePtr(argi(prefix + "nkps"))); + kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps"))); kbest->setOutlierRejector(outlierRejector); return kbest; } From 97a5dd0eade17e2d4e67379998b5116d86f1192f Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 17:00:40 +0400 Subject: [PATCH 04/11] fixed several test failures; currently 9 out of 73 tests fail --- modules/features2d/src/akaze.cpp | 22 +++++++++++++++++ modules/features2d/src/brisk.cpp | 2 +- modules/features2d/src/feature2d.cpp | 10 ++++++++ modules/features2d/src/kaze.cpp | 20 ++++++++++++++++ modules/features2d/src/kaze/KAZEFeatures.cpp | 2 +- modules/features2d/test/test_brisk.cpp | 2 +- .../test/test_matchers_algorithmic.cpp | 6 +++-- modules/features2d/test/test_orb.cpp | 4 +--- .../test_rotation_and_scale_invariance.cpp | 24 ++++++------------- 9 files changed, 67 insertions(+), 25 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index b72b9402a..290031d28 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -187,6 +187,28 @@ namespace cv } } + void write(FileStorage& fs) const + { + fs << "descriptor" << descriptor; + fs << "descriptor_channels" << descriptor_channels; + fs << "descriptor_size" << descriptor_size; + fs << "threshold" << threshold; + fs << "octaves" << octaves; + fs << "sublevels" << sublevels; + fs << "diffusivity" << diffusivity; + } + + void read(const FileNode& fn) + { + descriptor = (int)fn["descriptor"]; + descriptor_channels = (int)fn["descriptor_channels"]; + descriptor_size = (int)fn["descriptor_size"]; + threshold = (float)fn["threshold"]; + octaves = (int)fn["octaves"]; + sublevels = (int)fn["sublevels"]; + diffusivity = (int)fn["diffusivity"]; + } + int descriptor; int descriptor_channels; int descriptor_size; diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index 1facb3eac..d123a3a81 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -2099,7 +2099,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode) void BriskLayer::getAgastPoints(int threshold, std::vector& keypoints) { - fast_9_16_->set("threshold", threshold); + fast_9_16_ = FastFeatureDetector::create(threshold); fast_9_16_->detect(img_, keypoints); // also write scores diff --git a/modules/features2d/src/feature2d.cpp b/modules/features2d/src/feature2d.cpp index 039b33d35..93b650bcb 100644 --- a/modules/features2d/src/feature2d.cpp +++ b/modules/features2d/src/feature2d.cpp @@ -60,6 +60,11 @@ void Feature2D::detect( InputArray image, std::vector& keypoints, InputArray mask ) { + if( image.empty() ) + { + keypoints.clear(); + return; + } detectAndCompute(image, mask, keypoints, noArray(), false); } @@ -97,6 +102,11 @@ void Feature2D::compute( InputArray image, std::vector& keypoints, OutputArray descriptors ) { + if( image.empty() ) + { + descriptors.release(); + return; + } detectAndCompute(image, noArray(), keypoints, descriptors, true); } diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp 
index 83eeecc4e..229bb0f08 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -132,6 +132,26 @@ namespace cv } } + void write(FileStorage& fs) const + { + fs << "extended" << (int)extended; + fs << "upright" << (int)upright; + fs << "threshold" << threshold; + fs << "octaves" << octaves; + fs << "sublevels" << sublevels; + fs << "diffusivity" << diffusivity; + } + + void read(const FileNode& fn) + { + extended = (int)fn["extended"] != 0; + upright = (int)fn["upright"] != 0; + threshold = (float)fn["threshold"]; + octaves = (int)fn["octaves"]; + sublevels = (int)fn["sublevels"]; + diffusivity = (int)fn["diffusivity"]; + } + bool extended; bool upright; float threshold; diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 702a8a021..294898c26 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -145,7 +145,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const Mat &img) */ void KAZEFeatures::Compute_KContrast(const Mat &img, const float &kpercentile) { - options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0); + options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0); } /* ************************************************************************* */ diff --git a/modules/features2d/test/test_brisk.cpp b/modules/features2d/test/test_brisk.cpp index 38e07c3f6..4e1d0370d 100644 --- a/modules/features2d/test/test_brisk.cpp +++ b/modules/features2d/test/test_brisk.cpp @@ -72,7 +72,7 @@ void CV_BRISKTest::run( int ) cvtColor(image1, gray1, COLOR_BGR2GRAY); cvtColor(image2, gray2, COLOR_BGR2GRAY); - Ptr detector = Algorithm::create("Feature2D.BRISK"); + Ptr detector = BRISK::create(); vector keypoints1; vector keypoints2; diff --git a/modules/features2d/test/test_matchers_algorithmic.cpp b/modules/features2d/test/test_matchers_algorithmic.cpp index a15578a1d..147ae55bb 100644 --- a/modules/features2d/test/test_matchers_algorithmic.cpp +++ b/modules/features2d/test/test_matchers_algorithmic.cpp @@ -532,12 +532,14 @@ void CV_DescriptorMatcherTest::run( int ) TEST( Features2d_DescriptorMatcher_BruteForce, regression ) { - CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", Algorithm::create("DescriptorMatcher.BFMatcher"), 0.01f ); + CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", + DescriptorMatcher::create("BFMatcher"), 0.01f ); test.safe_run(); } TEST( Features2d_DescriptorMatcher_FlannBased, regression ) { - CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", Algorithm::create("DescriptorMatcher.FlannBasedMatcher"), 0.04f ); + CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", + DescriptorMatcher::create("FlannBasedMatcher"), 0.04f ); test.safe_run(); } diff --git a/modules/features2d/test/test_orb.cpp b/modules/features2d/test/test_orb.cpp index 47335b474..b7f854ba8 100644 --- a/modules/features2d/test/test_orb.cpp +++ b/modules/features2d/test/test_orb.cpp @@ -47,9 +47,7 @@ using namespace cv; TEST(Features2D_ORB, _1996) { - Ptr fd = ORB::create(); - fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible - fd->set("fastThreshold", 20);//more features than the default + Ptr fd = ORB::create(10000, 1.2f, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20); Ptr de = fd; Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png"); 
diff --git a/modules/features2d/test/test_rotation_and_scale_invariance.cpp b/modules/features2d/test/test_rotation_and_scale_invariance.cpp index fa6a136f3..90fc2f81c 100644 --- a/modules/features2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/features2d/test/test_rotation_and_scale_invariance.cpp @@ -615,19 +615,15 @@ TEST(Features2d_RotationInvariance_Detector_ORB, regression) TEST(Features2d_RotationInvariance_Descriptor_BRISK, regression) { - DescriptorRotationInvarianceTest test(Algorithm::create("Feature2D.BRISK"), - Algorithm::create("Feature2D.BRISK"), - Algorithm::create("Feature2D.BRISK")->defaultNorm(), - 0.99f); + Ptr f2d = BRISK::create(); + DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f); test.safe_run(); } TEST(Features2d_RotationInvariance_Descriptor_ORB, regression) { - DescriptorRotationInvarianceTest test(Algorithm::create("Feature2D.ORB"), - Algorithm::create("Feature2D.ORB"), - Algorithm::create("Feature2D.ORB")->defaultNorm(), - 0.99f); + Ptr f2d = ORB::create(); + DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f); test.safe_run(); } @@ -646,25 +642,19 @@ TEST(Features2d_RotationInvariance_Descriptor_ORB, regression) TEST(Features2d_ScaleInvariance_Detector_BRISK, regression) { - DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.BRISK"), - 0.08f, - 0.49f); + DetectorScaleInvarianceTest test(BRISK::create(), 0.08f, 0.49f); test.safe_run(); } TEST(Features2d_ScaleInvariance_Detector_KAZE, regression) { - DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.KAZE"), - 0.08f, - 0.49f); + DetectorScaleInvarianceTest test(KAZE::create(), 0.08f, 0.49f); test.safe_run(); } TEST(Features2d_ScaleInvariance_Detector_AKAZE, regression) { - DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.AKAZE"), - 0.08f, - 0.49f); + DetectorScaleInvarianceTest test(AKAZE::create(), 0.08f, 0.49f); test.safe_run(); } From c5261ea3d228e352ec422f7959e348eb661e425d Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 20:58:29 +0400 Subject: [PATCH 05/11] all the tests now pass except for MSER --- modules/core/include/opencv2/core.hpp | 3 + modules/core/src/algorithm.cpp | 3 + .../features2d/include/opencv2/features2d.hpp | 5 +- modules/features2d/perf/opencl/perf_fast.cpp | 5 +- modules/features2d/perf/perf_fast.cpp | 5 +- modules/features2d/src/brisk.cpp | 2 +- modules/features2d/src/fast.cpp | 24 + modules/features2d/src/mser.cpp | 3 + .../test/test_descriptors_regression.cpp | 4 +- modules/features2d/test/test_keypoints.cpp | 683 ++++++++++++++++++ .../test/test_matchers_algorithmic.cpp | 4 +- .../test_rotation_and_scale_invariance.cpp | 4 +- modules/stitching/src/matchers.cpp | 6 +- 13 files changed, 730 insertions(+), 21 deletions(-) diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index a258e8c4d..2fba9fc6b 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -874,6 +874,9 @@ public: virtual ~Algorithm(); String name() const; + virtual void set(int, double); + virtual double get(int) const; + template typename ParamType<_Tp>::member_type get(const String& name) const; template typename ParamType<_Tp>::member_type get(const char* name) const; diff --git a/modules/core/src/algorithm.cpp b/modules/core/src/algorithm.cpp index 9f9493e8a..bd28bdcdc 100644 --- a/modules/core/src/algorithm.cpp +++ b/modules/core/src/algorithm.cpp @@ -179,6 +179,9 @@ String Algorithm::name() const return 
info()->name(); } +void Algorithm::set(int, double) {} +double Algorithm::get(int) const { return 0.; } + void Algorithm::set(const String& parameter, int value) { info()->set(this, parameter.c_str(), ParamType::type, &value); diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index ddf5d66aa..243d75577 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -213,9 +213,10 @@ CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, class CV_EXPORTS_W FastFeatureDetector : public Feature2D { public: - enum Type + enum { - TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2, + THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002, }; CV_WRAP static Ptr create( int threshold=10, diff --git a/modules/features2d/perf/opencl/perf_fast.cpp b/modules/features2d/perf/opencl/perf_fast.cpp index c4a8e078c..be310bacb 100644 --- a/modules/features2d/perf/opencl/perf_fast.cpp +++ b/modules/features2d/perf/opencl/perf_fast.cpp @@ -32,11 +32,8 @@ OCL_PERF_TEST_P(FASTFixture, FastDetect, testing::Combine( mframe.copyTo(frame); declare.in(frame); - Ptr fd = Algorithm::create("Feature2D.FAST"); + Ptr fd = FastFeatureDetector::create(20, true, type); ASSERT_FALSE( fd.empty() ); - fd->set("threshold", 20); - fd->set("nonmaxSuppression", true); - fd->set("type", type); vector points; OCL_TEST_CYCLE() fd->detect(frame, points); diff --git a/modules/features2d/perf/perf_fast.cpp b/modules/features2d/perf/perf_fast.cpp index 25b82bd41..f70636400 100644 --- a/modules/features2d/perf/perf_fast.cpp +++ b/modules/features2d/perf/perf_fast.cpp @@ -30,11 +30,8 @@ PERF_TEST_P(fast, detect, testing::Combine( declare.in(frame); - Ptr fd = Algorithm::create("Feature2D.FAST"); + Ptr fd = FastFeatureDetector::create(20, true, type); ASSERT_FALSE( fd.empty() ); - fd->set("threshold", 20); - fd->set("nonmaxSuppression", true); - fd->set("type", type); vector points; TEST_CYCLE() fd->detect(frame, points); diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index d123a3a81..0a14148cf 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -2099,7 +2099,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode) void BriskLayer::getAgastPoints(int threshold, std::vector& keypoints) { - fast_9_16_ = FastFeatureDetector::create(threshold); + fast_9_16_->set(FastFeatureDetector::THRESHOLD, threshold); fast_9_16_->detect(img_, keypoints); // also write scores diff --git a/modules/features2d/src/fast.cpp b/modules/features2d/src/fast.cpp index 8acdbb1dd..d829e46b0 100644 --- a/modules/features2d/src/fast.cpp +++ b/modules/features2d/src/fast.cpp @@ -380,6 +380,30 @@ public: KeyPointsFilter::runByPixelsMask( keypoints, mask ); } + void set(int prop, double value) + { + if(prop == THRESHOLD) + threshold = cvRound(value); + else if(prop == NONMAX_SUPPRESSION) + nonmaxSuppression = value != 0; + else if(prop == FAST_N) + type = cvRound(value); + else + CV_Error(Error::StsBadArg, ""); + } + + double get(int prop) const + { + if(prop == THRESHOLD) + return threshold; + if(prop == NONMAX_SUPPRESSION) + return nonmaxSuppression; + if(prop == FAST_N) + return type; + CV_Error(Error::StsBadArg, ""); + return 0; + } + int threshold; bool nonmaxSuppression; int type; diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index c9db56703..49d173b9b 100644 --- 
a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -800,9 +800,12 @@ extractMSER_8uC3( const Mat& src, Mat& labels, double emean = 0; Mat dx( src.rows, src.cols-1, CV_32FC1 ); Mat dy( src.rows, src.cols, CV_32FC1 ); + Ne = preprocessMSER_8UC3( map, edge, emean, src, dx, dy, Ne, params.edgeBlurSize ); emean = emean / (double)Ne; + std::sort(edge, edge + Ne, LessThanEdge()); + MSCREdge* edge_ub = edge+Ne; MSCREdge* edgeptr = edge; TempMSCR* mscrptr = mscr; diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index baffeeb3f..c0956fda5 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -106,8 +106,6 @@ public: ~CV_DescriptorExtractorTest() { - if(!detector.empty()) - detector.release(); } protected: virtual void createDescriptorExtractor() {} @@ -333,7 +331,7 @@ TEST( Features2d_DescriptorExtractor_KAZE, regression ) { CV_DescriptorExtractorTest< L2 > test( "descriptor-kaze", 0.03f, KAZE::create(), - L2() ); + L2(), KAZE::create() ); test.safe_run(); } diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index 561c3cc6e..c494f9882 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -41,6 +41,7 @@ #include "test_precomp.hpp" #include "opencv2/highgui.hpp" +#include "opencv2/core/core_c.h" using namespace std; using namespace cv; @@ -48,6 +49,683 @@ using namespace cv; const string FEATURES2D_DIR = "features2d"; const string IMAGE_FILENAME = "tsukuba.png"; +const int TABLE_SIZE = 400; + +static const float chitab3[]= +{ + 0.f, 0.0150057f, 0.0239478f, 0.0315227f, + 0.0383427f, 0.0446605f, 0.0506115f, 0.0562786f, + 0.0617174f, 0.0669672f, 0.0720573f, 0.0770099f, + 0.081843f, 0.0865705f, 0.0912043f, 0.0957541f, + 0.100228f, 0.104633f, 0.108976f, 0.113261f, + 0.117493f, 0.121676f, 0.125814f, 0.12991f, + 0.133967f, 0.137987f, 0.141974f, 0.145929f, + 0.149853f, 0.15375f, 0.15762f, 0.161466f, + 0.165287f, 0.169087f, 0.172866f, 0.176625f, + 0.180365f, 0.184088f, 0.187794f, 0.191483f, + 0.195158f, 0.198819f, 0.202466f, 0.2061f, + 0.209722f, 0.213332f, 0.216932f, 0.220521f, + 0.2241f, 0.22767f, 0.231231f, 0.234783f, + 0.238328f, 0.241865f, 0.245395f, 0.248918f, + 0.252435f, 0.255947f, 0.259452f, 0.262952f, + 0.266448f, 0.269939f, 0.273425f, 0.276908f, + 0.280386f, 0.283862f, 0.287334f, 0.290803f, + 0.29427f, 0.297734f, 0.301197f, 0.304657f, + 0.308115f, 0.311573f, 0.315028f, 0.318483f, + 0.321937f, 0.32539f, 0.328843f, 0.332296f, + 0.335749f, 0.339201f, 0.342654f, 0.346108f, + 0.349562f, 0.353017f, 0.356473f, 0.35993f, + 0.363389f, 0.366849f, 0.37031f, 0.373774f, + 0.377239f, 0.380706f, 0.384176f, 0.387648f, + 0.391123f, 0.3946f, 0.39808f, 0.401563f, + 0.405049f, 0.408539f, 0.412032f, 0.415528f, + 0.419028f, 0.422531f, 0.426039f, 0.429551f, + 0.433066f, 0.436586f, 0.440111f, 0.44364f, + 0.447173f, 0.450712f, 0.454255f, 0.457803f, + 0.461356f, 0.464915f, 0.468479f, 0.472049f, + 0.475624f, 0.479205f, 0.482792f, 0.486384f, + 0.489983f, 0.493588f, 0.4972f, 0.500818f, + 0.504442f, 0.508073f, 0.511711f, 0.515356f, + 0.519008f, 0.522667f, 0.526334f, 0.530008f, + 0.533689f, 0.537378f, 0.541075f, 0.54478f, + 0.548492f, 0.552213f, 0.555942f, 0.55968f, + 0.563425f, 0.56718f, 0.570943f, 0.574715f, + 0.578497f, 0.582287f, 0.586086f, 0.589895f, + 0.593713f, 0.597541f, 0.601379f, 0.605227f, + 0.609084f, 0.612952f, 0.61683f, 0.620718f, + 
0.624617f, 0.628526f, 0.632447f, 0.636378f, + 0.64032f, 0.644274f, 0.648239f, 0.652215f, + 0.656203f, 0.660203f, 0.664215f, 0.668238f, + 0.672274f, 0.676323f, 0.680384f, 0.684457f, + 0.688543f, 0.692643f, 0.696755f, 0.700881f, + 0.70502f, 0.709172f, 0.713339f, 0.717519f, + 0.721714f, 0.725922f, 0.730145f, 0.734383f, + 0.738636f, 0.742903f, 0.747185f, 0.751483f, + 0.755796f, 0.760125f, 0.76447f, 0.768831f, + 0.773208f, 0.777601f, 0.782011f, 0.786438f, + 0.790882f, 0.795343f, 0.799821f, 0.804318f, + 0.808831f, 0.813363f, 0.817913f, 0.822482f, + 0.827069f, 0.831676f, 0.836301f, 0.840946f, + 0.84561f, 0.850295f, 0.854999f, 0.859724f, + 0.864469f, 0.869235f, 0.874022f, 0.878831f, + 0.883661f, 0.888513f, 0.893387f, 0.898284f, + 0.903204f, 0.908146f, 0.913112f, 0.918101f, + 0.923114f, 0.928152f, 0.933214f, 0.938301f, + 0.943413f, 0.94855f, 0.953713f, 0.958903f, + 0.964119f, 0.969361f, 0.974631f, 0.979929f, + 0.985254f, 0.990608f, 0.99599f, 1.0014f, + 1.00684f, 1.01231f, 1.01781f, 1.02335f, + 1.02891f, 1.0345f, 1.04013f, 1.04579f, + 1.05148f, 1.05721f, 1.06296f, 1.06876f, + 1.07459f, 1.08045f, 1.08635f, 1.09228f, + 1.09826f, 1.10427f, 1.11032f, 1.1164f, + 1.12253f, 1.1287f, 1.1349f, 1.14115f, + 1.14744f, 1.15377f, 1.16015f, 1.16656f, + 1.17303f, 1.17954f, 1.18609f, 1.19269f, + 1.19934f, 1.20603f, 1.21278f, 1.21958f, + 1.22642f, 1.23332f, 1.24027f, 1.24727f, + 1.25433f, 1.26144f, 1.26861f, 1.27584f, + 1.28312f, 1.29047f, 1.29787f, 1.30534f, + 1.31287f, 1.32046f, 1.32812f, 1.33585f, + 1.34364f, 1.3515f, 1.35943f, 1.36744f, + 1.37551f, 1.38367f, 1.39189f, 1.4002f, + 1.40859f, 1.41705f, 1.42561f, 1.43424f, + 1.44296f, 1.45177f, 1.46068f, 1.46967f, + 1.47876f, 1.48795f, 1.49723f, 1.50662f, + 1.51611f, 1.52571f, 1.53541f, 1.54523f, + 1.55517f, 1.56522f, 1.57539f, 1.58568f, + 1.59611f, 1.60666f, 1.61735f, 1.62817f, + 1.63914f, 1.65025f, 1.66152f, 1.67293f, + 1.68451f, 1.69625f, 1.70815f, 1.72023f, + 1.73249f, 1.74494f, 1.75757f, 1.77041f, + 1.78344f, 1.79669f, 1.81016f, 1.82385f, + 1.83777f, 1.85194f, 1.86635f, 1.88103f, + 1.89598f, 1.91121f, 1.92674f, 1.94257f, + 1.95871f, 1.97519f, 1.99201f, 2.0092f, + 2.02676f, 2.04471f, 2.06309f, 2.08189f, + 2.10115f, 2.12089f, 2.14114f, 2.16192f, + 2.18326f, 2.2052f, 2.22777f, 2.25101f, + 2.27496f, 2.29966f, 2.32518f, 2.35156f, + 2.37886f, 2.40717f, 2.43655f, 2.46709f, + 2.49889f, 2.53206f, 2.56673f, 2.60305f, + 2.64117f, 2.6813f, 2.72367f, 2.76854f, + 2.81623f, 2.86714f, 2.92173f, 2.98059f, + 3.04446f, 3.1143f, 3.19135f, 3.27731f, + 3.37455f, 3.48653f, 3.61862f, 3.77982f, + 3.98692f, 4.2776f, 4.77167f, 133.333f +}; + +struct MSCRNode; + +struct TempMSCR +{ + MSCRNode* head; + MSCRNode* tail; + double m; // the margin used to prune area later + int size; +}; + +struct MSCRNode +{ + MSCRNode* shortcut; + // to make the finding of root less painful + MSCRNode* prev; + MSCRNode* next; + // a point double-linked list + TempMSCR* tmsr; + // the temporary msr (set to NULL at every re-initialise) + TempMSCR* gmsr; + // the global msr (once set, never to NULL) + int index; + // the index of the node, at this point, it should be x at the first 16-bits, and y at the last 16-bits. 
+ int rank; + int reinit; + int size, sizei; + double dt, di; + double s; +}; + +struct MSCREdge +{ + double chi; + MSCRNode* left; + MSCRNode* right; +}; + +static double ChiSquaredDistance( uchar* x, uchar* y ) +{ + return (double)((x[0]-y[0])*(x[0]-y[0]))/(double)(x[0]+y[0]+1e-10)+ + (double)((x[1]-y[1])*(x[1]-y[1]))/(double)(x[1]+y[1]+1e-10)+ + (double)((x[2]-y[2])*(x[2]-y[2]))/(double)(x[2]+y[2]+1e-10); +} + +static void initMSCRNode( MSCRNode* node ) +{ + node->gmsr = node->tmsr = NULL; + node->reinit = 0xffff; + node->rank = 0; + node->sizei = node->size = 1; + node->prev = node->next = node->shortcut = node; +} + +// the preprocess to get the edge list with proper gaussian blur +static int preprocessMSER_8UC3( MSCRNode* node, + MSCREdge* edge, + double* total, + CvMat* src, + CvMat* mask, + CvMat* dx, + CvMat* dy, + int Ne, + int edgeBlurSize ) +{ + int srccpt = src->step-src->cols*3; + uchar* srcptr = src->data.ptr; + uchar* lastptr = src->data.ptr+3; + double* dxptr = dx->data.db; + for ( int i = 0; i < src->rows; i++ ) + { + for ( int j = 0; j < src->cols-1; j++ ) + { + *dxptr = ChiSquaredDistance( srcptr, lastptr ); + dxptr++; + srcptr += 3; + lastptr += 3; + } + srcptr += srccpt+3; + lastptr += srccpt+3; + } + srcptr = src->data.ptr; + lastptr = src->data.ptr+src->step; + double* dyptr = dy->data.db; + for ( int i = 0; i < src->rows-1; i++ ) + { + for ( int j = 0; j < src->cols; j++ ) + { + *dyptr = ChiSquaredDistance( srcptr, lastptr ); + dyptr++; + srcptr += 3; + lastptr += 3; + } + srcptr += srccpt; + lastptr += srccpt; + } + // get dx and dy and blur it + if ( edgeBlurSize >= 1 ) + { + Mat _dx(dx->rows, dx->cols, dx->type, dx->data.ptr, dx->step); + Mat _dy(dy->rows, dy->cols, dy->type, dy->data.ptr, dy->step); + GaussianBlur( _dx, _dx, Size(edgeBlurSize, edgeBlurSize), 0 ); + GaussianBlur( _dy, _dy, Size(edgeBlurSize, edgeBlurSize), 0 ); + } + dxptr = dx->data.db; + dyptr = dy->data.db; + // assian dx, dy to proper edge list and initialize mscr node + // the nasty code here intended to avoid extra loops + if ( mask ) + { + Ne = 0; + int maskcpt = mask->step-mask->cols+1; + uchar* maskptr = mask->data.ptr; + MSCRNode* nodeptr = node; + initMSCRNode( nodeptr ); + nodeptr->index = 0; + *total += edge->chi = *dxptr; + if ( maskptr[0] && maskptr[1] ) + { + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + dxptr++; + nodeptr++; + maskptr++; + for ( int i = 1; i < src->cols-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = i; + if ( maskptr[0] && maskptr[1] ) + { + *total += edge->chi = *dxptr; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + dxptr++; + nodeptr++; + maskptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = src->cols-1; + nodeptr++; + maskptr += maskcpt; + for ( int i = 1; i < src->rows-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = i<<16; + if ( maskptr[0] ) + { + if ( maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + Ne++; + } + if ( maskptr[1] ) + { + *total += edge->chi = *dxptr; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + } + dyptr++; + dxptr++; + nodeptr++; + maskptr++; + for ( int j = 1; j < src->cols-1; j++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|j; + if ( maskptr[0] ) + { + if ( maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + Ne++; + } + if ( maskptr[1] ) + { + *total += 
edge->chi = *dxptr; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + } + dyptr++; + dxptr++; + nodeptr++; + maskptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|(src->cols-1); + if ( maskptr[0] && maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + Ne++; + } + dyptr++; + nodeptr++; + maskptr += maskcpt; + } + initMSCRNode( nodeptr ); + nodeptr->index = (src->rows-1)<<16; + if ( maskptr[0] ) + { + if ( maskptr[1] ) + { + *total += edge->chi = *dxptr; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + if ( maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + Ne++; + } + } + dxptr++; + dyptr++; + nodeptr++; + maskptr++; + for ( int i = 1; i < src->cols-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = ((src->rows-1)<<16)|i; + if ( maskptr[0] ) + { + if ( maskptr[1] ) + { + *total += edge->chi = *dxptr; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + Ne++; + } + if ( maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + Ne++; + } + } + dxptr++; + dyptr++; + nodeptr++; + maskptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); + if ( maskptr[0] && maskptr[-mask->step] ) + { + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + Ne++; + } + } else { + MSCRNode* nodeptr = node; + initMSCRNode( nodeptr ); + nodeptr->index = 0; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + for ( int i = 1; i < src->cols-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = i; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = src->cols-1; + nodeptr++; + for ( int i = 1; i < src->rows-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = i<<16; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + for ( int j = 1; j < src->cols-1; j++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|j; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|(src->cols-1); + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + nodeptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = (src->rows-1)<<16; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + nodeptr++; + for ( int i = 1; i < src->cols-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = ((src->rows-1)<<16)|i; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + edge++; + nodeptr++; + } + 
initMSCRNode( nodeptr ); + nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); + *total += edge->chi = *dyptr; + edge->left = nodeptr-src->cols; + edge->right = nodeptr; + } + return Ne; +} + +class LessThanEdge +{ +public: + bool operator()(const MSCREdge& a, const MSCREdge& b) const { return a.chi < b.chi; } +}; + +// to find the root of one region +static MSCRNode* findMSCR( MSCRNode* x ) +{ + MSCRNode* prev = x; + MSCRNode* next; + for ( ; ; ) + { + next = x->shortcut; + x->shortcut = prev; + if ( next == x ) break; + prev= x; + x = next; + } + MSCRNode* root = x; + for ( ; ; ) + { + prev = x->shortcut; + x->shortcut = root; + if ( prev == x ) break; + x = prev; + } + return root; +} + +struct MSERParams +{ + MSERParams( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ) + { + delta = _delta; + minArea = _min_area; + maxArea = _max_area; + maxVariation = _max_variation; + minDiversity = _min_diversity; + maxEvolution = _max_evolution; + areaThreshold = _area_threshold; + minMargin = _min_margin; + edgeBlurSize = _edge_blur_size; + } + + int delta; + int minArea; + int maxArea; + double maxVariation; + double minDiversity; + int maxEvolution; + double areaThreshold; + double minMargin; + int edgeBlurSize; +}; + +// the stable mscr should be: +// bigger than minArea and smaller than maxArea +// differ from its ancestor more than minDiversity +static bool MSCRStableCheck( MSCRNode* x, MSERParams params ) +{ + if ( x->size <= params.minArea || x->size >= params.maxArea ) + return 0; + if ( x->gmsr == NULL ) + return 1; + double div = (double)(x->size-x->gmsr->size)/(double)x->size; + return div > params.minDiversity; +} + +static void +extractMSER_8UC3( CvMat* src, + CvMat* mask, + vector >& msers, + MSERParams params ) +{ + msers.clear(); + MSCRNode* map = (MSCRNode*)cvAlloc( src->cols*src->rows*sizeof(map[0]) ); + int Ne = src->cols*src->rows*2-src->cols-src->rows; + MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) ); + TempMSCR* mscr = (TempMSCR*)cvAlloc( src->cols*src->rows*sizeof(mscr[0]) ); + double emean = 0; + CvMat* dx = cvCreateMat( src->rows, src->cols-1, CV_64FC1 ); + CvMat* dy = cvCreateMat( src->rows-1, src->cols, CV_64FC1 ); + Ne = preprocessMSER_8UC3( map, edge, &emean, src, mask, dx, dy, Ne, params.edgeBlurSize ); + emean = emean / (double)Ne; + std::sort(edge, edge + Ne, LessThanEdge()); + MSCREdge* edge_ub = edge+Ne; + MSCREdge* edgeptr = edge; + TempMSCR* mscrptr = mscr; + // the evolution process + for ( int i = 0; i < params.maxEvolution; i++ ) + { + double k = (double)i/(double)params.maxEvolution*(TABLE_SIZE-1); + int ti = cvFloor(k); + double reminder = k-ti; + double thres = emean*(chitab3[ti]*(1-reminder)+chitab3[ti+1]*reminder); + // to process all the edges in the list that chi < thres + while ( edgeptr < edge_ub && edgeptr->chi < thres ) + { + MSCRNode* lr = findMSCR( edgeptr->left ); + MSCRNode* rr = findMSCR( edgeptr->right ); + // get the region root (who is responsible) + if ( lr != rr ) + { + // rank idea take from: N-tree Disjoint-Set Forests for Maximally Stable Extremal Regions + if ( rr->rank > lr->rank ) + { + MSCRNode* tmp; + CV_SWAP( lr, rr, tmp ); + } else if ( lr->rank == rr->rank ) { + // at the same rank, we will compare the size + if ( lr->size > rr->size ) + { + MSCRNode* tmp; + CV_SWAP( lr, rr, tmp ); + } + lr->rank++; + } + rr->shortcut = lr; + lr->size += rr->size; + // 
join rr to the end of list lr (lr is a endless double-linked list) + lr->prev->next = rr; + lr->prev = rr->prev; + rr->prev->next = lr; + rr->prev = lr; + // area threshold force to reinitialize + if ( lr->size > (lr->size-rr->size)*params.areaThreshold ) + { + lr->sizei = lr->size; + lr->reinit = i; + if ( lr->tmsr != NULL ) + { + lr->tmsr->m = lr->dt-lr->di; + lr->tmsr = NULL; + } + lr->di = edgeptr->chi; + lr->s = 1e10; + } + lr->dt = edgeptr->chi; + if ( i > lr->reinit ) + { + double s = (double)(lr->size-lr->sizei)/(lr->dt-lr->di); + if ( s < lr->s ) + { + // skip the first one and check stablity + if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) ) + { + if ( lr->tmsr == NULL ) + { + lr->gmsr = lr->tmsr = mscrptr; + mscrptr++; + } + lr->tmsr->size = lr->size; + lr->tmsr->head = lr; + lr->tmsr->tail = lr->prev; + lr->tmsr->m = 0; + } + lr->s = s; + } + } + } + edgeptr++; + } + if ( edgeptr >= edge_ub ) + break; + } + for ( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ ) + // to prune area with margin less than minMargin + if ( ptr->m > params.minMargin ) + { + vector mser; + MSCRNode* lpt = ptr->head; + for ( int i = 0; i < ptr->size; i++ ) + { + Point pt; + pt.x = (lpt->index)&0xffff; + pt.y = (lpt->index)>>16; + lpt = lpt->next; + mser.push_back(pt); + } + msers.push_back(mser); + } + cvReleaseMat( &dx ); + cvReleaseMat( &dy ); + cvFree( &mscr ); + cvFree( &edge ); + cvFree( &map ); +} + /****************************************************************************************\ * Test for KeyPoint * \****************************************************************************************/ @@ -74,6 +752,11 @@ protected: } vector keypoints; + vector > msers; + CvMat src = image; + + extractMSER_8UC3( &src, 0, msers, MSERParams()); + detector->detect(image, keypoints); if(keypoints.empty()) diff --git a/modules/features2d/test/test_matchers_algorithmic.cpp b/modules/features2d/test/test_matchers_algorithmic.cpp index 147ae55bb..ec71e5a5a 100644 --- a/modules/features2d/test/test_matchers_algorithmic.cpp +++ b/modules/features2d/test/test_matchers_algorithmic.cpp @@ -533,13 +533,13 @@ void CV_DescriptorMatcherTest::run( int ) TEST( Features2d_DescriptorMatcher_BruteForce, regression ) { CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", - DescriptorMatcher::create("BFMatcher"), 0.01f ); + DescriptorMatcher::create("BruteForce"), 0.01f ); test.safe_run(); } TEST( Features2d_DescriptorMatcher_FlannBased, regression ) { CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", - DescriptorMatcher::create("FlannBasedMatcher"), 0.04f ); + DescriptorMatcher::create("FlannBased"), 0.04f ); test.safe_run(); } diff --git a/modules/features2d/test/test_rotation_and_scale_invariance.cpp b/modules/features2d/test/test_rotation_and_scale_invariance.cpp index 90fc2f81c..f03fa1447 100644 --- a/modules/features2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/features2d/test/test_rotation_and_scale_invariance.cpp @@ -595,7 +595,7 @@ protected: TEST(Features2d_RotationInvariance_Detector_BRISK, regression) { - DetectorRotationInvarianceTest test(Algorithm::create("Feature2D.BRISK"), + DetectorRotationInvarianceTest test(BRISK::create(), 0.32f, 0.76f); test.safe_run(); @@ -603,7 +603,7 @@ TEST(Features2d_RotationInvariance_Detector_BRISK, regression) TEST(Features2d_RotationInvariance_Detector_ORB, regression) { - DetectorRotationInvarianceTest test(Algorithm::create("Feature2D.ORB"), + DetectorRotationInvarianceTest test(ORB::create(), 0.47f, 0.76f); test.safe_run(); 
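For reference, the calling pattern exercised by the updated perf and regression tests above looks roughly like the sketch below. It is only a sketch, not part of the patch: it assumes the typed create() overloads and the transitional integer property ids (THRESHOLD, NONMAX_SUPPRESSION) introduced earlier in this series, and the input image path is a placeholder.

    #include <vector>
    #include "opencv2/features2d.hpp"
    #include "opencv2/imgcodecs.hpp"
    using namespace cv;

    int main()
    {
        // placeholder input image
        Mat img = imread("input.png", IMREAD_GRAYSCALE);

        // typed factory instead of Algorithm::create("Feature2D.FAST")
        Ptr<FastFeatureDetector> fast =
            FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_9_16);

        // transitional integer property ids instead of set("threshold", ...)
        fast->set(FastFeatureDetector::THRESHOLD, 30);
        double thresh = fast->get(FastFeatureDetector::THRESHOLD); // reads back 30
        (void)thresh;

        std::vector<KeyPoint> keypoints;
        fast->detect(img, keypoints);

        // descriptor extractors and matchers follow the same factory pattern
        Ptr<Feature2D> orb = ORB::create();
        Mat descriptors;
        orb->compute(img, keypoints, descriptors);

        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
        return matcher.empty() ? 1 : 0;
    }
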
diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp index 9279df70e..ad3f6320f 100644 --- a/modules/stitching/src/matchers.cpp +++ b/modules/stitching/src/matchers.cpp @@ -321,7 +321,7 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int { if (num_octaves_descr == num_octaves && num_layers_descr == num_layers) { - surf = Algorithm::create("Feature2D.SURF"); + surf = xfeatures2d::SURF::create(); if( !surf ) CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" ); surf->set("hessianThreshold", hess_thresh); @@ -330,8 +330,8 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int } else { - detector_ = Algorithm::create("Feature2D.SURF"); - extractor_ = Algorithm::create("Feature2D.SURF"); + detector_ = xfeatures2d::SURF::create(); + extractor_ = xfeatures2d::SURF::create(); if( !detector_ || !extractor_ ) CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" ); From 25a7d023dd47d78dc7c5dcf4527776ea780725f6 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 21:59:38 +0400 Subject: [PATCH 06/11] ok; all the tests now pass --- .../features2d/include/opencv2/features2d.hpp | 9 +- modules/features2d/src/mser.cpp | 497 +++++++------ modules/features2d/test/test_keypoints.cpp | 679 ------------------ 3 files changed, 261 insertions(+), 924 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 243d75577..a42d454c4 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -195,12 +195,9 @@ public: int _max_evolution=200, double _area_threshold=1.01, double _min_margin=0.003, int _edge_blur_size=5 ); - CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, - OutputArray stats=noArray() ) = 0; - - CV_WRAP virtual void detectAndStore( InputArray image, - std::vector >& msers, - OutputArray stats=noArray() ) = 0; + CV_WRAP virtual void detectRegions( InputArray image, + std::vector >& msers, + std::vector& bboxes ) = 0; }; //! detects corners using FAST algorithm by E. 
Rosten diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index 49d173b9b..44b23acb8 100644 --- a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -53,23 +53,22 @@ class MSER_Impl : public MSER public: struct Params { - explicit Params( int _delta=5, double _maxVariation=0.25, - int _minArea=60, int _maxArea=14400, - double _minDiversity=.2, int _maxEvolution=200, - double _areaThreshold=1.01, - double _minMargin=0.003, int _edgeBlurSize=5 ) + Params( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ) { delta = _delta; - minArea = _minArea; - maxArea = _maxArea; - maxVariation = _maxVariation; - minDiversity = _minDiversity; - pass2Only = false; - maxEvolution = _maxEvolution; - areaThreshold = _areaThreshold; - minMargin = _minMargin; - edgeBlurSize = _edgeBlurSize; + minArea = _min_area; + maxArea = _max_area; + maxVariation = _max_variation; + minDiversity = _min_diversity; + maxEvolution = _max_evolution; + areaThreshold = _area_threshold; + minMargin = _min_margin; + edgeBlurSize = _edge_blur_size; } + int delta; int minArea; int maxArea; @@ -269,11 +268,10 @@ public: } // convert the point set to CvSeq - Rect label( Mat& labels, int lval, const Pixel* pix0, int step ) const + Rect capture( const Pixel* pix0, int step, vector& region ) const { - int* lptr = labels.ptr(); - int lstep = labels.step/sizeof(lptr[0]); int xmin = INT_MAX, ymin = INT_MAX, xmax = INT_MIN, ymax = INT_MIN; + region.clear(); for( PPixel pix = head; pix != 0; pix = pix0[pix].getNext() ) { @@ -285,7 +283,7 @@ public: ymin = std::min(ymin, y); ymax = std::max(ymax, y); - lptr[lstep*y + x] = lval; + region.push_back(Point(x, y)); } return Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1); @@ -300,10 +298,9 @@ public: bool dvar; // the derivative of last var }; - int detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes ); - void detectAndStore( InputArray image, - std::vector >& msers, - OutputArray stats ); + void detectRegions( InputArray image, + std::vector >& msers, + std::vector& bboxes ); void detect( InputArray _src, vector& keypoints, InputArray _mask ); void preprocess1( const Mat& img, int* level_size ) @@ -359,7 +356,7 @@ public: } } - void pass( const Mat& img, Mat& labels, int& lval, vector& bboxvec, + void pass( const Mat& img, vector >& msers, vector& bboxvec, Size size, const int* level_size, int mask ) { CompHistory* histptr = &histbuf[0]; @@ -452,7 +449,10 @@ public: // check the stablity and push a new history, increase the grey level if( comptr->isStable(params) ) { - Rect box = comptr->label( labels, lval++, ptr0, step ); + msers.push_back(vector()); + vector& mser = msers.back(); + + Rect box = comptr->capture( ptr0, step, mser ); bboxvec.push_back(box); } comptr->growHistory( histptr++ ); @@ -472,7 +472,10 @@ public: // check the stablity here otherwise it wouldn't be an ER if( comptr->isStable(params) ) { - Rect box = comptr->label( labels, lval++, ptr0, step ); + msers.push_back(vector()); + vector& mser = msers.back(); + + Rect box = comptr->capture( ptr0, step, mser ); bboxvec.push_back(box); } comptr->growHistory( histptr++ ); @@ -607,7 +610,6 @@ static const float chitab3[]= 3.98692f, 4.2776f, 4.77167f, 133.333f }; - struct MSCRNode; struct TempMSCR @@ -620,56 +622,6 @@ struct TempMSCR struct MSCRNode { - // the stable mscr should be: - // bigger than 
minArea and smaller than maxArea - // differ from its ancestor more than minDiversity - bool isStable( const MSER_Impl::Params& params ) const - { - if( size <= params.minArea || size >= params.maxArea ) - return 0; - if( gmsr == NULL ) - return 1; - double div = (double)(size - gmsr->size)/(double)size; - return div > params.minDiversity; - } - - void init( int _index ) - { - gmsr = tmsr = NULL; - reinit = 0xffff; - rank = 0; - sizei = size = 1; - prev = next = shortcut = this; - index = _index; - } - - // to find the root of one region - MSCRNode* findRoot() - { - MSCRNode* x = this; - MSCRNode* _prev = x; - MSCRNode* _next; - for(;;) - { - _next = x->shortcut; - x->shortcut = _prev; - if( _next == x ) - break; - _prev = x; - x = _next; - } - MSCRNode* root = x; - for(;;) - { - _prev = x->shortcut; - x->shortcut = root; - if( _prev == x ) - break; - x = _prev; - } - return root; - } - MSCRNode* shortcut; // to make the finding of root less painful MSCRNode* prev; @@ -690,85 +642,175 @@ struct MSCRNode struct MSCREdge { - double init(float _chi, MSCRNode* _left, MSCRNode* _right) - { - chi = _chi; - left = _left; - right = _right; - return chi; - } - float chi; + double chi; MSCRNode* left; MSCRNode* right; }; -static float ChiSquaredDistance( const uchar* x, const uchar* y ) +static double ChiSquaredDistance( const uchar* x, const uchar* y ) { - return (float)((x[0]-y[0])*(x[0]-y[0]))/(float)(x[0]+y[0]+FLT_EPSILON)+ - (float)((x[1]-y[1])*(x[1]-y[1]))/(float)(x[1]+y[1]+FLT_EPSILON)+ - (float)((x[2]-y[2])*(x[2]-y[2]))/(float)(x[2]+y[2]+FLT_EPSILON); + return (double)((x[0]-y[0])*(x[0]-y[0]))/(double)(x[0]+y[0]+1e-10)+ + (double)((x[1]-y[1])*(x[1]-y[1]))/(double)(x[1]+y[1]+1e-10)+ + (double)((x[2]-y[2])*(x[2]-y[2]))/(double)(x[2]+y[2]+1e-10); } +static void initMSCRNode( MSCRNode* node ) +{ + node->gmsr = node->tmsr = NULL; + node->reinit = 0xffff; + node->rank = 0; + node->sizei = node->size = 1; + node->prev = node->next = node->shortcut = node; +} // the preprocess to get the edge list with proper gaussian blur -static int preprocessMSER_8UC3( MSCRNode* node, MSCREdge* edge, - double& total, const Mat& src, - Mat& dx, Mat& dy, int Ne, int edgeBlurSize ) +static int preprocessMSER_8uC3( MSCRNode* node, + MSCREdge* edge, + double* total, + const Mat& src, + Mat& dx, + Mat& dy, + int Ne, + int edgeBlurSize ) { - int nch = src.channels(); - int i, j, nrows = src.rows, ncols = src.cols; - float* dxptr = 0; - float* dyptr = 0; - for( i = 0; i < nrows; i++ ) + int srccpt = src.step-src.cols*3; + const uchar* srcptr = src.ptr(); + const uchar* lastptr = srcptr+3; + double* dxptr = dx.ptr(); + for ( int i = 0; i < src.rows; i++ ) { - const uchar* srcptr = src.ptr(i); - const uchar* nextsrc = src.ptr(std::min(i+1, nrows-1)); - dxptr = dx.ptr(i); - dyptr = dy.ptr(i); - - for( j = 0; j < ncols-1; j++ ) + for ( int j = 0; j < src.cols-1; j++ ) { - dxptr[j] = ChiSquaredDistance( srcptr + j*nch, srcptr + (j+1)*nch ); - dyptr[j] = ChiSquaredDistance( srcptr + j*nch, nextsrc + j*nch ); + *dxptr = ChiSquaredDistance( srcptr, lastptr ); + dxptr++; + srcptr += 3; + lastptr += 3; } - dyptr[ncols-1] = ChiSquaredDistance( srcptr + (ncols-1)*nch, nextsrc + (ncols-1)*nch ); + srcptr += srccpt+3; + lastptr += srccpt+3; } - - // get dx and dy and blur it - if( edgeBlurSize >= 1 ) + srcptr = src.ptr(); + lastptr = srcptr+src.step; + double* dyptr = dy.ptr(); + for ( int i = 0; i < src.rows-1; i++ ) { - GaussianBlur(dx, dx, Size(edgeBlurSize, edgeBlurSize), 0); - GaussianBlur(dy, dy, Size(edgeBlurSize, 
edgeBlurSize), 0); + for ( int j = 0; j < src.cols; j++ ) + { + *dyptr = ChiSquaredDistance( srcptr, lastptr ); + dyptr++; + srcptr += 3; + lastptr += 3; + } + srcptr += srccpt; + lastptr += srccpt; } - dxptr = dx.ptr(); - dyptr = dy.ptr(); + // get dx and dy and blur it + if ( edgeBlurSize >= 1 ) + { + GaussianBlur( dx, dx, Size(edgeBlurSize, edgeBlurSize), 0 ); + GaussianBlur( dy, dy, Size(edgeBlurSize, edgeBlurSize), 0 ); + } + dxptr = dx.ptr(); + dyptr = dy.ptr(); // assian dx, dy to proper edge list and initialize mscr node // the nasty code here intended to avoid extra loops MSCRNode* nodeptr = node; - for( j = 0; j < ncols-1; j++ ) + initMSCRNode( nodeptr ); + nodeptr->index = 0; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + for ( int i = 1; i < src.cols-1; i++ ) { - nodeptr[j].init(j); - total += edge[j].init(dxptr[j], nodeptr+j, nodeptr+j+1); + initMSCRNode( nodeptr ); + nodeptr->index = i; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; } - dxptr += ncols - 1; - edge += ncols - 1; - nodeptr[ncols-1].init(ncols - 1); - nodeptr += ncols; - for( i = 1; i < nrows; i++ ) + initMSCRNode( nodeptr ); + nodeptr->index = src.cols-1; + nodeptr++; + for ( int i = 1; i < src.rows-1; i++ ) { - for( j = 0; j < ncols-1; j++ ) + initMSCRNode( nodeptr ); + nodeptr->index = i<<16; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; + edge++; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; + for ( int j = 1; j < src.cols-1; j++ ) { - nodeptr[j].init( (i<<16)|j ); - total += edge[j*2].init(dyptr[j], nodeptr + j - ncols, nodeptr + j); - total += edge[j*2+1].init(dxptr[j], nodeptr + j, nodeptr + j + 1); + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|j; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; + edge++; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + nodeptr++; } - nodeptr[ncols-1].init((i<<16)|(ncols - 1)); - total += edge[(ncols-1)*2].init(dyptr[ncols-1], nodeptr - 1, nodeptr + ncols-1); - dxptr += ncols-1; - dyptr += ncols; - edge += 2*ncols - 1; - nodeptr += ncols; + initMSCRNode( nodeptr ); + nodeptr->index = (i<<16)|(src.cols-1); + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; + edge++; + nodeptr++; } + initMSCRNode( nodeptr ); + nodeptr->index = (src.rows-1)<<16; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; + edge++; + nodeptr++; + for ( int i = 1; i < src.cols-1; i++ ) + { + initMSCRNode( nodeptr ); + nodeptr->index = ((src.rows-1)<<16)|i; + *total += edge->chi = *dxptr; + dxptr++; + edge->left = nodeptr; + edge->right = nodeptr+1; + edge++; + *total += edge->chi = *dyptr; + dyptr++; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; + edge++; + nodeptr++; + } + initMSCRNode( nodeptr ); + nodeptr->index = ((src.rows-1)<<16)|(src.cols-1); + *total += edge->chi = *dyptr; + edge->left = nodeptr-src.cols; + edge->right = nodeptr; return Ne; } @@ -779,37 +821,63 @@ public: bool operator()(const MSCREdge& a, const MSCREdge& b) const { return a.chi < b.chi; } }; +// to find the root of one region +static 
MSCRNode* findMSCR( MSCRNode* x ) +{ + MSCRNode* prev = x; + MSCRNode* next; + for ( ; ; ) + { + next = x->shortcut; + x->shortcut = prev; + if ( next == x ) break; + prev= x; + x = next; + } + MSCRNode* root = x; + for ( ; ; ) + { + prev = x->shortcut; + x->shortcut = root; + if ( prev == x ) break; + x = prev; + } + return root; +} + +// the stable mscr should be: +// bigger than minArea and smaller than maxArea +// differ from its ancestor more than minDiversity +static bool MSCRStableCheck( MSCRNode* x, const MSER_Impl::Params& params ) +{ + if ( x->size <= params.minArea || x->size >= params.maxArea ) + return false; + if ( x->gmsr == NULL ) + return true; + double div = (double)(x->size-x->gmsr->size)/(double)x->size; + return div > params.minDiversity; +} static void -extractMSER_8uC3( const Mat& src, Mat& labels, +extractMSER_8uC3( const Mat& src, + vector >& msers, vector& bboxvec, const MSER_Impl::Params& params ) { - int npixels = src.cols*src.rows; - int currlabel = 0; - int* lptr = labels.ptr(); - int lstep = (int)(labels.step/sizeof(int)); - - vector mapvec(npixels); - MSCRNode* map = &mapvec[0]; - int Ne = npixels*2 - src.cols - src.rows; - vector edgevec(Ne+1); - MSCREdge* edge = &edgevec[0]; - vector mscrvec(npixels); - TempMSCR* mscr = &mscrvec[0]; + bboxvec.clear(); + MSCRNode* map = (MSCRNode*)cvAlloc( src.cols*src.rows*sizeof(map[0]) ); + int Ne = src.cols*src.rows*2-src.cols-src.rows; + MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) ); + TempMSCR* mscr = (TempMSCR*)cvAlloc( src.cols*src.rows*sizeof(mscr[0]) ); double emean = 0; - Mat dx( src.rows, src.cols-1, CV_32FC1 ); - Mat dy( src.rows, src.cols, CV_32FC1 ); - - Ne = preprocessMSER_8UC3( map, edge, emean, src, dx, dy, Ne, params.edgeBlurSize ); + Mat dx( src.rows, src.cols-1, CV_64FC1 ); + Mat dy( src.rows-1, src.cols, CV_64FC1 ); + Ne = preprocessMSER_8uC3( map, edge, &emean, src, dx, dy, Ne, params.edgeBlurSize ); emean = emean / (double)Ne; - std::sort(edge, edge + Ne, LessThanEdge()); - MSCREdge* edge_ub = edge+Ne; MSCREdge* edgeptr = edge; TempMSCR* mscrptr = mscr; - // the evolution process for ( int i = 0; i < params.maxEvolution; i++ ) { @@ -818,24 +886,23 @@ extractMSER_8uC3( const Mat& src, Mat& labels, double reminder = k-ti; double thres = emean*(chitab3[ti]*(1-reminder)+chitab3[ti+1]*reminder); // to process all the edges in the list that chi < thres - while( edgeptr < edge_ub && edgeptr->chi < thres ) + while ( edgeptr < edge_ub && edgeptr->chi < thres ) { - MSCRNode* lr = edgeptr->left->findRoot(); - MSCRNode* rr = edgeptr->right->findRoot(); + MSCRNode* lr = findMSCR( edgeptr->left ); + MSCRNode* rr = findMSCR( edgeptr->right ); // get the region root (who is responsible) if ( lr != rr ) { - MSCRNode* tmp; // rank idea take from: N-tree Disjoint-Set Forests for Maximally Stable Extremal Regions if ( rr->rank > lr->rank ) { + MSCRNode* tmp; CV_SWAP( lr, rr, tmp ); - } - else if ( lr->rank == rr->rank ) - { + } else if ( lr->rank == rr->rank ) { // at the same rank, we will compare the size - if( lr->size > rr->size ) + if ( lr->size > rr->size ) { + MSCRNode* tmp; CV_SWAP( lr, rr, tmp ); } lr->rank++; @@ -867,7 +934,7 @@ extractMSER_8uC3( const Mat& src, Mat& labels, if ( s < lr->s ) { // skip the first one and check stablity - if ( i > lr->reinit+1 && lr->isStable( params ) ) + if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) ) { if ( lr->tmsr == NULL ) { @@ -888,55 +955,51 @@ extractMSER_8uC3( const Mat& src, Mat& labels, if ( edgeptr >= edge_ub ) break; } - - for( TempMSCR* ptr = 
mscr; ptr < mscrptr; ptr++ ) - { + for ( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ ) // to prune area with margin less than minMargin - if( ptr->m > params.minMargin ) + if ( ptr->m > params.minMargin ) { - int xmin = INT_MAX, ymin = INT_MAX, xmax = INT_MIN, ymax = INT_MIN; - currlabel++; MSCRNode* lpt = ptr->head; - for( int i = 0; i < ptr->size; i++ ) + int xmin = INT_MAX, ymin = INT_MAX, xmax = INT_MIN, ymax = INT_MIN; + msers.push_back(vector()); + vector& mser = msers.back(); + + for ( int i = 0; i < ptr->size; i++ ) { - int x = (lpt->index)&0xffff; - int y = (lpt->index)>>16; + Point pt; + pt.x = (lpt->index)&0xffff; + pt.y = (lpt->index)>>16; + xmin = std::min(xmin, pt.x); + xmax = std::max(xmax, pt.x); + ymin = std::min(ymin, pt.y); + ymax = std::max(ymax, pt.y); + lpt = lpt->next; - - xmin = std::min(xmin, x); - xmax = std::max(xmax, x); - ymin = std::min(ymin, y); - ymax = std::max(ymax, y); - - lptr[lstep*y + x] = currlabel; + mser.push_back(pt); } bboxvec.push_back(Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1)); } - } + cvFree( &mscr ); + cvFree( &edge ); + cvFree( &map ); } -int MSER_Impl::detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes ) +void MSER_Impl::detectRegions( InputArray _src, vector >& msers, vector& bboxes ) { Mat src = _src.getMat(); size_t npix = src.total(); - vector bboxvec; + + msers.clear(); + bboxes.clear(); if( npix == 0 ) - { - _labels.release(); - return 0; - } + return; Size size = src.size(); - _labels.create( size, CV_32S ); - Mat labels = _labels.getMat(); - labels.setTo(Scalar::all(0)); if( src.type() == CV_8U ) { int level_size[256]; - int lval = 1; - if( !src.isContinuous() ) { src.copyTo(tempsrc); @@ -946,77 +1009,33 @@ int MSER_Impl::detectAndLabel( InputArray _src, OutputArray _labels, OutputArray // darker to brighter (MSER+) preprocess1( src, level_size ); if( !params.pass2Only ) - pass( src, labels, lval, bboxvec, size, level_size, 0 ); + pass( src, msers, bboxes, size, level_size, 0 ); // brighter to darker (MSER-) preprocess2( src, level_size ); - pass( src, labels, lval, bboxvec, size, level_size, 255 ); + pass( src, msers, bboxes, size, level_size, 255 ); } else { CV_Assert( src.type() == CV_8UC3 || src.type() == CV_8UC4 ); - extractMSER_8uC3( src, labels, bboxvec, params ); + extractMSER_8uC3( src, msers, bboxes, params ); } - if( _bboxes.needed() ) - Mat(bboxvec).copyTo(_bboxes); - return (int)bboxvec.size(); -} - -void MSER_Impl::detectAndStore( InputArray image, - std::vector >& msers, - OutputArray stats ) -{ - vector bboxvec; - Mat labels; - int i, x, y, nregs = detectAndLabel(image, labels, bboxvec); - - msers.resize(nregs); - for( i = 0; i < nregs; i++ ) - { - Rect r = bboxvec[i]; - vector& msers_i = msers[i]; - msers_i.clear(); - for( y = r.y; y < r.y + r.height; y++ ) - { - const int* lptr = labels.ptr(y); - for( x = r.x; x < r.x + r.width; x++ ) - { - if( lptr[x] == i+1 ) - msers_i.push_back(Point(x, y)); - } - } - } - - if( stats.needed() ) - Mat(bboxvec).copyTo(stats); } void MSER_Impl::detect( InputArray _image, vector& keypoints, InputArray _mask ) { vector bboxes; - vector reg; - Mat labels, mask = _mask.getMat(); + vector > msers; + Mat mask = _mask.getMat(); - int i, x, y, ncomps = detectAndLabel(_image, labels, bboxes); - CV_Assert( ncomps == (int)bboxes.size() ); + detectRegions(_image, msers, bboxes); + int i, ncomps = (int)msers.size(); keypoints.clear(); for( i = 0; i < ncomps; i++ ) { Rect r = bboxes[i]; - reg.reserve(r.area()); - reg.clear(); - - for( y = r.y; y < r.y + r.height; 
y++ ) - { - const int* lptr = labels.ptr(y); - for( x = r.x; x < r.x + r.width; x++ ) - { - if( lptr[x] == i+1 ) - reg.push_back(Point(x, y)); - } - } // TODO check transformation from MSER region to KeyPoint - RotatedRect rect = fitEllipse(Mat(reg)); + RotatedRect rect = fitEllipse(Mat(msers[i])); float diam = std::sqrt(rect.size.height*rect.size.width); if( diam > std::numeric_limits::epsilon() && r.contains(rect.center) && diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index c494f9882..bdfd5e991 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -49,683 +49,6 @@ using namespace cv; const string FEATURES2D_DIR = "features2d"; const string IMAGE_FILENAME = "tsukuba.png"; -const int TABLE_SIZE = 400; - -static const float chitab3[]= -{ - 0.f, 0.0150057f, 0.0239478f, 0.0315227f, - 0.0383427f, 0.0446605f, 0.0506115f, 0.0562786f, - 0.0617174f, 0.0669672f, 0.0720573f, 0.0770099f, - 0.081843f, 0.0865705f, 0.0912043f, 0.0957541f, - 0.100228f, 0.104633f, 0.108976f, 0.113261f, - 0.117493f, 0.121676f, 0.125814f, 0.12991f, - 0.133967f, 0.137987f, 0.141974f, 0.145929f, - 0.149853f, 0.15375f, 0.15762f, 0.161466f, - 0.165287f, 0.169087f, 0.172866f, 0.176625f, - 0.180365f, 0.184088f, 0.187794f, 0.191483f, - 0.195158f, 0.198819f, 0.202466f, 0.2061f, - 0.209722f, 0.213332f, 0.216932f, 0.220521f, - 0.2241f, 0.22767f, 0.231231f, 0.234783f, - 0.238328f, 0.241865f, 0.245395f, 0.248918f, - 0.252435f, 0.255947f, 0.259452f, 0.262952f, - 0.266448f, 0.269939f, 0.273425f, 0.276908f, - 0.280386f, 0.283862f, 0.287334f, 0.290803f, - 0.29427f, 0.297734f, 0.301197f, 0.304657f, - 0.308115f, 0.311573f, 0.315028f, 0.318483f, - 0.321937f, 0.32539f, 0.328843f, 0.332296f, - 0.335749f, 0.339201f, 0.342654f, 0.346108f, - 0.349562f, 0.353017f, 0.356473f, 0.35993f, - 0.363389f, 0.366849f, 0.37031f, 0.373774f, - 0.377239f, 0.380706f, 0.384176f, 0.387648f, - 0.391123f, 0.3946f, 0.39808f, 0.401563f, - 0.405049f, 0.408539f, 0.412032f, 0.415528f, - 0.419028f, 0.422531f, 0.426039f, 0.429551f, - 0.433066f, 0.436586f, 0.440111f, 0.44364f, - 0.447173f, 0.450712f, 0.454255f, 0.457803f, - 0.461356f, 0.464915f, 0.468479f, 0.472049f, - 0.475624f, 0.479205f, 0.482792f, 0.486384f, - 0.489983f, 0.493588f, 0.4972f, 0.500818f, - 0.504442f, 0.508073f, 0.511711f, 0.515356f, - 0.519008f, 0.522667f, 0.526334f, 0.530008f, - 0.533689f, 0.537378f, 0.541075f, 0.54478f, - 0.548492f, 0.552213f, 0.555942f, 0.55968f, - 0.563425f, 0.56718f, 0.570943f, 0.574715f, - 0.578497f, 0.582287f, 0.586086f, 0.589895f, - 0.593713f, 0.597541f, 0.601379f, 0.605227f, - 0.609084f, 0.612952f, 0.61683f, 0.620718f, - 0.624617f, 0.628526f, 0.632447f, 0.636378f, - 0.64032f, 0.644274f, 0.648239f, 0.652215f, - 0.656203f, 0.660203f, 0.664215f, 0.668238f, - 0.672274f, 0.676323f, 0.680384f, 0.684457f, - 0.688543f, 0.692643f, 0.696755f, 0.700881f, - 0.70502f, 0.709172f, 0.713339f, 0.717519f, - 0.721714f, 0.725922f, 0.730145f, 0.734383f, - 0.738636f, 0.742903f, 0.747185f, 0.751483f, - 0.755796f, 0.760125f, 0.76447f, 0.768831f, - 0.773208f, 0.777601f, 0.782011f, 0.786438f, - 0.790882f, 0.795343f, 0.799821f, 0.804318f, - 0.808831f, 0.813363f, 0.817913f, 0.822482f, - 0.827069f, 0.831676f, 0.836301f, 0.840946f, - 0.84561f, 0.850295f, 0.854999f, 0.859724f, - 0.864469f, 0.869235f, 0.874022f, 0.878831f, - 0.883661f, 0.888513f, 0.893387f, 0.898284f, - 0.903204f, 0.908146f, 0.913112f, 0.918101f, - 0.923114f, 0.928152f, 0.933214f, 0.938301f, - 0.943413f, 0.94855f, 0.953713f, 0.958903f, - 
0.964119f, 0.969361f, 0.974631f, 0.979929f, - 0.985254f, 0.990608f, 0.99599f, 1.0014f, - 1.00684f, 1.01231f, 1.01781f, 1.02335f, - 1.02891f, 1.0345f, 1.04013f, 1.04579f, - 1.05148f, 1.05721f, 1.06296f, 1.06876f, - 1.07459f, 1.08045f, 1.08635f, 1.09228f, - 1.09826f, 1.10427f, 1.11032f, 1.1164f, - 1.12253f, 1.1287f, 1.1349f, 1.14115f, - 1.14744f, 1.15377f, 1.16015f, 1.16656f, - 1.17303f, 1.17954f, 1.18609f, 1.19269f, - 1.19934f, 1.20603f, 1.21278f, 1.21958f, - 1.22642f, 1.23332f, 1.24027f, 1.24727f, - 1.25433f, 1.26144f, 1.26861f, 1.27584f, - 1.28312f, 1.29047f, 1.29787f, 1.30534f, - 1.31287f, 1.32046f, 1.32812f, 1.33585f, - 1.34364f, 1.3515f, 1.35943f, 1.36744f, - 1.37551f, 1.38367f, 1.39189f, 1.4002f, - 1.40859f, 1.41705f, 1.42561f, 1.43424f, - 1.44296f, 1.45177f, 1.46068f, 1.46967f, - 1.47876f, 1.48795f, 1.49723f, 1.50662f, - 1.51611f, 1.52571f, 1.53541f, 1.54523f, - 1.55517f, 1.56522f, 1.57539f, 1.58568f, - 1.59611f, 1.60666f, 1.61735f, 1.62817f, - 1.63914f, 1.65025f, 1.66152f, 1.67293f, - 1.68451f, 1.69625f, 1.70815f, 1.72023f, - 1.73249f, 1.74494f, 1.75757f, 1.77041f, - 1.78344f, 1.79669f, 1.81016f, 1.82385f, - 1.83777f, 1.85194f, 1.86635f, 1.88103f, - 1.89598f, 1.91121f, 1.92674f, 1.94257f, - 1.95871f, 1.97519f, 1.99201f, 2.0092f, - 2.02676f, 2.04471f, 2.06309f, 2.08189f, - 2.10115f, 2.12089f, 2.14114f, 2.16192f, - 2.18326f, 2.2052f, 2.22777f, 2.25101f, - 2.27496f, 2.29966f, 2.32518f, 2.35156f, - 2.37886f, 2.40717f, 2.43655f, 2.46709f, - 2.49889f, 2.53206f, 2.56673f, 2.60305f, - 2.64117f, 2.6813f, 2.72367f, 2.76854f, - 2.81623f, 2.86714f, 2.92173f, 2.98059f, - 3.04446f, 3.1143f, 3.19135f, 3.27731f, - 3.37455f, 3.48653f, 3.61862f, 3.77982f, - 3.98692f, 4.2776f, 4.77167f, 133.333f -}; - -struct MSCRNode; - -struct TempMSCR -{ - MSCRNode* head; - MSCRNode* tail; - double m; // the margin used to prune area later - int size; -}; - -struct MSCRNode -{ - MSCRNode* shortcut; - // to make the finding of root less painful - MSCRNode* prev; - MSCRNode* next; - // a point double-linked list - TempMSCR* tmsr; - // the temporary msr (set to NULL at every re-initialise) - TempMSCR* gmsr; - // the global msr (once set, never to NULL) - int index; - // the index of the node, at this point, it should be x at the first 16-bits, and y at the last 16-bits. 
- int rank; - int reinit; - int size, sizei; - double dt, di; - double s; -}; - -struct MSCREdge -{ - double chi; - MSCRNode* left; - MSCRNode* right; -}; - -static double ChiSquaredDistance( uchar* x, uchar* y ) -{ - return (double)((x[0]-y[0])*(x[0]-y[0]))/(double)(x[0]+y[0]+1e-10)+ - (double)((x[1]-y[1])*(x[1]-y[1]))/(double)(x[1]+y[1]+1e-10)+ - (double)((x[2]-y[2])*(x[2]-y[2]))/(double)(x[2]+y[2]+1e-10); -} - -static void initMSCRNode( MSCRNode* node ) -{ - node->gmsr = node->tmsr = NULL; - node->reinit = 0xffff; - node->rank = 0; - node->sizei = node->size = 1; - node->prev = node->next = node->shortcut = node; -} - -// the preprocess to get the edge list with proper gaussian blur -static int preprocessMSER_8UC3( MSCRNode* node, - MSCREdge* edge, - double* total, - CvMat* src, - CvMat* mask, - CvMat* dx, - CvMat* dy, - int Ne, - int edgeBlurSize ) -{ - int srccpt = src->step-src->cols*3; - uchar* srcptr = src->data.ptr; - uchar* lastptr = src->data.ptr+3; - double* dxptr = dx->data.db; - for ( int i = 0; i < src->rows; i++ ) - { - for ( int j = 0; j < src->cols-1; j++ ) - { - *dxptr = ChiSquaredDistance( srcptr, lastptr ); - dxptr++; - srcptr += 3; - lastptr += 3; - } - srcptr += srccpt+3; - lastptr += srccpt+3; - } - srcptr = src->data.ptr; - lastptr = src->data.ptr+src->step; - double* dyptr = dy->data.db; - for ( int i = 0; i < src->rows-1; i++ ) - { - for ( int j = 0; j < src->cols; j++ ) - { - *dyptr = ChiSquaredDistance( srcptr, lastptr ); - dyptr++; - srcptr += 3; - lastptr += 3; - } - srcptr += srccpt; - lastptr += srccpt; - } - // get dx and dy and blur it - if ( edgeBlurSize >= 1 ) - { - Mat _dx(dx->rows, dx->cols, dx->type, dx->data.ptr, dx->step); - Mat _dy(dy->rows, dy->cols, dy->type, dy->data.ptr, dy->step); - GaussianBlur( _dx, _dx, Size(edgeBlurSize, edgeBlurSize), 0 ); - GaussianBlur( _dy, _dy, Size(edgeBlurSize, edgeBlurSize), 0 ); - } - dxptr = dx->data.db; - dyptr = dy->data.db; - // assian dx, dy to proper edge list and initialize mscr node - // the nasty code here intended to avoid extra loops - if ( mask ) - { - Ne = 0; - int maskcpt = mask->step-mask->cols+1; - uchar* maskptr = mask->data.ptr; - MSCRNode* nodeptr = node; - initMSCRNode( nodeptr ); - nodeptr->index = 0; - *total += edge->chi = *dxptr; - if ( maskptr[0] && maskptr[1] ) - { - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - dxptr++; - nodeptr++; - maskptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i; - if ( maskptr[0] && maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - dxptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = src->cols-1; - nodeptr++; - maskptr += maskcpt; - for ( int i = 1; i < src->rows-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i<<16; - if ( maskptr[0] ) - { - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - } - dyptr++; - dxptr++; - nodeptr++; - maskptr++; - for ( int j = 1; j < src->cols-1; j++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|j; - if ( maskptr[0] ) - { - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - if ( maskptr[1] ) - { - *total += 
edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - } - dyptr++; - dxptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|(src->cols-1); - if ( maskptr[0] && maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - dyptr++; - nodeptr++; - maskptr += maskcpt; - } - initMSCRNode( nodeptr ); - nodeptr->index = (src->rows-1)<<16; - if ( maskptr[0] ) - { - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - } - dxptr++; - dyptr++; - nodeptr++; - maskptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|i; - if ( maskptr[0] ) - { - if ( maskptr[1] ) - { - *total += edge->chi = *dxptr; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - Ne++; - } - if ( maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - Ne++; - } - } - dxptr++; - dyptr++; - nodeptr++; - maskptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); - if ( maskptr[0] && maskptr[-mask->step] ) - { - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - Ne++; - } - } else { - MSCRNode* nodeptr = node; - initMSCRNode( nodeptr ); - nodeptr->index = 0; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = src->cols-1; - nodeptr++; - for ( int i = 1; i < src->rows-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = i<<16; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - for ( int j = 1; j < src->cols-1; j++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|j; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (i<<16)|(src->cols-1); - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - } - initMSCRNode( nodeptr ); - nodeptr->index = (src->rows-1)<<16; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - for ( int i = 1; i < src->cols-1; i++ ) - { - initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|i; - *total += edge->chi = *dxptr; - dxptr++; - edge->left = nodeptr; - edge->right = nodeptr+1; - edge++; - *total += edge->chi = *dyptr; - dyptr++; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - edge++; - nodeptr++; - } - 
initMSCRNode( nodeptr ); - nodeptr->index = ((src->rows-1)<<16)|(src->cols-1); - *total += edge->chi = *dyptr; - edge->left = nodeptr-src->cols; - edge->right = nodeptr; - } - return Ne; -} - -class LessThanEdge -{ -public: - bool operator()(const MSCREdge& a, const MSCREdge& b) const { return a.chi < b.chi; } -}; - -// to find the root of one region -static MSCRNode* findMSCR( MSCRNode* x ) -{ - MSCRNode* prev = x; - MSCRNode* next; - for ( ; ; ) - { - next = x->shortcut; - x->shortcut = prev; - if ( next == x ) break; - prev= x; - x = next; - } - MSCRNode* root = x; - for ( ; ; ) - { - prev = x->shortcut; - x->shortcut = root; - if ( prev == x ) break; - x = prev; - } - return root; -} - -struct MSERParams -{ - MSERParams( int _delta=5, int _min_area=60, int _max_area=14400, - double _max_variation=0.25, double _min_diversity=.2, - int _max_evolution=200, double _area_threshold=1.01, - double _min_margin=0.003, int _edge_blur_size=5 ) - { - delta = _delta; - minArea = _min_area; - maxArea = _max_area; - maxVariation = _max_variation; - minDiversity = _min_diversity; - maxEvolution = _max_evolution; - areaThreshold = _area_threshold; - minMargin = _min_margin; - edgeBlurSize = _edge_blur_size; - } - - int delta; - int minArea; - int maxArea; - double maxVariation; - double minDiversity; - int maxEvolution; - double areaThreshold; - double minMargin; - int edgeBlurSize; -}; - -// the stable mscr should be: -// bigger than minArea and smaller than maxArea -// differ from its ancestor more than minDiversity -static bool MSCRStableCheck( MSCRNode* x, MSERParams params ) -{ - if ( x->size <= params.minArea || x->size >= params.maxArea ) - return 0; - if ( x->gmsr == NULL ) - return 1; - double div = (double)(x->size-x->gmsr->size)/(double)x->size; - return div > params.minDiversity; -} - -static void -extractMSER_8UC3( CvMat* src, - CvMat* mask, - vector >& msers, - MSERParams params ) -{ - msers.clear(); - MSCRNode* map = (MSCRNode*)cvAlloc( src->cols*src->rows*sizeof(map[0]) ); - int Ne = src->cols*src->rows*2-src->cols-src->rows; - MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) ); - TempMSCR* mscr = (TempMSCR*)cvAlloc( src->cols*src->rows*sizeof(mscr[0]) ); - double emean = 0; - CvMat* dx = cvCreateMat( src->rows, src->cols-1, CV_64FC1 ); - CvMat* dy = cvCreateMat( src->rows-1, src->cols, CV_64FC1 ); - Ne = preprocessMSER_8UC3( map, edge, &emean, src, mask, dx, dy, Ne, params.edgeBlurSize ); - emean = emean / (double)Ne; - std::sort(edge, edge + Ne, LessThanEdge()); - MSCREdge* edge_ub = edge+Ne; - MSCREdge* edgeptr = edge; - TempMSCR* mscrptr = mscr; - // the evolution process - for ( int i = 0; i < params.maxEvolution; i++ ) - { - double k = (double)i/(double)params.maxEvolution*(TABLE_SIZE-1); - int ti = cvFloor(k); - double reminder = k-ti; - double thres = emean*(chitab3[ti]*(1-reminder)+chitab3[ti+1]*reminder); - // to process all the edges in the list that chi < thres - while ( edgeptr < edge_ub && edgeptr->chi < thres ) - { - MSCRNode* lr = findMSCR( edgeptr->left ); - MSCRNode* rr = findMSCR( edgeptr->right ); - // get the region root (who is responsible) - if ( lr != rr ) - { - // rank idea take from: N-tree Disjoint-Set Forests for Maximally Stable Extremal Regions - if ( rr->rank > lr->rank ) - { - MSCRNode* tmp; - CV_SWAP( lr, rr, tmp ); - } else if ( lr->rank == rr->rank ) { - // at the same rank, we will compare the size - if ( lr->size > rr->size ) - { - MSCRNode* tmp; - CV_SWAP( lr, rr, tmp ); - } - lr->rank++; - } - rr->shortcut = lr; - lr->size += rr->size; - // 
join rr to the end of list lr (lr is a endless double-linked list) - lr->prev->next = rr; - lr->prev = rr->prev; - rr->prev->next = lr; - rr->prev = lr; - // area threshold force to reinitialize - if ( lr->size > (lr->size-rr->size)*params.areaThreshold ) - { - lr->sizei = lr->size; - lr->reinit = i; - if ( lr->tmsr != NULL ) - { - lr->tmsr->m = lr->dt-lr->di; - lr->tmsr = NULL; - } - lr->di = edgeptr->chi; - lr->s = 1e10; - } - lr->dt = edgeptr->chi; - if ( i > lr->reinit ) - { - double s = (double)(lr->size-lr->sizei)/(lr->dt-lr->di); - if ( s < lr->s ) - { - // skip the first one and check stablity - if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) ) - { - if ( lr->tmsr == NULL ) - { - lr->gmsr = lr->tmsr = mscrptr; - mscrptr++; - } - lr->tmsr->size = lr->size; - lr->tmsr->head = lr; - lr->tmsr->tail = lr->prev; - lr->tmsr->m = 0; - } - lr->s = s; - } - } - } - edgeptr++; - } - if ( edgeptr >= edge_ub ) - break; - } - for ( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ ) - // to prune area with margin less than minMargin - if ( ptr->m > params.minMargin ) - { - vector mser; - MSCRNode* lpt = ptr->head; - for ( int i = 0; i < ptr->size; i++ ) - { - Point pt; - pt.x = (lpt->index)&0xffff; - pt.y = (lpt->index)>>16; - lpt = lpt->next; - mser.push_back(pt); - } - msers.push_back(mser); - } - cvReleaseMat( &dx ); - cvReleaseMat( &dy ); - cvFree( &mscr ); - cvFree( &edge ); - cvFree( &map ); -} - /****************************************************************************************\ * Test for KeyPoint * \****************************************************************************************/ @@ -755,8 +78,6 @@ protected: vector > msers; CvMat src = image; - extractMSER_8UC3( &src, 0, msers, MSERParams()); - detector->detect(image, keypoints); if(keypoints.empty()) From 61cddd8b6362d861a90f1b24c2bce4a871f4239d Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 22:52:04 +0400 Subject: [PATCH 07/11] fixed opencv_java build --- modules/features2d/src/mser.cpp | 2 +- modules/features2d/test/test_keypoints.cpp | 3 - .../generator/src/cpp/features2d_manual.hpp | 88 ++++++++++--------- modules/java/generator/src/cpp/jni_part.cpp | 6 -- 4 files changed, 46 insertions(+), 53 deletions(-) diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index 44b23acb8..d5058d955 100644 --- a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -673,7 +673,7 @@ static int preprocessMSER_8uC3( MSCRNode* node, int Ne, int edgeBlurSize ) { - int srccpt = src.step-src.cols*3; + int srccpt = (int)(src.step-src.cols*3); const uchar* srcptr = src.ptr(); const uchar* lastptr = srcptr+3; double* dxptr = dx.ptr(); diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index bdfd5e991..46fd39578 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -75,9 +75,6 @@ protected: } vector keypoints; - vector > msers; - CvMat src = image; - detector->detect(image, keypoints); if(keypoints.empty()) diff --git a/modules/java/generator/src/cpp/features2d_manual.hpp b/modules/java/generator/src/cpp/features2d_manual.hpp index 90a16117b..81668f3ff 100644 --- a/modules/java/generator/src/cpp/features2d_manual.hpp +++ b/modules/java/generator/src/cpp/features2d_manual.hpp @@ -90,67 +90,68 @@ public: //not supported: SimpleBlob, Dense CV_WRAP static javaFeatureDetector* create( int detectorType ) { - String name; + //String name; if (detectorType > DYNAMICDETECTOR) { - 
name = "Dynamic"; + //name = "Dynamic"; detectorType -= DYNAMICDETECTOR; } if (detectorType > PYRAMIDDETECTOR) { - name = "Pyramid"; + //name = "Pyramid"; detectorType -= PYRAMIDDETECTOR; } if (detectorType > GRIDDETECTOR) { - name = "Grid"; + //name = "Grid"; detectorType -= GRIDDETECTOR; } + Ptr fd; switch(detectorType) { case FAST: - name = name + "FAST"; - break; - case STAR: - name = name + "STAR"; - break; - case SIFT: - name = name + "SIFT"; - break; - case SURF: - name = name + "SURF"; + fd = FastFeatureDetector::create(); break; + //case STAR: + // fd = xfeatures2d::StarDetector::create(); + // break; + //case SIFT: + // name = name + "SIFT"; + // break; + //case SURF: + // name = name + "SURF"; + // break; case ORB: - name = name + "ORB"; + fd = ORB::create(); break; case MSER: - name = name + "MSER"; + fd = MSER::create(); break; case GFTT: - name = name + "GFTT"; + fd = GFTTDetector::create(); break; case HARRIS: - name = name + "HARRIS"; + fd = GFTTDetector::create(); break; case SIMPLEBLOB: - name = name + "SimpleBlob"; - break; - case DENSE: - name = name + "Dense"; + fd = SimpleBlobDetector::create(); break; + //case DENSE: + // name = name + "Dense"; + // break; case BRISK: - name = name + "BRISK"; + fd = BRISK::create(); break; case AKAZE: - name = name + "AKAZE"; + fd = AKAZE::create(); break; default: CV_Error( Error::StsBadArg, "Specified feature detector type is not supported." ); break; } - return new javaFeatureDetector(FeatureDetector::create(name)); + return new javaFeatureDetector(fd); } CV_WRAP void write( const String& fileName ) const @@ -332,43 +333,44 @@ public: //not supported: Calonder CV_WRAP static javaDescriptorExtractor* create( int extractorType ) { - String name; + //String name; if (extractorType > OPPONENTEXTRACTOR) { - name = "Opponent"; + //name = "Opponent"; extractorType -= OPPONENTEXTRACTOR; } + Ptr de; switch(extractorType) { - case SIFT: - name = name + "SIFT"; - break; - case SURF: - name = name + "SURF"; - break; + //case SIFT: + // name = name + "SIFT"; + // break; + //case SURF: + // name = name + "SURF"; + // break; case ORB: - name = name + "ORB"; - break; - case BRIEF: - name = name + "BRIEF"; + de = ORB::create(); break; + //case BRIEF: + // name = name + "BRIEF"; + // break; case BRISK: - name = name + "BRISK"; - break; - case FREAK: - name = name + "FREAK"; + de = BRISK::create(); break; + //case FREAK: + // name = name + "FREAK"; + // break; case AKAZE: - name = name + "AKAZE"; + de = AKAZE::create(); break; default: CV_Error( Error::StsBadArg, "Specified descriptor extractor type is not supported." 
); break; } - return new javaDescriptorExtractor(DescriptorExtractor::create(name)); + return new javaDescriptorExtractor(de); } CV_WRAP void write( const String& fileName ) const diff --git a/modules/java/generator/src/cpp/jni_part.cpp b/modules/java/generator/src/cpp/jni_part.cpp index c8475d0e8..e9776cee1 100644 --- a/modules/java/generator/src/cpp/jni_part.cpp +++ b/modules/java/generator/src/cpp/jni_part.cpp @@ -24,15 +24,9 @@ JNI_OnLoad(JavaVM* vm, void* ) return -1; bool init = true; -#ifdef HAVE_OPENCV_FEATURES2D - init &= cv::initModule_features2d(); -#endif #ifdef HAVE_OPENCV_VIDEO init &= cv::initModule_video(); #endif -#ifdef HAVE_OPENCV_CONTRIB - init &= cv::initModule_contrib(); -#endif if(!init) return -1; From 48a860a33594568fe0cd54739712d76a5132c02e Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 16 Oct 2014 23:10:00 +0400 Subject: [PATCH 08/11] trying to fix strange compiler bug --- .../calib3d/real_time_pose_estimation/src/RobustMatcher.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h index a215c2b67..82aab2008 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h @@ -23,7 +23,7 @@ public: extractor_ = cv::ORB::create(); // BruteFroce matcher with Norm Hamming is the default matcher - matcher_ = cv::makePtr(cv::NORM_HAMMING, false); + matcher_ = cv::makePtr((int)cv::NORM_HAMMING, false); } virtual ~RobustMatcher(); From c422bdc3f864263503ff30a20b21917333e0b221 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Fri, 17 Oct 2014 14:22:02 +0400 Subject: [PATCH 09/11] fixed some more compile errors and test failures --- .../detection_of_planar_objects.rst | 17 +-- doc/user_guide/ug_features2d.rst | 12 +- modules/calib3d/include/opencv2/calib3d.hpp | 2 +- modules/calib3d/src/solvepnp.cpp | 5 +- ...common_interfaces_of_feature_detectors.rst | 106 ++++++------------ .../doc/feature_detection_and_description.rst | 7 +- .../features2d/include/opencv2/features2d.hpp | 8 +- modules/features2d/src/gftt.cpp | 18 +++ modules/features2d/src/orb.cpp | 82 +++++++++++--- .../test/test_detectors_regression.cpp | 4 +- modules/features2d/test/test_keypoints.cpp | 5 +- .../generator/src/cpp/features2d_manual.hpp | 1 + modules/python/test/test.py | 2 +- modules/stitching/src/matchers.cpp | 24 ++-- .../jni/jni_part.cpp | 4 +- .../OcvImageProcessing/MainPage.xaml.cpp | 4 +- 16 files changed, 165 insertions(+), 136 deletions(-) diff --git a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst index add22756e..5075be3c0 100644 --- a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst +++ b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst @@ -16,25 +16,16 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu Mat img2 = imread(argv[2], IMREAD_GRAYSCALE); #. - Detect keypoints in both images. :: + Detect keypoints in both images and compute descriptors for each of the keypoints. 
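   With the refactored interface a single ``Feature2D`` object handles both detection and
   description. A minimal self-contained sketch of this step, assuming the SURF implementation
   that now lives in the ``xfeatures2d`` (opencv_contrib) module and reusing the ``img1``/``img2``
   images loaded above; it is illustrative only, not part of the patch::

      #include "opencv2/features2d.hpp"
      #include "opencv2/xfeatures2d.hpp"   // SURF implementation (opencv_contrib)

      using namespace cv;

      // one object both detects keypoints and computes descriptors in a single call
      Ptr<Feature2D> surf = xfeatures2d::SURF::create();

      std::vector<KeyPoint> keypoints1, keypoints2;
      Mat descriptors1, descriptors2;
      surf->detectAndCompute(img1, Mat(), keypoints1, descriptors1);
      surf->detectAndCompute(img2, Mat(), keypoints2, descriptors2);

   Any other ``Feature2D`` (for example ``ORB::create()``) can be substituted without changing the
   calling code. The tutorial's updated snippet for this step follows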
:: // detecting keypoints - FastFeatureDetector detector(15); + Ptr surf = SURF::create(); vector keypoints1; - detector.detect(img1, keypoints1); + Mat descriptors1; + surf->detectAndCompute(img1, Mat(), keypoints1, descriptors1); ... // do the same for the second image -#. - Compute descriptors for each of the keypoints. :: - - // computing descriptors - SurfDescriptorExtractor extractor; - Mat descriptors1; - extractor.compute(img1, keypoints1, descriptors1); - - ... // process keypoints from the second image as well - #. Now, find the closest matches between descriptors from the first image to the second: :: diff --git a/doc/user_guide/ug_features2d.rst b/doc/user_guide/ug_features2d.rst index 37f2097ff..e5903c1f4 100644 --- a/doc/user_guide/ug_features2d.rst +++ b/doc/user_guide/ug_features2d.rst @@ -65,18 +65,18 @@ Let us break the code down. :: We load two images and check if they are loaded correctly.:: // detecting keypoints - FastFeatureDetector detector(15); + Ptr detector = FastFeatureDetector::create(15); vector keypoints1, keypoints2; - detector.detect(img1, keypoints1); - detector.detect(img2, keypoints2); + detector->detect(img1, keypoints1); + detector->detect(img2, keypoints2); First, we create an instance of a keypoint detector. All detectors inherit the abstract ``FeatureDetector`` interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the balance between the amount of keypoints and their stability. The range of values is different for different detectors (For instance, *FAST* threshold has the meaning of pixel intensity difference and usually varies in the region *[0,40]*. *SURF* threshold is applied to a Hessian of an image and usually takes on values larger than *100*), so use defaults in case of doubt. :: // computing descriptors - SurfDescriptorExtractor extractor; + Ptr extractor = SURF::create(); Mat descriptors1, descriptors2; - extractor.compute(img1, keypoints1, descriptors1); - extractor.compute(img2, keypoints2, descriptors2); + extractor->compute(img1, keypoints1, descriptors1); + extractor->compute(img2, keypoints2, descriptors2); We create an instance of descriptor extractor. The most of OpenCV descriptors inherit ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in a row *i* for each *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the ouptut keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). :: diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 3250358bd..62d7ccec6 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -185,7 +185,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz //! finds circles' grid pattern of the specified size in the image CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID, - const Ptr &blobDetector = makePtr()); + const Ptr &blobDetector = SimpleBlobDetector::create()); //! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern. 
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp index 6b03b0023..bf8464877 100644 --- a/modules/calib3d/src/solvepnp.cpp +++ b/modules/calib3d/src/solvepnp.cpp @@ -115,7 +115,10 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints, cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat(); double f = PnP.compute_pose(R, tvec); cv::Rodrigues(R, rvec); - cameraMatrix.at(0,0) = cameraMatrix.at(1,1) = f; + if(cameraMatrix.type() == CV_32F) + cameraMatrix.at(0,0) = cameraMatrix.at(1,1) = f; + else + cameraMatrix.at(0,0) = cameraMatrix.at(1,1) = f; return true; } else diff --git a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst index f4e424445..5ebbe8134 100644 --- a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst +++ b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst @@ -59,100 +59,60 @@ Detects keypoints in an image (first variant) or image set (second variant). :param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``. -FeatureDetector::create ------------------------ -Creates a feature detector by its name. - -.. ocv:function:: Ptr FeatureDetector::create( const String& detectorType ) - -.. ocv:pyfunction:: cv2.FeatureDetector_create(detectorType) -> retval - - :param detectorType: Feature detector type. - -The following detector types are supported: - -* ``"FAST"`` -- :ocv:class:`FastFeatureDetector` -* ``"ORB"`` -- :ocv:class:`ORB` -* ``"BRISK"`` -- :ocv:class:`BRISK` -* ``"MSER"`` -- :ocv:class:`MSER` -* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector` -* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled -* ``"SimpleBlob"`` -- :ocv:class:`SimpleBlobDetector` - FastFeatureDetector ------------------- -.. ocv:class:: FastFeatureDetector : public FeatureDetector +.. ocv:class:: FastFeatureDetector : public Feature2D Wrapping class for feature detection using the :ocv:func:`FAST` method. :: - class FastFeatureDetector : public FeatureDetector + class FastFeatureDetector : public Feature2D { public: - FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true, type=FastFeatureDetector::TYPE_9_16 ); - virtual void read( const FileNode& fn ); - virtual void write( FileStorage& fs ) const; - protected: - ... + static Ptr create( int threshold=1, bool nonmaxSuppression=true, type=FastFeatureDetector::TYPE_9_16 ); }; -GoodFeaturesToTrackDetector +GFTTDetector --------------------------- -.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector +.. ocv:class:: GFTTDetector : public FeatureDetector Wrapping class for feature detection using the :ocv:func:`goodFeaturesToTrack` function. 
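A minimal usage sketch under the new factory-based interface; enabling the Harris response
through ``GFTTDetector::USE_HARRIS_DETECTOR`` mirrors how the updated tests and Java wrapper in
this patch do it, and the image file name is only a placeholder::

    #include "opencv2/opencv.hpp"

    // a grayscale input image (placeholder file name)
    cv::Mat gray = cv::imread("building.png", cv::IMREAD_GRAYSCALE);

    cv::Ptr<cv::GFTTDetector> gftt =
        cv::GFTTDetector::create(1000, 0.01, 1., 3, false, 0.04);
    gftt->set(cv::GFTTDetector::USE_HARRIS_DETECTOR, 1);  // use the Harris score instead

    std::vector<cv::KeyPoint> corners;
    gftt->detect(gray, corners);

The trimmed class synopsis now reads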
:: - class GoodFeaturesToTrackDetector : public FeatureDetector + class GFTTDetector : public Feature2D { public: - class Params - { - public: - Params( int maxCorners=1000, double qualityLevel=0.01, - double minDistance=1., int blockSize=3, - bool useHarrisDetector=false, double k=0.04 ); - void read( const FileNode& fn ); - void write( FileStorage& fs ) const; - - int maxCorners; - double qualityLevel; - double minDistance; - int blockSize; - bool useHarrisDetector; - double k; - }; - - GoodFeaturesToTrackDetector( const GoodFeaturesToTrackDetector::Params& params= - GoodFeaturesToTrackDetector::Params() ); - GoodFeaturesToTrackDetector( int maxCorners, double qualityLevel, - double minDistance, int blockSize=3, - bool useHarrisDetector=false, double k=0.04 ); - virtual void read( const FileNode& fn ); - virtual void write( FileStorage& fs ) const; - protected: - ... + enum { USE_HARRIS_DETECTOR=10000 }; + static Ptr create( int maxCorners=1000, double qualityLevel=0.01, + double minDistance=1, int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); }; -MserFeatureDetector +MSER ------------------- -.. ocv:class:: MserFeatureDetector : public FeatureDetector +.. ocv:class:: MSER : public Feature2D -Wrapping class for feature detection using the -:ocv:class:`MSER` class. :: +Maximally stable region detector :: - class MserFeatureDetector : public FeatureDetector + class MSER : public Feature2D { public: - MserFeatureDetector( CvMSERParams params=cvMSERParams() ); - MserFeatureDetector( int delta, int minArea, int maxArea, - double maxVariation, double minDiversity, - int maxEvolution, double areaThreshold, - double minMargin, int edgeBlurSize ); - virtual void read( const FileNode& fn ); - virtual void write( FileStorage& fs ) const; - protected: - ... + enum + { + DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003, + MAX_EVOLUTION=10004, AREA_THRESHOLD=10005, + MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007 + }; + + //! the full constructor + static Ptr create( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ); + + virtual void detectRegions( InputArray image, + std::vector >& msers, + std::vector& bboxes ) = 0; }; SimpleBlobDetector @@ -189,10 +149,8 @@ Class for extracting blobs from an image. :: float minConvexity, maxConvexity; }; - SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); - - protected: - ... + static Ptr create(const SimpleBlobDetector::Params + ¶meters = SimpleBlobDetector::Params()); }; The class implements a simple algorithm for extracting blobs from an image: diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index de024a41c..20e2df8fa 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -14,11 +14,6 @@ Detects corners using the FAST algorithm .. ocv:function:: void FAST( InputArray image, vector& keypoints, int threshold, bool nonmaxSuppression=true ) .. ocv:function:: void FAST( InputArray image, vector& keypoints, int threshold, bool nonmaxSuppression, int type ) -.. ocv:pyfunction:: cv2.FastFeatureDetector([, threshold[, nonmaxSuppression]]) -> -.. ocv:pyfunction:: cv2.FastFeatureDetector(threshold, nonmaxSuppression, type) -> -.. 
ocv:pyfunction:: cv2.FastFeatureDetector.detect(image[, mask]) -> keypoints - - :param image: grayscale image where keypoints (corners) are detected. :param keypoints: keypoints detected on the image. @@ -55,7 +50,7 @@ Maximally stable extremal region extractor. :: // runs the extractor on the specified image; returns the MSERs, // each encoded as a contour (vector, see findContours) // the optional mask marks the area where MSERs are searched for - void operator()( const Mat& image, vector >& msers, const Mat& mask ) const; + void detectRegions( InputArray image, vector >& msers, vector& bboxes ) const; }; The class encapsulates all the parameters of the MSER extraction algorithm (see diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index a42d454c4..193d5b93e 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -164,7 +164,13 @@ class CV_EXPORTS_W ORB : public Feature2D { public: // the size of the signature in bytes - enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; + enum + { + kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1, + NFEATURES=10000, SCALE_FACTOR=10001, NLEVELS=10002, + EDGE_THRESHOLD=10003, FIRST_LEVEL=10004, WTA_K=10005, + SCORE_TYPE=10006, PATCH_SIZE=10007, FAST_THRESHOLD=10008 + }; CV_WRAP static Ptr create(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold = 20); diff --git a/modules/features2d/src/gftt.cpp b/modules/features2d/src/gftt.cpp index 899e2f274..9cfc77d83 100644 --- a/modules/features2d/src/gftt.cpp +++ b/modules/features2d/src/gftt.cpp @@ -55,6 +55,24 @@ public: { } + void set(int prop, double value) + { + if( prop == USE_HARRIS_DETECTOR ) + useHarrisDetector = value != 0; + else + CV_Error(Error::StsBadArg, ""); + } + + double get(int prop) const + { + double value = 0; + if( prop == USE_HARRIS_DETECTOR ) + value = useHarrisDetector; + else + CV_Error(Error::StsBadArg, ""); + return value; + } + void detect( InputArray _image, std::vector& keypoints, InputArray _mask ) { std::vector corners; diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index d7b8fce40..97d29e29c 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -100,12 +100,12 @@ ocl_ICAngles(const UMat& imgbuf, const UMat& layerinfo, static bool ocl_computeOrbDescriptors(const UMat& imgbuf, const UMat& layerInfo, const UMat& keypoints, UMat& desc, const UMat& pattern, - int nkeypoints, int dsize, int WTA_K) + int nkeypoints, int dsize, int wta_k) { size_t globalSize[] = {nkeypoints}; ocl::Kernel desc_ker("ORB_computeDescriptor", ocl::features2d::orb_oclsrc, - format("-D ORB_DESCRIPTORS -D WTA_K=%d", WTA_K)); + format("-D ORB_DESCRIPTORS -D wta_k=%d", wta_k)); if( desc_ker.empty() ) return false; @@ -209,7 +209,7 @@ static void ICAngles(const Mat& img, const std::vector& layerinfo, static void computeOrbDescriptors( const Mat& imagePyramid, const std::vector& layerInfo, const std::vector& layerScale, std::vector& keypoints, - Mat& descriptors, const std::vector& _pattern, int dsize, int WTA_K ) + Mat& descriptors, const std::vector& _pattern, int dsize, int wta_k ) { int step = (int)imagePyramid.step; int j, i, nkeypoints = (int)keypoints.size(); @@ -248,7 +248,7 @@ computeOrbDescriptors( const Mat& imagePyramid, const std::vector& layerIn center[iy*step + ix+1]*x*(1-y) + 
center[(iy+1)*step + ix+1]*x*y)) #endif - if( WTA_K == 2 ) + if( wta_k == 2 ) { for (i = 0; i < dsize; ++i, pattern += 16) { @@ -273,7 +273,7 @@ computeOrbDescriptors( const Mat& imagePyramid, const std::vector& layerIn desc[i] = (uchar)val; } } - else if( WTA_K == 3 ) + else if( wta_k == 3 ) { for (i = 0; i < dsize; ++i, pattern += 12) { @@ -293,7 +293,7 @@ computeOrbDescriptors( const Mat& imagePyramid, const std::vector& layerIn desc[i] = (uchar)val; } } - else if( WTA_K == 4 ) + else if( wta_k == 4 ) { for (i = 0; i < dsize; ++i, pattern += 16) { @@ -334,7 +334,7 @@ computeOrbDescriptors( const Mat& imagePyramid, const std::vector& layerIn } } else - CV_Error( Error::StsBadSize, "Wrong WTA_K. It can be only 2, 3 or 4." ); + CV_Error( Error::StsBadSize, "Wrong wta_k. It can be only 2, 3 or 4." ); #undef GET_VALUE } } @@ -652,10 +652,60 @@ public: explicit ORB_Impl(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold, int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) : nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels), - edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K), + edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), wta_k(_WTA_K), scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold) {} + void set(int prop, double value) + { + if( prop == NFEATURES ) + nfeatures = cvRound(value); + else if( prop == SCALE_FACTOR ) + scaleFactor = value; + else if( prop == NLEVELS ) + nlevels = cvRound(value); + else if( prop == EDGE_THRESHOLD ) + edgeThreshold = cvRound(value); + else if( prop == FIRST_LEVEL ) + firstLevel = cvRound(value); + else if( prop == WTA_K ) + wta_k = cvRound(value); + else if( prop == SCORE_TYPE ) + scoreType = cvRound(value); + else if( prop == PATCH_SIZE ) + patchSize = cvRound(value); + else if( prop == FAST_THRESHOLD ) + fastThreshold = cvRound(value); + else + CV_Error(Error::StsBadArg, ""); + } + + double get(int prop) const + { + double value = 0; + if( prop == NFEATURES ) + value = nfeatures; + else if( prop == SCALE_FACTOR ) + value = scaleFactor; + else if( prop == NLEVELS ) + value = nlevels; + else if( prop == EDGE_THRESHOLD ) + value = edgeThreshold; + else if( prop == FIRST_LEVEL ) + value = firstLevel; + else if( prop == WTA_K ) + value = wta_k; + else if( prop == SCORE_TYPE ) + value = scoreType; + else if( prop == PATCH_SIZE ) + value = patchSize; + else if( prop == FAST_THRESHOLD ) + value = fastThreshold; + else + CV_Error(Error::StsBadArg, ""); + return value; + } + // returns the descriptor size in bytes int descriptorSize() const; // returns the descriptor type @@ -674,7 +724,7 @@ protected: int nlevels; int edgeThreshold; int firstLevel; - int WTA_K; + int wta_k; int scoreType; int patchSize; int fastThreshold; @@ -1095,14 +1145,14 @@ void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask, makeRandomPattern(patchSize, patternbuf, npoints); } - CV_Assert( WTA_K == 2 || WTA_K == 3 || WTA_K == 4 ); + CV_Assert( wta_k == 2 || wta_k == 3 || wta_k == 4 ); - if( WTA_K == 2 ) + if( wta_k == 2 ) std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern)); else { int ntuples = descriptorSize()*4; - initializeOrbPattern(pattern0, pattern, ntuples, WTA_K, npoints); + initializeOrbPattern(pattern0, pattern, ntuples, wta_k, npoints); } for( level = 0; level < nLevels; level++ ) @@ -1125,23 +1175,23 @@ void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask, UMat udescriptors = _descriptors.getUMat(); useOCL = 
ocl_computeOrbDescriptors(uimagePyramid, ulayerInfo, ukeypoints, udescriptors, upattern, - nkeypoints, dsize, WTA_K); + nkeypoints, dsize, wta_k); } if( !useOCL ) { Mat descriptors = _descriptors.getMat(); computeOrbDescriptors(imagePyramid, layerInfo, layerScale, - keypoints, descriptors, pattern, dsize, WTA_K); + keypoints, descriptors, pattern, dsize, wta_k); } } } Ptr ORB::create(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, - int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold) + int firstLevel, int wta_k, int scoreType, int patchSize, int fastThreshold) { return makePtr(nfeatures, scaleFactor, nlevels, edgeThreshold, - firstLevel, WTA_K, scoreType, patchSize, fastThreshold); + firstLevel, wta_k, scoreType, patchSize, fastThreshold); } } diff --git a/modules/features2d/test/test_detectors_regression.cpp b/modules/features2d/test/test_detectors_regression.cpp index a3ca0a9f3..d06418cfe 100644 --- a/modules/features2d/test/test_detectors_regression.cpp +++ b/modules/features2d/test/test_detectors_regression.cpp @@ -267,7 +267,9 @@ TEST( Features2d_Detector_GFTT, regression ) TEST( Features2d_Detector_Harris, regression ) { - CV_FeatureDetectorTest test( "detector-harris", GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04)); + Ptr gftt = GFTTDetector::create(); + gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1); + CV_FeatureDetectorTest test( "detector-harris", gftt); test.safe_run(); } diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index 46fd39578..7ede45403 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -133,13 +133,16 @@ TEST(Features2d_Detector_Keypoints_FAST, validation) TEST(Features2d_Detector_Keypoints_HARRIS, validation) { + CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04)); test.safe_run(); } TEST(Features2d_Detector_Keypoints_GFTT, validation) { - CV_FeatureDetectorKeypointsTest test(GFTTDetector::create()); + Ptr gftt = GFTTDetector::create(); + gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1); + CV_FeatureDetectorKeypointsTest test(gftt); test.safe_run(); } diff --git a/modules/java/generator/src/cpp/features2d_manual.hpp b/modules/java/generator/src/cpp/features2d_manual.hpp index 81668f3ff..346180fe2 100644 --- a/modules/java/generator/src/cpp/features2d_manual.hpp +++ b/modules/java/generator/src/cpp/features2d_manual.hpp @@ -133,6 +133,7 @@ public: break; case HARRIS: fd = GFTTDetector::create(); + fd->set(GFTTDetector::USE_HARRIS_DETECTOR, 1); break; case SIMPLEBLOB: fd = SimpleBlobDetector::create(); diff --git a/modules/python/test/test.py b/modules/python/test/test.py index 76f64fc52..e4edc88b5 100644 --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -91,7 +91,7 @@ class Hackathon244Tests(NewOpenCVTests): self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1]) def test_fast(self): - fd = cv2.FastFeatureDetector(30, True) + fd = cv2.FastFeatureDetector_create(30, True) img = self.get_sample("samples/cpp/right02.jpg", 0) img = cv2.medianBlur(img, 3) imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp index ad3f6320f..0d6edb679 100644 --- a/modules/stitching/src/matchers.cpp +++ b/modules/stitching/src/matchers.cpp @@ -50,6 +50,8 @@ using namespace cv::cuda; #include "opencv2/xfeatures2d.hpp" #endif +using xfeatures2d::SURF; + namespace { struct DistIdxPair @@ 
-321,27 +323,27 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int { if (num_octaves_descr == num_octaves && num_layers_descr == num_layers) { - surf = xfeatures2d::SURF::create(); + surf = SURF::create(); if( !surf ) CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" ); - surf->set("hessianThreshold", hess_thresh); - surf->set("nOctaves", num_octaves); - surf->set("nOctaveLayers", num_layers); + surf->set(SURF::HESSIAN_THRESHOLD, hess_thresh); + surf->set(SURF::NOCTAVES, num_octaves); + surf->set(SURF::NOCTAVE_LAYERS, num_layers); } else { - detector_ = xfeatures2d::SURF::create(); - extractor_ = xfeatures2d::SURF::create(); + detector_ = SURF::create(); + extractor_ = SURF::create(); if( !detector_ || !extractor_ ) CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" ); - detector_->set("hessianThreshold", hess_thresh); - detector_->set("nOctaves", num_octaves); - detector_->set("nOctaveLayers", num_layers); + detector_->set(SURF::HESSIAN_THRESHOLD, hess_thresh); + detector_->set(SURF::NOCTAVES, num_octaves); + detector_->set(SURF::NOCTAVE_LAYERS, num_layers); - extractor_->set("nOctaves", num_octaves_descr); - extractor_->set("nOctaveLayers", num_layers_descr); + extractor_->set(SURF::NOCTAVES, num_octaves_descr); + extractor_->set(SURF::NOCTAVE_LAYERS, num_layers_descr); } } diff --git a/samples/android/tutorial-2-mixedprocessing/jni/jni_part.cpp b/samples/android/tutorial-2-mixedprocessing/jni/jni_part.cpp index f8e3ada72..2c0996156 100644 --- a/samples/android/tutorial-2-mixedprocessing/jni/jni_part.cpp +++ b/samples/android/tutorial-2-mixedprocessing/jni/jni_part.cpp @@ -16,8 +16,8 @@ JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindF Mat& mRgb = *(Mat*)addrRgba; vector v; - FastFeatureDetector detector(50); - detector.detect(mGr, v); + Ptr detector = FastFeatureDetector::create(50); + detector->detect(mGr, v); for( unsigned int i = 0; i < v.size(); i++ ) { const KeyPoint& kp = v[i]; diff --git a/samples/winrt/OcvImageProcessing/OcvImageProcessing/MainPage.xaml.cpp b/samples/winrt/OcvImageProcessing/OcvImageProcessing/MainPage.xaml.cpp index 4de13401d..6d5ac04bd 100644 --- a/samples/winrt/OcvImageProcessing/OcvImageProcessing/MainPage.xaml.cpp +++ b/samples/winrt/OcvImageProcessing/OcvImageProcessing/MainPage.xaml.cpp @@ -129,12 +129,12 @@ cv::Mat OcvImageProcessing::MainPage::ApplyFindFeaturesFilter(const cv::Mat& ima { cv::Mat result; cv::Mat intermediateMat; - cv::FastFeatureDetector detector(50); + cv::Ptr detector = cv::FastFeatureDetector::create(50); std::vector features; image.copyTo(result); cv::cvtColor(image, intermediateMat, CV_RGBA2GRAY); - detector.detect(intermediateMat, features); + detector->detect(intermediateMat, features); for( unsigned int i = 0; i < std::min(features.size(), (size_t)50); i++ ) { From c5292c25493d093943fc3621140d07c68d4cac2a Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Fri, 17 Oct 2014 14:30:12 +0400 Subject: [PATCH 10/11] fixed error in OpenCL ORB --- modules/features2d/src/orb.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index 97d29e29c..3a9a2e9ca 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -105,7 +105,7 @@ ocl_computeOrbDescriptors(const UMat& imgbuf, const UMat& layerInfo, size_t globalSize[] = {nkeypoints}; ocl::Kernel desc_ker("ORB_computeDescriptor", ocl::features2d::orb_oclsrc, - format("-D 
ORB_DESCRIPTORS -D wta_k=%d", wta_k)); + format("-D ORB_DESCRIPTORS -D WTA_K=%d", wta_k)); if( desc_ker.empty() ) return false; From f6f839eb3c37a6d676910d98ecf196d16d609e58 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Fri, 17 Oct 2014 14:46:08 +0400 Subject: [PATCH 11/11] trying to resolve the merge conflict --- modules/features2d/src/orb.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index 3a9a2e9ca..3007309d8 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -38,6 +38,10 @@ #include "opencl_kernels_features2d.hpp" #include +#ifndef CV_IMPL_ADD +#define CV_IMPL_ADD(x) +#endif + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace cv @@ -888,6 +892,7 @@ static void computeKeyPoints(const Mat& imagePyramid, uresponses, nkeypoints, 7, HARRIS_K ); if( useOCL ) { + CV_IMPL_ADD(CV_IMPL_OCL); uresponses.copyTo(responses); for( i = 0; i < nkeypoints; i++ ) allKeypoints[i].response = responses.at(i); @@ -932,6 +937,7 @@ static void computeKeyPoints(const Mat& imagePyramid, if( useOCL ) { + CV_IMPL_ADD(CV_IMPL_OCL); uresponses.copyTo(responses); for( i = 0; i < nkeypoints; i++ ) allKeypoints[i].angle = responses.at(i); @@ -1176,6 +1182,10 @@ void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask, useOCL = ocl_computeOrbDescriptors(uimagePyramid, ulayerInfo, ukeypoints, udescriptors, upattern, nkeypoints, dsize, wta_k); + if(useOCL) + { + CV_IMPL_ADD(CV_IMPL_OCL); + } } if( !useOCL )