yet another attempt to refactor features2d; the first commit, features2d does not even compile
parent 83ef276697
commit 1796a26fc7
@@ -49,8 +49,6 @@
 namespace cv
 {
 
-CV_EXPORTS bool initModule_features2d(void);
-
 // //! writes vector of keypoints to the file storage
 // CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
 // //! reads vector of keypoints from the specified file storage node
@@ -94,12 +92,12 @@ public:
 /************************************ Base Classes ************************************/
 
 /*
- * Abstract base class for 2D image feature detectors.
+ * Abstract base class for 2D image feature detectors and descriptor extractors
  */
-class CV_EXPORTS_W FeatureDetector : public virtual Algorithm
+class CV_EXPORTS_W Feature2D : public virtual Algorithm
 {
 public:
-    virtual ~FeatureDetector();
+    virtual ~Feature2D();
 
     /*
      * Detect keypoints in an image.
@@ -108,47 +106,9 @@ public:
      * mask Mask specifying where to look for keypoints (optional). Must be a char
      * matrix with non-zero values in the region of interest.
      */
-    CV_WRAP void detect( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    /*
-     * Detect keypoints in an image set.
-     * images Image collection.
-     * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i].
-     * masks Masks for image set. masks[i] is a mask for images[i].
-     */
-    void detect( InputArrayOfArrays images, std::vector<std::vector<KeyPoint> >& keypoints, InputArrayOfArrays masks=noArray() ) const;
-
-    // Return true if detector object is empty
-    CV_WRAP virtual bool empty() const;
-
-    // Create feature detector by detector name.
-    CV_WRAP static Ptr<FeatureDetector> create( const String& detectorType );
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const = 0;
-
-    /*
-     * Remove keypoints that are not in the mask.
-     * Helper function, useful when wrapping a library call for keypoint detection that
-     * does not support a mask argument.
-     */
-    static void removeInvalidPoints( const Mat & mask, std::vector<KeyPoint>& keypoints );
-};
-
-
-/*
- * Abstract base class for computing descriptors for image keypoints.
- *
- * In this interface we assume a keypoint descriptor can be represented as a
- * dense, fixed-dimensional vector of some basic type. Most descriptors used
- * in practice follow this pattern, as it makes it very easy to compute
- * distances between descriptors. Therefore we represent a collection of
- * descriptors as a Mat, where each row is one keypoint descriptor.
- */
-class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm
-{
-public:
-    virtual ~DescriptorExtractor();
+    CV_WRAP virtual void detect( InputArray image,
+                                 CV_OUT std::vector<KeyPoint>& keypoints,
+                                 InputArray mask=noArray() );
 
     /*
      * Compute the descriptors for a set of keypoints in an image.
@@ -156,62 +116,26 @@ public:
      * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
      * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i.
      */
-    CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
+    CV_WRAP virtual void compute( InputArray image,
+                                  CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
+                                  OutputArray descriptors );
 
-    /*
-     * Compute the descriptors for a keypoints collection detected in image collection.
-     * images Image collection.
-     * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i].
-     * Keypoints for which a descriptor cannot be computed are removed.
-     * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i].
-     */
-    void compute( InputArrayOfArrays images, std::vector<std::vector<KeyPoint> >& keypoints, OutputArrayOfArrays descriptors ) const;
+    /* Detects keypoints and computes the descriptors */
+    CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
+                                           CV_OUT std::vector<KeyPoint>& keypoints,
+                                           OutputArray descriptors,
+                                           bool useProvidedKeypoints=false );
 
-    CV_WRAP virtual int descriptorSize() const = 0;
-    CV_WRAP virtual int descriptorType() const = 0;
-    CV_WRAP virtual int defaultNorm() const = 0;
+    CV_WRAP virtual int descriptorSize() const;
+    CV_WRAP virtual int descriptorType() const;
+    CV_WRAP virtual int defaultNorm() const;
 
+    // Return true if detector object is empty
     CV_WRAP virtual bool empty() const;
-
-    CV_WRAP static Ptr<DescriptorExtractor> create( const String& descriptorExtractorType );
-
-protected:
-    virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const = 0;
-
-    /*
-     * Remove keypoints within borderPixels of an image edge.
-     */
-    static void removeBorderKeypoints( std::vector<KeyPoint>& keypoints,
-                                       Size imageSize, int borderSize );
 };
 
-/*
- * Abstract base class for simultaneous 2D feature detection descriptor extraction.
- */
-class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor
-{
-public:
-    /*
-     * Detect keypoints in an image.
-     * image The image.
-     * keypoints The detected keypoints.
-     * mask Mask specifying where to look for keypoints (optional). Must be a char
-     * matrix with non-zero values in the region of interest.
-     * useProvidedKeypoints If true, the method will skip the detection phase and will compute
-     * descriptors for the provided keypoints
-     */
-    CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask,
-                                     CV_OUT std::vector<KeyPoint>& keypoints,
-                                     OutputArray descriptors,
-                                     bool useProvidedKeypoints=false ) const = 0;
-
-    CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-
-    // Create feature detector and descriptor extractor by name.
-    CV_WRAP static Ptr<Feature2D> create( const String& name );
-};
+typedef Feature2D FeatureDetector;
+typedef Feature2D DescriptorExtractor;
 
 /*!
   BRISK implementation
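The practical effect of this hunk is that detection and description now live on one interface. A minimal usage sketch, assuming the factory functions introduced later in this commit and an already-loaded grayscale Mat named img (illustrative only; this commit is stated not to compile, so nothing here has been built or run):

    #include <opencv2/features2d.hpp>
    using namespace cv;

    Ptr<Feature2D> f2d = ORB::create();        // one object is both detector and extractor
    std::vector<KeyPoint> kpts;
    Mat descs;
    f2d->detectAndCompute(img, noArray(), kpts, descs);  // one-shot path
    // or the two-step path on the same object:
    f2d->detect(img, kpts);
    f2d->compute(img, kpts, descs);

The old FeatureDetector / DescriptorExtractor names survive only as typedefs of Feature2D, so client code that stores a Ptr<FeatureDetector> should still compile once the module itself does.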
@@ -219,94 +143,12 @@ public:
 class CV_EXPORTS_W BRISK : public Feature2D
 {
 public:
-    CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f);
-
-    virtual ~BRISK();
-
-    // returns the descriptor size in bytes
-    int descriptorSize() const;
-    // returns the descriptor type
-    int descriptorType() const;
-    // returns the default norm type
-    int defaultNorm() const;
-
-    // Compute the BRISK features on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
-
-    // Compute the BRISK features and descriptors on an image
-    void operator()( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
-                     OutputArray descriptors, bool useProvidedKeypoints=false ) const;
-
-    AlgorithmInfo* info() const;
+    CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);
 
     // custom setup
-    CV_WRAP explicit BRISK(std::vector<float> &radiusList, std::vector<int> &numberList,
-        float dMax=5.85f, float dMin=8.2f, std::vector<int> indexChange=std::vector<int>());
-
-    // call this to generate the kernel:
-    // circle of radius r (pixels), with n points;
-    // short pairings with dMax, long pairings with dMin
-    CV_WRAP void generateKernel(std::vector<float> &radiusList,
-        std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
-        std::vector<int> indexChange=std::vector<int>());
-
-protected:
-
-    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
-    void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
-                                            OutputArray descriptors, bool doDescriptors, bool doOrientation,
-                                            bool useProvidedKeypoints) const;
-
-    // Feature parameters
-    CV_PROP_RW int threshold;
-    CV_PROP_RW int octaves;
-
-    // some helper structures for the Brisk pattern representation
-    struct BriskPatternPoint{
-        float x;         // x coordinate relative to center
-        float y;         // x coordinate relative to center
-        float sigma;     // Gaussian smoothing sigma
-    };
-    struct BriskShortPair{
-        unsigned int i;  // index of the first pattern point
-        unsigned int j;  // index of other pattern point
-    };
-    struct BriskLongPair{
-        unsigned int i;  // index of the first pattern point
-        unsigned int j;  // index of other pattern point
-        int weighted_dx; // 1024.0/dx
-        int weighted_dy; // 1024.0/dy
-    };
-    inline int smoothedIntensity(const cv::Mat& image,
-                const cv::Mat& integral, const float key_x,
-                const float key_y, const unsigned int scale,
-                const unsigned int rot, const unsigned int point) const;
-    // pattern properties
-    BriskPatternPoint* patternPoints_;  //[i][rotation][scale]
-    unsigned int points_;               // total number of collocation points
-    float* scaleList_;                  // lists the scaling per scale index [scale]
-    unsigned int* sizeList_;            // lists the total pattern size per scale index [scale]
-    static const unsigned int scales_;  // scales discretization
-    static const float scalerange_;     // span of sizes 40->4 Octaves - else, this needs to be adjusted...
-    static const unsigned int n_rot_;   // discretization of the rotation look-up
-
-    // pairs
-    int strings_;                       // number of uchars the descriptor consists of
-    float dMax_;                        // short pair maximum distance
-    float dMin_;                        // long pair maximum distance
-    BriskShortPair* shortPairs_;        // d<_dMax
-    BriskLongPair* longPairs_;          // d>_dMin
-    unsigned int noShortPairs_;         // number of shortParis
-    unsigned int noLongPairs_;          // number of longParis
-
-    // general
-    static const float basicSize_;
+    CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
+        float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>());
 };
 
 /*!
  ORB implementation.
  */
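A sketch of what construction looks like after this change, read directly off the new signatures above (not verified against a build): the public constructors are gone, callers go through the static factory, and the custom-pattern constructor becomes a second create overload.

    Ptr<BRISK> brisk = BRISK::create(30 /* thresh */, 3 /* octaves */, 1.0f /* patternScale */);
    std::vector<KeyPoint> kpts;
    Mat descs;
    brisk->detectAndCompute(img, noArray(), kpts, descs);  // inherited from Feature2D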
@@ -316,44 +158,10 @@ public:
     // the size of the signature in bytes
     enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
 
-    CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
+    CV_WRAP static Ptr<ORB> create(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
         int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold = 20);
-
-    // returns the descriptor size in bytes
-    int descriptorSize() const;
-    // returns the descriptor type
-    int descriptorType() const;
-    // returns the default norm type
-    int defaultNorm() const;
-
-    // Compute the ORB features and descriptors on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
-
-    // Compute the ORB features and descriptors on an image
-    void operator()( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
-                     OutputArray descriptors, bool useProvidedKeypoints=false ) const;
-
-    AlgorithmInfo* info() const;
-
-protected:
-
-    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    CV_PROP_RW int nfeatures;
-    CV_PROP_RW double scaleFactor;
-    CV_PROP_RW int nlevels;
-    CV_PROP_RW int edgeThreshold;
-    CV_PROP_RW int firstLevel;
-    CV_PROP_RW int WTA_K;
-    CV_PROP_RW int scoreType;
-    CV_PROP_RW int patchSize;
-    CV_PROP_RW int fastThreshold;
 };
 
-typedef ORB OrbFeatureDetector;
-typedef ORB OrbDescriptorExtractor;
-
 /*!
  Maximal Stable Extremal Regions class.
 
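Same pattern for ORB; a hypothetical call site under the new API (parameter meanings are unchanged from the old constructor):

    Ptr<ORB> orb = ORB::create(1000, 1.2f, 8);   // nfeatures, scaleFactor, nlevels
    orb->detectAndCompute(img, noArray(), kpts, descs);
    // ORB descriptors are binary (CV_8U rows), matched with a Hamming norm.

Note that the OrbFeatureDetector and OrbDescriptorExtractor typedefs are dropped here without a replacement, so code using those aliases will need to switch to ORB or Feature2D.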
@@ -363,36 +171,19 @@ typedef ORB OrbDescriptorExtractor;
 
 It returns the regions, each of those is encoded as a contour.
 */
-class CV_EXPORTS_W MSER : public FeatureDetector
+class CV_EXPORTS_W MSER : public Feature2D
 {
 public:
     //! the full constructor
-    CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400,
+    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
           double _max_variation=0.25, double _min_diversity=.2,
           int _max_evolution=200, double _area_threshold=1.01,
           double _min_margin=0.003, int _edge_blur_size=5 );
 
-    //! the operator that extracts the MSERs from the image or the specific part of it
-    CV_WRAP_AS(detect) void operator()( InputArray image, CV_OUT std::vector<std::vector<Point> >& msers,
-                                        InputArray mask=noArray() ) const;
-    AlgorithmInfo* info() const;
-
-protected:
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    int delta;
-    int minArea;
-    int maxArea;
-    double maxVariation;
-    double minDiversity;
-    int maxEvolution;
-    double areaThreshold;
-    double minMargin;
-    int edgeBlurSize;
+    CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label,
+                                        OutputArray stats=noArray() ) const = 0;
 };
 
-typedef MSER MserFeatureDetector;
-
 //! detects corners using FAST algorithm by E. Rosten
 CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression=true );
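The MSER interface changes shape as well: the operator() that returned contours is replaced by a pure virtual detectAndLabel. Its output format is not documented in this hunk, so the following is only a guess at the intended call shape (a label image plus an optional per-region statistics array, with the return value presumably being the region count):

    Ptr<MSER> mser = MSER::create();
    Mat labels, stats;
    int nregions = mser->detectAndLabel(gray, labels, stats);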
@@ -400,48 +191,27 @@ CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
 CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression, int type );
 
-class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector
+class CV_EXPORTS_W FastFeatureDetector : public Feature2D
 {
 public:
     enum Type
     {
       TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
     };
 
-    CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true);
-    CV_WRAP FastFeatureDetector( int threshold, bool nonmaxSuppression, int type);
-    AlgorithmInfo* info() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    int threshold;
-    bool nonmaxSuppression;
-    int type;
+    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 );
 };
 
 
-class CV_EXPORTS_W GFTTDetector : public FeatureDetector
+class CV_EXPORTS_W GFTTDetector : public Feature2D
 {
 public:
-    CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
+    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
                           int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
-    AlgorithmInfo* info() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    int nfeatures;
-    double qualityLevel;
-    double minDistance;
-    int blockSize;
-    bool useHarrisDetector;
-    double k;
 };
 
-typedef GFTTDetector GoodFeaturesToTrackDetector;
-
-class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector
+class CV_EXPORTS_W SimpleBlobDetector : public Feature2D
 {
 public:
   struct CV_EXPORTS_W_SIMPLE Params
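For the simpler detectors the change is mechanical; an assumed usage once the factories exist:

    Ptr<FastFeatureDetector> fast = FastFeatureDetector::create(10, true, FastFeatureDetector::TYPE_9_16);
    Ptr<GFTTDetector> gftt = GFTTDetector::create(1000, 0.01, 1);
    fast->detect(img, kpts);

The two old FastFeatureDetector constructors collapse into a single create with a defaulted type argument, and the GoodFeaturesToTrackDetector typedef disappears.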
@@ -472,81 +242,29 @@ public:
       void write( FileStorage& fs ) const;
   };
 
-  CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
-
-  virtual void read( const FileNode& fn );
-  virtual void write( FileStorage& fs ) const;
-
-protected:
-  struct CV_EXPORTS Center
-  {
-      Point2d location;
-      double radius;
-      double confidence;
-  };
-
-  virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-  virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
-
-  Params params;
-  AlgorithmInfo* info() const;
+  CV_WRAP static Ptr<SimpleBlobDetector>
+    create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
 };
 
-
-// KAZE/AKAZE diffusivity
-enum {
-    DIFF_PM_G1 = 0,
-    DIFF_PM_G2 = 1,
-    DIFF_WEICKERT = 2,
-    DIFF_CHARBONNIER = 3
-};
-
-// AKAZE descriptor type
-enum {
-    DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
-    DESCRIPTOR_KAZE = 3,
-    DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
-    DESCRIPTOR_MLDB = 5
-};
-
 /*!
 KAZE implementation
 */
 class CV_EXPORTS_W KAZE : public Feature2D
 {
 public:
-    CV_WRAP KAZE();
-    CV_WRAP explicit KAZE(bool extended, bool upright, float threshold = 0.001f,
-                          int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2);
-
-    virtual ~KAZE();
-
-    // returns the descriptor size in bytes
-    int descriptorSize() const;
-    // returns the descriptor type
-    int descriptorType() const;
-    // returns the default norm type
-    int defaultNorm() const;
-
-    AlgorithmInfo* info() const;
-
-    // Compute the KAZE features on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
-
-    // Compute the KAZE features and descriptors on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
-                    OutputArray descriptors, bool useProvidedKeypoints = false) const;
-
-protected:
-    void detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const;
-    void computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const;
-
-    CV_PROP bool extended;
-    CV_PROP bool upright;
-    CV_PROP float threshold;
-    CV_PROP int octaves;
-    CV_PROP int sublevels;
-    CV_PROP int diffusivity;
+    enum
+    {
+        DIFF_PM_G1 = 0,
+        DIFF_PM_G2 = 1,
+        DIFF_WEICKERT = 2,
+        DIFF_CHARBONNIER = 3
+    };
+
+    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
+                                    float threshold = 0.001f,
+                                    int octaves = 4, int sublevels = 4,
+                                    int diffusivity = KAZE::DIFF_PM_G2);
 };
 
 /*!
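The diffusivity constants move from free enums at namespace scope into the KAZE class, so call sites have to qualify them. An assumed example of the new construction (again untested, since the commit does not build):

    Ptr<KAZE> kaze = KAZE::create(false /* extended */, false /* upright */,
                                  0.001f, 4, 4, KAZE::DIFF_PM_G2);
    kaze->detectAndCompute(img, noArray(), kpts, descs);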
@@ -555,41 +273,21 @@ AKAZE implementation
 class CV_EXPORTS_W AKAZE : public Feature2D
 {
 public:
-    CV_WRAP AKAZE();
-    CV_WRAP explicit AKAZE(int descriptor_type, int descriptor_size = 0, int descriptor_channels = 3,
-                           float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2);
-
-    virtual ~AKAZE();
-
-    // returns the descriptor size in bytes
-    int descriptorSize() const;
-    // returns the descriptor type
-    int descriptorType() const;
-    // returns the default norm type
-    int defaultNorm() const;
-
-    // Compute the AKAZE features on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
-
-    // Compute the AKAZE features and descriptors on an image
-    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
-                    OutputArray descriptors, bool useProvidedKeypoints = false) const;
-
-    AlgorithmInfo* info() const;
-
-protected:
-
-    void computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const;
-    void detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray()) const;
-
-    CV_PROP int descriptor;
-    CV_PROP int descriptor_channels;
-    CV_PROP int descriptor_size;
-    CV_PROP float threshold;
-    CV_PROP int octaves;
-    CV_PROP int sublevels;
-    CV_PROP int diffusivity;
+    // AKAZE descriptor type
+    enum
+    {
+        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
+        DESCRIPTOR_KAZE = 3,
+        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
+        DESCRIPTOR_MLDB = 5
+    };
+
+    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=DESCRIPTOR_MLDB,
+                                     int descriptor_size = 0, int descriptor_channels = 3,
+                                     float threshold = 0.001f, int octaves = 4,
+                                     int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2);
 };
 
 /****************************************************************************************\
 *                                       Distance                                        *
 \****************************************************************************************/
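Likewise the descriptor-type constants become members of AKAZE while the diffusivity values stay on KAZE, which is why the default argument is spelled KAZE::DIFF_PM_G2. An assumed call under the new interface:

    Ptr<AKAZE> akaze = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3,
                                     0.001f, 4, 4, KAZE::DIFF_PM_G2);
    akaze->detectAndCompute(img, noArray(), kpts, descs);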
@@ -55,7 +55,32 @@
 #  endif
 #endif
 
-using namespace cv;
+namespace cv
+{
+
+class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector
+{
+public:
+
+  explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
+
+  virtual void read( const FileNode& fn );
+  virtual void write( FileStorage& fs ) const;
+
+protected:
+  struct CV_EXPORTS Center
+  {
+      Point2d location;
+      double radius;
+      double confidence;
+  };
+
+  virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
+  virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
+
+  Params params;
+  AlgorithmInfo* info() const;
+};
 
 /*
 *   SimpleBlobDetector
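This is the first instance of the pattern the whole commit is moving towards: the public class in the header shrinks to an abstract interface plus a static create, and the concrete state moves into an *Impl class that lives in the .cpp file. A stripped-down sketch of the idiom (the names here are illustrative, not part of the diff):

    // header: interface only
    class CV_EXPORTS_W Widget : public Feature2D
    {
    public:
        CV_WRAP static Ptr<Widget> create(int param = 0);
    };

    // source file: hidden implementation
    class WidgetImpl : public Widget
    {
    public:
        explicit WidgetImpl(int param) : param_(param) {}
        int param_;
    };

    Ptr<Widget> Widget::create(int param)
    {
        return makePtr<WidgetImpl>(param);
    }

SimpleBlobDetectorImpl above still declares detectImpl and an AlgorithmInfo* info() member from the old Algorithm-based mechanism; presumably those get reconciled in later commits, since this one is explicitly known not to compile.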
@@ -53,6 +53,79 @@
 namespace cv
 {
 
+
+class BRISK_Impl : public BRISK
+{
+public:
+    explicit BRISK_Impl(int thresh=30, int octaves=3, float patternScale=1.0f);
+    // custom setup
+    explicit BRISK_Impl(const std::vector<float> &radiusList, const std::vector<int> &numberList,
+        float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>());
+
+    // call this to generate the kernel:
+    // circle of radius r (pixels), with n points;
+    // short pairings with dMax, long pairings with dMin
+    void generateKernel(std::vector<float> &radiusList,
+        std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
+        std::vector<int> indexChange=std::vector<int>());
+
+protected:
+
+    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
+
+    void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
+    void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
+                                             OutputArray descriptors, bool doDescriptors, bool doOrientation,
+                                             bool useProvidedKeypoints) const;
+
+    // Feature parameters
+    CV_PROP_RW int threshold;
+    CV_PROP_RW int octaves;
+
+    // some helper structures for the Brisk pattern representation
+    struct BriskPatternPoint{
+        float x;         // x coordinate relative to center
+        float y;         // x coordinate relative to center
+        float sigma;     // Gaussian smoothing sigma
+    };
+    struct BriskShortPair{
+        unsigned int i;  // index of the first pattern point
+        unsigned int j;  // index of other pattern point
+    };
+    struct BriskLongPair{
+        unsigned int i;  // index of the first pattern point
+        unsigned int j;  // index of other pattern point
+        int weighted_dx; // 1024.0/dx
+        int weighted_dy; // 1024.0/dy
+    };
+    inline int smoothedIntensity(const cv::Mat& image,
+                const cv::Mat& integral, const float key_x,
+                const float key_y, const unsigned int scale,
+                const unsigned int rot, const unsigned int point) const;
+    // pattern properties
+    BriskPatternPoint* patternPoints_;  //[i][rotation][scale]
+    unsigned int points_;               // total number of collocation points
+    float* scaleList_;                  // lists the scaling per scale index [scale]
+    unsigned int* sizeList_;            // lists the total pattern size per scale index [scale]
+    static const unsigned int scales_;  // scales discretization
+    static const float scalerange_;     // span of sizes 40->4 Octaves - else, this needs to be adjusted...
+    static const unsigned int n_rot_;   // discretization of the rotation look-up
+
+    // pairs
+    int strings_;                       // number of uchars the descriptor consists of
+    float dMax_;                        // short pair maximum distance
+    float dMin_;                        // long pair maximum distance
+    BriskShortPair* shortPairs_;        // d<_dMax
+    BriskLongPair* longPairs_;          // d>_dMin
+    unsigned int noShortPairs_;         // number of shortParis
+    unsigned int noLongPairs_;          // number of longParis
+
+    // general
+    static const float basicSize_;
+};
+
+
 // a layer in the Brisk detector pyramid
 class CV_EXPORTS BriskLayer
 {
@@ -1,110 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                        Intel License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-#include <limits>
-
-namespace cv
-{
-
-/****************************************************************************************\
-*                                  DescriptorExtractor                                  *
-\****************************************************************************************/
-/*
- *   DescriptorExtractor
- */
-DescriptorExtractor::~DescriptorExtractor()
-{}
-
-void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
-{
-    if( image.empty() || keypoints.empty() )
-    {
-        descriptors.release();
-        return;
-    }
-
-    KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
-    KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );
-
-    computeImpl( image, keypoints, descriptors );
-}
-
-void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
-{
-    std::vector<Mat> imageCollection, descCollection;
-    _imageCollection.getMatVector(imageCollection);
-    _descCollection.getMatVector(descCollection);
-    CV_Assert( imageCollection.size() == pointCollection.size() );
-    descCollection.resize( imageCollection.size() );
-    for( size_t i = 0; i < imageCollection.size(); i++ )
-        compute( imageCollection[i], pointCollection[i], descCollection[i] );
-}
-
-/*void DescriptorExtractor::read( const FileNode& )
-{}
-
-void DescriptorExtractor::write( FileStorage& ) const
-{}*/
-
-bool DescriptorExtractor::empty() const
-{
-    return false;
-}
-
-void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoints,
-                                                 Size imageSize, int borderSize )
-{
-    KeyPointsFilter::runByImageBorder( keypoints, imageSize, borderSize );
-}
-
-Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
-{
-    return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
-}
-
-
-CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
-{
-    DescriptorExtractor::compute(image, keypoints, descriptors);
-}
-
-}
@@ -44,118 +44,65 @@
 namespace cv
 {
 
-/*
- *   FeatureDetector
- */
-
-FeatureDetector::~FeatureDetector()
-{}
-
-void FeatureDetector::detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask ) const
-{
-    keypoints.clear();
-
-    if( image.empty() )
-        return;
-
-    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) );
-
-    detectImpl( image, keypoints, mask );
-}
-
-void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection,
-                             InputArrayOfArrays _masks ) const
-{
-    if (_imageCollection.isUMatVector())
-    {
-        std::vector<UMat> uimageCollection, umasks;
-        _imageCollection.getUMatVector(uimageCollection);
-        _masks.getUMatVector(umasks);
-
-        pointCollection.resize( uimageCollection.size() );
-        for( size_t i = 0; i < uimageCollection.size(); i++ )
-            detect( uimageCollection[i], pointCollection[i], umasks.empty() ? noArray() : umasks[i] );
-
-        return;
-    }
-
-    std::vector<Mat> imageCollection, masks;
-    _imageCollection.getMatVector(imageCollection);
-    _masks.getMatVector(masks);
-
-    pointCollection.resize( imageCollection.size() );
-    for( size_t i = 0; i < imageCollection.size(); i++ )
-        detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] );
-}
-
-/*void FeatureDetector::read( const FileNode& )
-{}
-
-void FeatureDetector::write( FileStorage& ) const
-{}*/
-
-bool FeatureDetector::empty() const
-{
-    return false;
-}
-
-void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint>& keypoints )
-{
-    KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
-
-Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
-{
-    if( detectorType.compare( "HARRIS" ) == 0 )
-    {
-        Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
-        fd->set("useHarrisDetector", true);
-        return fd;
-    }
-
-    return Algorithm::create<FeatureDetector>("Feature2D." + detectorType);
-}
-
-GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
-                            double _minDistance, int _blockSize,
-                            bool _useHarrisDetector, double _k )
-    : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
-      blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
-{
-}
-
-void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
-{
-    std::vector<Point2f> corners;
-
-    if (_image.isUMat())
-    {
-        UMat ugrayImage;
-        if( _image.type() != CV_8U )
-            cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
-        else
-            ugrayImage = _image.getUMat();
-
-        goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
-                             blockSize, useHarrisDetector, k );
-    }
-    else
-    {
-        Mat image = _image.getMat(), grayImage = image;
-        if( image.type() != CV_8U )
-            cvtColor( image, grayImage, COLOR_BGR2GRAY );
-
-        goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
-                             blockSize, useHarrisDetector, k );
-    }
-
-    keypoints.resize(corners.size());
-    std::vector<Point2f>::const_iterator corner_it = corners.begin();
-    std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
-    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
-        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
-}
+class GFTTDetector_Impl : public GFTTDetector
+{
+public:
+    GFTTDetector_Impl( int _nfeatures, double _qualityLevel,
+                       double _minDistance, int _blockSize,
+                       bool _useHarrisDetector, double _k )
+        : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
+          blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
+    {
+    }
+
+    void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
+    {
+        std::vector<Point2f> corners;
+
+        if (_image.isUMat())
+        {
+            UMat ugrayImage;
+            if( _image.type() != CV_8U )
+                cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
+            else
+                ugrayImage = _image.getUMat();
+
+            goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
+                                 blockSize, useHarrisDetector, k );
+        }
+        else
+        {
+            Mat image = _image.getMat(), grayImage = image;
+            if( image.type() != CV_8U )
+                cvtColor( image, grayImage, COLOR_BGR2GRAY );
+
+            goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
+                                 blockSize, useHarrisDetector, k );
+        }
+
+        keypoints.resize(corners.size());
+        std::vector<Point2f>::const_iterator corner_it = corners.begin();
+        std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
+        for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
+            *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
+    }
+
+    int nfeatures;
+    double qualityLevel;
+    double minDistance;
+    int blockSize;
+    bool useHarrisDetector;
+    double k;
+};
+
+
+Ptr<GFTTDetector> GFTTDetector::create( int _nfeatures, double _qualityLevel,
+                                        double _minDistance, int _blockSize,
+                                        bool _useHarrisDetector, double _k )
+{
+    return makePtr<GFTTDetector_Impl>(_nfeatures, _qualityLevel,
+                                      _minDistance, _blockSize, _useHarrisDetector, _k);
+}
 
 }
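Note that GFTTDetector_Impl overrides detect() directly rather than the old protected detectImpl() hook, and keeps its parameters as plain members. From the caller's side nothing changes beyond construction; an assumed example:

    Ptr<GFTTDetector> gftt = GFTTDetector::create(500, 0.01, 10, 3, true /* Harris */, 0.04);
    std::vector<KeyPoint> corners;
    gftt->detect(img, corners);   // each corner becomes a KeyPoint with size = blockSize

The old string-based FeatureDetector::create("HARRIS") path that flipped useHarrisDetector on a GFTT detector is deleted in this hunk; the boolean argument of GFTTDetector::create is now the only way shown here to request Harris scoring.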
@@ -356,30 +356,39 @@ void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool
 {
     FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16);
 }
-/*
- *   FastFeatureDetector
- */
-FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression )
-    : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(FastFeatureDetector::TYPE_9_16)
-{}
-
-FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type )
-    : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
-{}
-
-void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
-{
-    Mat mask = _mask.getMat(), grayImage;
-    UMat ugrayImage;
-    _InputArray gray = _image;
-    if( _image.type() != CV_8U )
-    {
-        _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage);
-        cvtColor( _image, ogray, COLOR_BGR2GRAY );
-        gray = ogray;
-    }
-    FAST( gray, keypoints, threshold, nonmaxSuppression, type );
-    KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
+
+class FastFeatureDetector_Impl : public FastFeatureDetector
+{
+public:
+    FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, int _type )
+        : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
+    {}
+
+    void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
+    {
+        Mat mask = _mask.getMat(), grayImage;
+        UMat ugrayImage;
+        _InputArray gray = _image;
+        if( _image.type() != CV_8U )
+        {
+            _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage);
+            cvtColor( _image, ogray, COLOR_BGR2GRAY );
+            gray = ogray;
+        }
+        FAST( gray, keypoints, threshold, nonmaxSuppression, type );
+        KeyPointsFilter::runByPixelsMask( keypoints, mask );
+    }
+
+    int threshold;
+    bool nonmaxSuppression;
+    int type;
+};
+
+Ptr<FastFeatureDetector> FastFeatureDetector::create( int threshold, bool nonmaxSuppression, int type )
+{
+    return makePtr<FastFeatureDetector_Impl>(threshold, nonmaxSuppression, type);
+}
 
 }
@@ -42,6 +42,8 @@
 
 #include "precomp.hpp"
 
+#if 0
+
 using namespace cv;
 
 Ptr<Feature2D> Feature2D::create( const String& feature2DType )
@@ -193,3 +195,5 @@ bool cv::initModule_features2d(void)
 
     return all;
 }
+
+#endif
@@ -52,153 +52,93 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd
 
 namespace cv
 {
-    KAZE::KAZE()
-        : extended(false)
-        , upright(false)
-        , threshold(0.001f)
-        , octaves(4)
-        , sublevels(4)
-        , diffusivity(DIFF_PM_G2)
-    {
-    }
-
-    KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves,
-               int _sublevels, int _diffusivity)
-        : extended(_extended)
-        , upright(_upright)
-        , threshold(_threshold)
-        , octaves(_octaves)
-        , sublevels(_sublevels)
-        , diffusivity(_diffusivity)
-    {
-    }
-    KAZE::~KAZE()
-    {
-    }
-
-    // returns the descriptor size in bytes
-    int KAZE::descriptorSize() const
-    {
-        return extended ? 128 : 64;
-    }
-
-    // returns the descriptor type
-    int KAZE::descriptorType() const
-    {
-        return CV_32F;
-    }
-
-    // returns the default norm type
-    int KAZE::defaultNorm() const
-    {
-        return NORM_L2;
-    }
-
-    void KAZE::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
-    {
-        detectImpl(image, keypoints, mask);
-    }
-
-    void KAZE::operator()(InputArray image, InputArray mask,
-                          std::vector<KeyPoint>& keypoints,
-                          OutputArray descriptors,
-                          bool useProvidedKeypoints) const
-    {
-        cv::Mat img = image.getMat();
-        if (img.type() != CV_8UC1)
-            cvtColor(image, img, COLOR_BGR2GRAY);
-
-        Mat img1_32;
-        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
-
-        cv::Mat& desc = descriptors.getMatRef();
-
-        KAZEOptions options;
-        options.img_width = img.cols;
-        options.img_height = img.rows;
-        options.extended = extended;
-        options.upright = upright;
-        options.dthreshold = threshold;
-        options.omax = octaves;
-        options.nsublevels = sublevels;
-        options.diffusivity = diffusivity;
-
-        KAZEFeatures impl(options);
-        impl.Create_Nonlinear_Scale_Space(img1_32);
-
-        if (!useProvidedKeypoints)
-        {
-            impl.Feature_Detection(keypoints);
-        }
-
-        if (!mask.empty())
-        {
-            cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
-        }
-
-        impl.Feature_Description(keypoints, desc);
-
-        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
-        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
-    }
-
-    void KAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-    {
-        Mat img = image.getMat();
-        if (img.type() != CV_8UC1)
-            cvtColor(image, img, COLOR_BGR2GRAY);
-
-        Mat img1_32;
-        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
-
-        KAZEOptions options;
-        options.img_width = img.cols;
-        options.img_height = img.rows;
-        options.extended = extended;
-        options.upright = upright;
-        options.dthreshold = threshold;
-        options.omax = octaves;
-        options.nsublevels = sublevels;
-        options.diffusivity = diffusivity;
-
-        KAZEFeatures impl(options);
-        impl.Create_Nonlinear_Scale_Space(img1_32);
-        impl.Feature_Detection(keypoints);
-
-        if (!mask.empty())
-        {
-            cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
-        }
-    }
-
-    void KAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
-    {
-        cv::Mat img = image.getMat();
-        if (img.type() != CV_8UC1)
-            cvtColor(image, img, COLOR_BGR2GRAY);
-
-        Mat img1_32;
-        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
-
-        cv::Mat& desc = descriptors.getMatRef();
-
-        KAZEOptions options;
-        options.img_width = img.cols;
-        options.img_height = img.rows;
-        options.extended = extended;
-        options.upright = upright;
-        options.dthreshold = threshold;
-        options.omax = octaves;
-        options.nsublevels = sublevels;
-        options.diffusivity = diffusivity;
-
-        KAZEFeatures impl(options);
-        impl.Create_Nonlinear_Scale_Space(img1_32);
-        impl.Feature_Description(keypoints, desc);
-
-        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
-        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
-    }
+
+    class KAZE_Impl : public KAZE
+    {
+    public:
+        KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves,
+                  int _sublevels, int _diffusivity)
+        : extended(_extended)
+        , upright(_upright)
+        , threshold(_threshold)
+        , octaves(_octaves)
+        , sublevels(_sublevels)
+        , diffusivity(_diffusivity)
+        {
+        }
+
+        virtual ~KAZE_Impl() {}
+
+        // returns the descriptor size in bytes
+        int descriptorSize() const
+        {
+            return extended ? 128 : 64;
+        }
+
+        // returns the descriptor type
+        int descriptorType() const
+        {
+            return CV_32F;
+        }
+
+        // returns the default norm type
+        int defaultNorm() const
+        {
+            return NORM_L2;
+        }
+
+        void detectAndCompute(InputArray image, InputArray mask,
+                              std::vector<KeyPoint>& keypoints,
+                              OutputArray descriptors,
+                              bool useProvidedKeypoints)
+        {
+            cv::Mat img = image.getMat();
+            if (img.type() != CV_8UC1)
+                cvtColor(image, img, COLOR_BGR2GRAY);
+
+            Mat img1_32;
+            img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
+
+            KAZEOptions options;
+            options.img_width = img.cols;
+            options.img_height = img.rows;
+            options.extended = extended;
+            options.upright = upright;
+            options.dthreshold = threshold;
+            options.omax = octaves;
+            options.nsublevels = sublevels;
+            options.diffusivity = diffusivity;
+
+            KAZEFeatures impl(options);
+            impl.Create_Nonlinear_Scale_Space(img1_32);
+
+            if (!useProvidedKeypoints)
+            {
+                impl.Feature_Detection(keypoints);
+            }
+
+            if (!mask.empty())
+            {
+                cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
+            }
+
+            if( descriptors.needed() )
+            {
+                Mat& desc = descriptors.getMatRef();
+                impl.Feature_Description(keypoints, desc);
+
+                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
+                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
+            }
+        }
+
+        bool extended;
+        bool upright;
+        float threshold;
+        int octaves;
+        int sublevels;
+        int diffusivity;
+    };
+
 }
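The new detectAndCompute only touches descriptors when descriptors.needed() is true, so one implementation serves both the detect-only and the detect-plus-describe paths. An assumed caller view (illustrative, untested against this non-compiling commit):

    Ptr<KAZE> kaze = KAZE::create();
    std::vector<KeyPoint> kpts;
    Mat descs;
    kaze->detectAndCompute(img, noArray(), kpts, descs);
    kaze->detectAndCompute(img, noArray(), kpts, descs, true /* useProvidedKeypoints */);

Compared with the deleted code, the three near-identical copies of the KAZEOptions setup (operator(), detectImpl, computeImpl) collapse into one, which is the main cleanup this file receives.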
@@ -8,23 +8,8 @@
 #ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
 #define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
 
-/* ************************************************************************* */
-// OpenCV
-#include "../precomp.hpp"
-#include <opencv2/features2d.hpp>
-
-/* ************************************************************************* */
-/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
-const float gauss25[7][7] = {
-    { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f },
-    { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f },
-    { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f },
-    { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f },
-    { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f },
-    { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f },
-    { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f }
-};
-
+namespace cv
+{
+
 /* ************************************************************************* */
 /// AKAZE configuration options structure
 struct AKAZEOptions {
@@ -75,4 +60,6 @@ struct AKAZEOptions {
     int kcontrast_nbins;           ///< Number of bins for the contrast factor histogram
 };
 
+}
+
 #endif
|
@ -6,6 +6,7 @@
|
|||||||
* @author Pablo F. Alcantarilla, Jesus Nuevo
|
* @author Pablo F. Alcantarilla, Jesus Nuevo
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#include "../precomp.hpp"
|
||||||
#include "AKAZEFeatures.h"
|
#include "AKAZEFeatures.h"
|
||||||
#include "fed.h"
|
#include "fed.h"
|
||||||
#include "nldiffusion_functions.h"
|
#include "nldiffusion_functions.h"
|
||||||
@ -14,9 +15,9 @@
|
|||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
// Namespaces
|
// Namespaces
|
||||||
|
namespace cv
|
||||||
|
{
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace cv;
|
|
||||||
using namespace cv::details::kaze;
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
/**
|
/**
|
||||||
@ -29,7 +30,7 @@ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) {
   ncycles_ = 0;
   reordering_ = true;

-  if (options_.descriptor_size > 0 && options_.descriptor >= cv::DESCRIPTOR_MLDB_UPRIGHT) {
+  if (options_.descriptor_size > 0 && options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {
     generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size,
                                 options_.descriptor_pattern_size, options_.descriptor_channels);
   }
@ -264,10 +265,10 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
   vector<cv::KeyPoint> kpts_aux;

   // Set maximum size
-  if (options_.descriptor == cv::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_MLDB) {
+  if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) {
     smax = 10.0f*sqrtf(2.0f);
   }
-  else if (options_.descriptor == cv::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_KAZE) {
+  else if (options_.descriptor == AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_KAZE) {
     smax = 12.0f*sqrtf(2.0f);
   }

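The only functional change in these AKAZE hunks is where the descriptor-type constants live: the free cv::DESCRIPTOR_* enumerators become members of the AKAZE class. Hypothetical caller code (the "opts" object is only illustrative, not part of this patch) would change like this:

    // before this commit: free enumerators in namespace cv
    opts.descriptor = cv::DESCRIPTOR_MLDB;
    // after this commit: enumerators scoped inside the AKAZE class
    opts.descriptor = cv::AKAZE::DESCRIPTOR_MLDB;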
@ -712,7 +713,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
   }

   // Allocate memory for the matrix with the descriptors
-  if (options_.descriptor < cv::DESCRIPTOR_MLDB_UPRIGHT) {
+  if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {
     desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1);
   }
   else {
@ -729,17 +730,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat

   switch (options_.descriptor)
   {
-    case cv::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation
+    case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation
     {
       cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_));
     }
       break;
-    case cv::DESCRIPTOR_KAZE:
+    case AKAZE::DESCRIPTOR_KAZE:
     {
       cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_));
     }
       break;
-    case cv::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation
+    case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation
     {
       if (options_.descriptor_size == 0)
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
@ -747,7 +748,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
     }
       break;
-    case cv::DESCRIPTOR_MLDB:
+    case AKAZE::DESCRIPTOR_MLDB:
     {
       if (options_.descriptor_size == 0)
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
@ -765,7 +766,20 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
 * @note The orientation is computed using a similar approach as described in the
 * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
 */
-void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_) {
+void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_)
+{
+  /* ************************************************************************* */
+  /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
+  static const float gauss25[7][7] =
+  {
+    { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f },
+    { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f },
+    { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f },
+    { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f },
+    { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f },
+    { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f },
+    { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f }
+  };
+
   int ix = 0, iy = 0, idx = 0, s = 0, level = 0;
   float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0;
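For reference, the gauss25 table moved here is just a sampled 2D Gaussian, G(x, y) = exp(-(x^2 + y^2) / (2*sigma^2)) / (2*pi*sigma^2) with sigma = 2.5, evaluated at integer offsets 0..6 from the top-left corner. A standalone sketch (not part of the patch) that reproduces the tabulated values:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double sigma = 2.5;
        const double pi = 3.14159265358979323846;
        for (int y = 0; y < 7; y++)
        {
            for (int x = 0; x < 7; x++)
            {
                // 2D Gaussian sampled at integer offsets; (0,0) is the table's top-left entry
                double g = std::exp(-(x * x + y * y) / (2.0 * sigma * sigma)) / (2.0 * pi * sigma * sigma);
                std::printf("%.8ff ", g);
            }
            std::printf("\n");
        }
        return 0;
    }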
@ -1702,3 +1716,6 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int
   sampleList = samples.rowRange(0, count).clone();
   comparisons = comps.rowRange(0, nbits).clone();
 }
+
+}
+}
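For context, generateDescriptorSubsample(), whose closing lines appear above, fills sampleList and comparisons so that a reduced-size MLDB descriptor evaluates only a subset of the possible pairwise comparisons. A toy sketch of drawing such a random subset of comparison index pairs (illustrative only; the real routine also derives the sample grid from the pattern size and channel count):

    // Illustrative only: choose `nbits` random (i, j) index pairs to compare, with i != j.
    cv::Mat pickRandomComparisons(int nsamples, int nbits, cv::RNG& rng)
    {
        cv::Mat comps(nbits, 2, CV_32SC1);
        for (int b = 0; b < nbits; b++)
        {
            int i = rng.uniform(0, nsamples);
            int j = rng.uniform(0, nsamples);
            while (j == i)
                j = rng.uniform(0, nsamples);
            comps.at<int>(b, 0) = i;
            comps.at<int>(b, 1) = j;
        }
        return comps;
    }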
@ -12,12 +12,14 @@
 #include "../precomp.hpp"
 #include <opencv2/features2d.hpp>

+namespace cv
+{
 //*************************************************************************************

 struct KAZEOptions {

     KAZEOptions()
-    : diffusivity(cv::DIFF_PM_G2)
+    : diffusivity(KAZE::DIFF_PM_G2)

     , soffset(1.60f)
     , omax(4)
@ -49,4 +51,6 @@ struct KAZEOptions {
     bool extended;
 };

+}
+
 #endif
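The default diffusivity, now spelled KAZE::DIFF_PM_G2, refers to the second Perona-Malik conductivity used when building the nonlinear scale space: per element it is g = 1 / (1 + |grad L|^2 / k^2), where k is the contrast factor. A minimal sketch of that per-pixel weight (not the module's vectorized implementation):

    // Second Perona-Malik conductivity: g2(|grad L|) = 1 / (1 + |grad L|^2 / k^2).
    // Lx, Ly are the image derivatives at a pixel; k is the contrast factor.
    static inline float pm_g2_weight(float Lx, float Ly, float k)
    {
        return 1.0f / (1.0f + (Lx * Lx + Ly * Ly) / (k * k));
    }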
@ -17,43 +17,48 @@
 #include "fed.h"
 #include "TEvolution.h"

+namespace cv
+{
+
 /* ************************************************************************* */
 // KAZE Class Declaration
 class KAZEFeatures {

 private:

     /// Parameters of the Nonlinear diffusion class
     KAZEOptions options_;                ///< Configuration options for KAZE
     std::vector<TEvolution> evolution_;  ///< Vector of nonlinear diffusion evolution

     /// Vector of keypoint vectors for finding extrema in multiple threads
     std::vector<std::vector<cv::KeyPoint> > kpts_par_;

     /// FED parameters
     int ncycles_;                  ///< Number of cycles
     bool reordering_;              ///< Flag for reordering time steps
     std::vector<std::vector<float > > tsteps_;  ///< Vector of FED dynamic time steps
     std::vector<int> nsteps_;      ///< Vector of number of steps per cycle

 public:

     /// Constructor
     KAZEFeatures(KAZEOptions& options);

     /// Public methods for KAZE interface
     void Allocate_Memory_Evolution(void);
     int Create_Nonlinear_Scale_Space(const cv::Mat& img);
     void Feature_Detection(std::vector<cv::KeyPoint>& kpts);
     void Feature_Description(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc);
     static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options);

     /// Feature Detection Methods
     void Compute_KContrast(const cv::Mat& img, const float& kper);
     void Compute_Multiscale_Derivatives(void);
     void Compute_Detector_Response(void);
     void Determinant_Hessian(std::vector<cv::KeyPoint>& kpts);
     void Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts);
 };

+}
+
 #endif
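Judging from the wrapper code being removed earlier in this commit, the intended call order for this class is: fill in the options, construct KAZEFeatures, build the nonlinear scale space from a float image, detect, then describe. A rough sketch under those assumptions (the 8-bit-to-float conversion is assumed, and the options struct may also need the image size filled in before the scale space is allocated):

    // Sketch only: mirrors the call order used by the KAZE wrapper touched by this commit.
    KAZEOptions options;                 // defaults per this header: KAZE::DIFF_PM_G2, soffset 1.60f, omax 4, ...
    KAZEFeatures impl(options);

    cv::Mat img1_32;
    img.convertTo(img1_32, CV_32F, 1.0 / 255.0);   // `img` is assumed to be a CV_8U grayscale image

    impl.Create_Nonlinear_Scale_Space(img1_32);

    std::vector<cv::KeyPoint> keypoints;
    impl.Feature_Detection(keypoints);

    cv::Mat desc;
    impl.Feature_Description(keypoints, desc);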
@ -645,38 +645,70 @@ static inline float getScale(int level, int firstLevel, double scaleFactor)
     return (float)std::pow(scaleFactor, (double)(level - firstLevel));
 }

-/** Constructor
- * @param detector_params parameters to use
- */
-ORB::ORB(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold,
-         int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) :
-    nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
-    edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K),
-    scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold)
-{}
-
-int ORB::descriptorSize() const
+class ORB_Impl : public ORB
+{
+public:
+    explicit ORB_Impl(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold,
+                      int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) :
+        nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
+        edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K),
+        scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold)
+    {}
+
+    // returns the descriptor size in bytes
+    int descriptorSize() const;
+    // returns the descriptor type
+    int descriptorType() const;
+    // returns the default norm type
+    int defaultNorm() const;
+
+    // Compute the ORB_Impl features and descriptors on an image
+    void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
+
+    // Compute the ORB_Impl features and descriptors on an image
+    void operator()( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
+                     OutputArray descriptors, bool useProvidedKeypoints=false ) const;
+
+    AlgorithmInfo* info() const;
+
+protected:
+
+    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
+
+    int nfeatures;
+    double scaleFactor;
+    int nlevels;
+    int edgeThreshold;
+    int firstLevel;
+    int WTA_K;
+    int scoreType;
+    int patchSize;
+    int fastThreshold;
+};
+
+int ORB_Impl::descriptorSize() const
 {
     return kBytes;
 }

-int ORB::descriptorType() const
+int ORB_Impl::descriptorType() const
 {
     return CV_8U;
 }

-int ORB::defaultNorm() const
+int ORB_Impl::defaultNorm() const
 {
     return NORM_HAMMING;
 }

-/** Compute the ORB features and descriptors on an image
+/** Compute the ORB_Impl features and descriptors on an image
 * @param img the image to compute the features and descriptors on
 * @param mask the mask to apply
 * @param keypoints the resulting keypoints
 */
-void ORB::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
+void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
 {
     (*this)(image, mask, keypoints, noArray(), false);
 }
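Since defaultNorm() above reports NORM_HAMMING, the descriptors produced here are binary strings and are normally matched with a Hamming-distance matcher. A minimal usage sketch, assuming two descriptor matrices desc1 and desc2 have already been computed:

    // Brute-force matching of two sets of ORB descriptors with Hamming distance.
    cv::BFMatcher matcher(cv::NORM_HAMMING, /*crossCheck=*/true);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);

(When WTA_K is set to 3 or 4, NORM_HAMMING2 is the usual choice for matching, although the defaultNorm() shown in this hunk always returns NORM_HAMMING.)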
@ -716,7 +748,7 @@ static void uploadORBKeypoints(const std::vector<KeyPoint>& src,
 }


-/** Compute the ORB keypoints on an image
+/** Compute the ORB_Impl keypoints on an image
 * @param image_pyramid the image pyramid to compute the features and descriptors on
 * @param mask_pyramid the masks to apply at every level
 * @param keypoints the resulting keypoints, clustered per level
@ -788,7 +820,7 @@ static void computeKeyPoints(const Mat& imagePyramid,
         KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold);

         // Keep more points than necessary as FAST does not give amazing corners
-        KeyPointsFilter::retainBest(keypoints, scoreType == ORB::HARRIS_SCORE ? 2 * featuresNum : featuresNum);
+        KeyPointsFilter::retainBest(keypoints, scoreType == ORB_Impl::HARRIS_SCORE ? 2 * featuresNum : featuresNum);

         nkeypoints = (int)keypoints.size();
         counters[level] = nkeypoints;
@ -814,7 +846,7 @@ static void computeKeyPoints(const Mat& imagePyramid,
     UMat ukeypoints, uresponses(1, nkeypoints, CV_32F);

     // Select best features using the Harris cornerness (better scoring than FAST)
-    if( scoreType == ORB::HARRIS_SCORE )
+    if( scoreType == ORB_Impl::HARRIS_SCORE )
     {
         if( useOCL )
         {
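The two comments above spell out the detection strategy: over-detect with FAST, keep roughly twice the requested number per level, and later rerank by Harris response and cut down to featuresNum. A rough sketch of the same pattern using only public API (`img`, the threshold, and the feature count are illustrative), not the internal computeKeyPoints() helper:

    // Over-detect with FAST, then keep only the strongest keypoints.
    std::vector<cv::KeyPoint> keypoints;
    cv::FAST(img, keypoints, /*threshold=*/20, /*nonmaxSuppression=*/true);

    // Keep about twice the target number first (FAST scores are a weak ranking);
    // a final retainBest() after rescoring would trim the set to featuresNum.
    const int featuresNum = 500;
    cv::KeyPointsFilter::retainBest(keypoints, 2 * featuresNum);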
@ -886,7 +918,7 @@ static void computeKeyPoints(const Mat& imagePyramid,
 }


-/** Compute the ORB features and descriptors on an image
+/** Compute the ORB_Impl features and descriptors on an image
 * @param img the image to compute the features and descriptors on
 * @param mask the mask to apply
 * @param keypoints the resulting keypoints
@ -894,7 +926,7 @@ static void computeKeyPoints(const Mat& imagePyramid,
 * @param do_keypoints if true, the keypoints are computed, otherwise used as an input
 * @param do_descriptors if true, also computes the descriptors
 */
-void ORB::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
+void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
                       OutputArray _descriptors, bool useProvidedKeypoints ) const
 {
     CV_Assert(patchSize >= 2);
@ -1121,12 +1153,12 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>
     }
 }

-void ORB::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
+void ORB_Impl::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
 {
     (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
 }

-void ORB::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
+void ORB_Impl::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
 {
     (*this)(image, Mat(), keypoints, descriptors, true);
 }