Merge pull request #2202 from ilya-lavrenov:tapi_GFTTDetector
commit 5894f82f14
@@ -41,7 +41,7 @@ with an image set. ::
     * Group of methods to match descriptors from an image pair.
     */
    void match( InputArray queryDescriptors, InputArray trainDescriptors,
-               vector<DMatch>& matches, InputArray mask=Mat() ) const;
+               vector<DMatch>& matches, InputArray mask=noArray() ) const;
    void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                   vector<vector<DMatch> >& matches, int k,
                   InputArray mask=Mat(), bool compactResult=false ) const;
@@ -52,7 +52,7 @@ with an image set. ::
     * Group of methods to match descriptors from one image to an image set.
     */
    void match( InputArray queryDescriptors, vector<DMatch>& matches,
-               const vector<Mat>& masks=vector<Mat>() );
+               const vector<Mat>& masks=noArray() );
    void knnMatch( InputArray queryDescriptors, vector<vector<DMatch> >& matches,
                   int k, const vector<Mat>& masks=vector<Mat>(),
                   bool compactResult=false );
@@ -131,7 +131,7 @@ DescriptorMatcher::match
----------------------------
Finds the best match for each descriptor from a query set.

-.. ocv:function:: void DescriptorMatcher::match( InputArray queryDescriptors, InputArray trainDescriptors, vector<DMatch>& matches, InputArray mask=Mat() ) const
+.. ocv:function:: void DescriptorMatcher::match( InputArray queryDescriptors, InputArray trainDescriptors, vector<DMatch>& matches, InputArray mask=noArray() ) const

.. ocv:function:: void DescriptorMatcher::match(InputArray queryDescriptors, vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() )

@@ -153,7 +153,7 @@ DescriptorMatcher::knnMatch
-------------------------------
Finds the k best matches for each descriptor from a query set.

-.. ocv:function:: void DescriptorMatcher::knnMatch(InputArray queryDescriptors, InputArray trainDescriptors, vector<vector<DMatch> >& matches, int k, InputArray mask=Mat(), bool compactResult=false ) const
+.. ocv:function:: void DescriptorMatcher::knnMatch(InputArray queryDescriptors, InputArray trainDescriptors, vector<vector<DMatch> >& matches, int k, InputArray mask=noArray(), bool compactResult=false ) const

.. ocv:function:: void DescriptorMatcher::knnMatch( InputArray queryDescriptors, vector<vector<DMatch> >& matches, int k, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )

@@ -179,7 +179,7 @@ DescriptorMatcher::radiusMatch
----------------------------------
For each query descriptor, finds the training descriptors not farther than the specified distance.

-.. ocv:function:: void DescriptorMatcher::radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors, vector<vector<DMatch> >& matches, float maxDistance, InputArray mask=Mat(), bool compactResult=false ) const
+.. ocv:function:: void DescriptorMatcher::radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors, vector<vector<DMatch> >& matches, float maxDistance, InputArray mask=noArray(), bool compactResult=false ) const

.. ocv:function:: void DescriptorMatcher::radiusMatch( InputArray queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
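For reference, a minimal usage sketch of the matcher API documented above (illustrative only and not part of this commit; desc1 and desc2 are assumed to be precomputed binary descriptors, e.g. from ORB):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    // Match two descriptor sets; the optional mask defaults to noArray().
    void matchPair(const Mat& desc1, const Mat& desc2)
    {
        BFMatcher matcher(NORM_HAMMING);           // Hamming norm suits binary descriptors

        std::vector<DMatch> matches;
        matcher.match(desc1, desc2, matches);      // best match per query descriptor

        std::vector<std::vector<DMatch> > knn;
        matcher.knnMatch(desc1, desc2, knn, 2);    // 2 best matches per query descriptor
    }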
@@ -23,12 +23,12 @@ Abstract base class for 2D image feature detectors. ::
    public:
        virtual ~FeatureDetector();

-       void detect( const Mat& image, vector<KeyPoint>& keypoints,
-                    const Mat& mask=Mat() ) const;
+       void detect( InputArray image, vector<KeyPoint>& keypoints,
+                    InputArray mask=noArray() ) const;

-       void detect( const vector<Mat>& images,
+       void detect( InputArrayOfArrays images,
                     vector<vector<KeyPoint> >& keypoints,
-                    const vector<Mat>& masks=vector<Mat>() ) const;
+                    InputArrayOfArrays masks=noArray() ) const;

        virtual void read(const FileNode&);
        virtual void write(FileStorage&) const;
@@ -43,9 +43,9 @@ FeatureDetector::detect
---------------------------
Detects keypoints in an image (first variant) or image set (second variant).

-.. ocv:function:: void FeatureDetector::detect( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const
+.. ocv:function:: void FeatureDetector::detect( InputArray image, vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const

-.. ocv:function:: void FeatureDetector::detect( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const vector<Mat>& masks=vector<Mat>() ) const
+.. ocv:function:: void FeatureDetector::detect( InputArrayOfArrays images, vector<vector<KeyPoint> >& keypoints, InputArrayOfArrays masks=noArray() ) const

.. ocv:pyfunction:: cv2.FeatureDetector_create.detect(image[, mask]) -> keypoints
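A minimal sketch of the updated detect() signature (illustrative, not part of the commit; the FAST detector choice and the mask are placeholders). With InputArray, the same call accepts either a Mat or a UMat:

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    void detectKeypoints(const Mat& image, const Mat& mask)
    {
        Ptr<FeatureDetector> detector = FeatureDetector::create("FAST");

        std::vector<KeyPoint> keypoints;
        detector->detect(image, keypoints, mask);  // mask may be omitted (defaults to noArray())

        UMat uimage;
        image.copyTo(uimage);                      // the same call also accepts UMat (T-API)
        detector->detect(uimage, keypoints);
    }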
@@ -108,7 +108,7 @@ public:
     * mask Mask specifying where to look for keypoints (optional). Must be a char
     * matrix with non-zero values in the region of interest.
     */
-    CV_WRAP void detect( const Mat& image, CV_OUT std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    CV_WRAP void detect( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    /*
     * Detect keypoints in an image set.
@@ -116,7 +116,7 @@ public:
     * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i].
     * masks Masks for image set. masks[i] is a mask for images[i].
     */
-    void detect( const std::vector<Mat>& images, std::vector<std::vector<KeyPoint> >& keypoints, const std::vector<Mat>& masks=std::vector<Mat>() ) const;
+    void detect( InputArrayOfArrays images, std::vector<std::vector<KeyPoint> >& keypoints, InputArrayOfArrays masks=noArray() ) const;

    // Return true if detector object is empty
    CV_WRAP virtual bool empty() const;
@@ -125,14 +125,14 @@ public:
    CV_WRAP static Ptr<FeatureDetector> create( const String& detectorType );

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const = 0;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const = 0;

    /*
     * Remove keypoints that are not in the mask.
     * Helper function, useful when wrapping a library call for keypoint detection that
     * does not support a mask argument.
     */
-    static void removeInvalidPoints( const Mat& mask, std::vector<KeyPoint>& keypoints );
+    static void removeInvalidPoints( const Mat & mask, std::vector<KeyPoint>& keypoints );
};

@@ -253,7 +253,7 @@ public:
protected:

    void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
    void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
@@ -338,7 +338,7 @@ public:
protected:

    void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    CV_PROP_RW int nfeatures;
    CV_PROP_RW double scaleFactor;
@@ -470,7 +470,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    int delta;
    int minArea;
@@ -506,7 +506,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    int maxSize;
    int responseThreshold;
@@ -535,7 +535,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    int threshold;
    bool nonmaxSuppression;
@@ -551,7 +551,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    int nfeatures;
    double qualityLevel;
@@ -608,7 +608,7 @@ protected:
        double confidence;
    };

-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
    virtual void findBlobs(const Mat &image, const Mat &binaryImage, std::vector<Center> &centers) const;

    Params params;
@@ -627,7 +627,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    double initFeatureScale;
    int featureScaleLevels;
@@ -664,7 +664,7 @@ public:
    AlgorithmInfo* info() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    Ptr<FeatureDetector> detector;
    int maxTotalKeypoints;
@@ -686,7 +686,7 @@ public:
    virtual bool empty() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    Ptr<FeatureDetector> detector;
    int maxLevel;
@@ -747,7 +747,7 @@ public:
    virtual bool empty() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

private:
    DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&);
@@ -776,7 +776,7 @@ public:
    virtual Ptr<AdjusterAdapter> clone() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    int thresh_;
    bool nonmax_;
@@ -799,7 +799,7 @@ public:
    virtual Ptr<AdjusterAdapter> clone() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    double thresh_, init_thresh_, min_thresh_, max_thresh_;
};
@@ -816,7 +816,7 @@ public:
    virtual Ptr<AdjusterAdapter> clone() const;

protected:
-    virtual void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;

    double thresh_, init_thresh_, min_thresh_, max_thresh_;
};
@@ -1035,29 +1035,29 @@ public:
     */
    // Find one best match for each query descriptor (if mask is empty).
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
-                CV_OUT std::vector<DMatch>& matches, InputArray mask=Mat() ) const;
+                CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;
    // Find k best matches for each query descriptor (in increasing order of distances).
    // compactResult is used when mask is not empty. If compactResult is false matches
    // vector will have the same size as queryDescriptors rows. If compactResult is true
    // matches vector will not contain matches for fully masked out query descriptors.
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                   CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
-                  InputArray mask=Mat(), bool compactResult=false ) const;
+                  InputArray mask=noArray(), bool compactResult=false ) const;
    // Find best matches for each query descriptor which have distance less than
    // maxDistance (in increasing order of distances).
    void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                      std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                     InputArray mask=Mat(), bool compactResult=false ) const;
+                     InputArray mask=noArray(), bool compactResult=false ) const;
    /*
     * Group of methods to match descriptors from one image to image set.
     * See description of similar methods for matching image pair above.
     */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        const std::vector<Mat>& masks=std::vector<Mat>() );
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
    void radiusMatch( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );

    // Reads matcher object from a file node
    virtual void read( const FileNode& );
@@ -1102,9 +1102,9 @@ protected:
    // that the class object has been trained already. Public match methods call these methods
    // after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false ) = 0;
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false ) = 0;
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    static bool isPossibleMatch( const Mat& mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( const std::vector<Mat>& masks, int queryIdx );
@@ -1139,15 +1139,9 @@ public:
    AlgorithmInfo* info() const;
protected:
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false );
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false );
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false );

-    bool ocl_knnMatch(InputArray query, InputArray train, std::vector< std::vector<DMatch> > &matches,
-                      int k, int dstType, bool compactResult=false);
-    bool ocl_radiusMatch(InputArray query, InputArray train, std::vector< std::vector<DMatch> > &matches,
-                         float maxDistance, int dstType, bool compactResult=false);
-    bool ocl_match(InputArray query, InputArray train, std::vector< std::vector<DMatch> > &matches, int dstType);

    int normType;
    bool crossCheck;
@@ -1183,9 +1177,9 @@ protected:
                    std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false );
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                  InputArrayOfArrays masks=std::vector<Mat>(), bool compactResult=false );
+                  InputArrayOfArrays masks=noArray(), bool compactResult=false );

    Ptr<flann::IndexParams> indexParams;
    Ptr<flann::SearchParams> searchParams;
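The comments above describe the k-NN variant and compactResult; a short ratio-test sketch built on knnMatch (illustrative, not part of the commit; desc1/desc2 are assumed CV_32F descriptors and the 0.75 threshold is an arbitrary example value):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    std::vector<DMatch> ratioTest(const Mat& desc1, const Mat& desc2)
    {
        BFMatcher matcher(NORM_L2);
        std::vector<std::vector<DMatch> > knn;
        matcher.knnMatch(desc1, desc2, knn, 2);    // mask defaults to noArray()

        std::vector<DMatch> good;
        for (size_t i = 0; i < knn.size(); i++)
            if (knn[i].size() == 2 && knn[i][0].distance < 0.75f * knn[i][1].distance)
                good.push_back(knn[i][0]);         // keep clearly-best matches only
        return good;
    }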
@@ -276,7 +276,7 @@ void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryIm
#endif
}

-void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
+void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const
{
    //TODO: support mask
    keypoints.clear();
@@ -284,7 +284,7 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
    if (image.channels() == 3)
        cvtColor(image, grayscaleImage, COLOR_BGR2GRAY);
    else
-        grayscaleImage = image;
+        grayscaleImage = image.getMat();

    std::vector < std::vector<Center> > centers;
    for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
@@ -292,20 +292,11 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
        Mat binarizedImage;
        threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);

-#ifdef DEBUG_BLOB_DETECTOR
-        // Mat keypointsImage;
-        // cvtColor( binarizedImage, keypointsImage, CV_GRAY2RGB );
-#endif

        std::vector < Center > curCenters;
        findBlobs(grayscaleImage, binarizedImage, curCenters);
        std::vector < std::vector<Center> > newCenters;
        for (size_t i = 0; i < curCenters.size(); i++)
        {
-#ifdef DEBUG_BLOB_DETECTOR
-            // circle(keypointsImage, curCenters[i].location, curCenters[i].radius, Scalar(0,0,255),-1);
-#endif

            bool isNew = true;
            for (size_t j = 0; j < centers.size(); j++)
            {
@@ -327,17 +318,9 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
                }
            }
            if (isNew)
-            {
                newCenters.push_back(std::vector<Center> (1, curCenters[i]));
-                //centers.push_back(std::vector<Center> (1, curCenters[i]));
-            }
        }
        std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));

-#ifdef DEBUG_BLOB_DETECTOR
-        // imshow("binarized", keypointsImage );
-        //waitKey();
-#endif
    }

    for (size_t i = 0; i < centers.size(); i++)
@@ -355,16 +338,4 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
        KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
        keypoints.push_back(kpt);
    }

-#ifdef DEBUG_BLOB_DETECTOR
-    namedWindow("keypoints", CV_WINDOW_NORMAL);
-    Mat outImg = image.clone();
-    for(size_t i=0; i<keypoints.size(); i++)
-    {
-        circle(outImg, keypoints[i].pt, keypoints[i].size, Scalar(255, 0, 255), -1);
-    }
-    //drawKeypoints(image, keypoints, outImg);
-    imshow("keypoints", outImg);
-    waitKey();
-#endif
}

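For context, a minimal SimpleBlobDetector sketch against the InputArray-based detect() (illustrative, not part of the commit; the area thresholds are arbitrary, and the mask is still ignored by this detector, per the TODO above):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    void detectBlobs(const Mat& gray)
    {
        SimpleBlobDetector::Params params;
        params.minArea = 50;                       // filter out tiny blobs
        params.maxArea = 5000;

        SimpleBlobDetector detector(params);
        std::vector<KeyPoint> blobs;
        detector.detect(gray, blobs);              // mask argument defaults to noArray()
    }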
@@ -751,9 +751,9 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v


void
-BRISK::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+BRISK::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
-    (*this)(image, mask, keypoints);
+    (*this)(image.getMat(), mask.getMat(), keypoints);
}

void
@@ -2229,7 +2229,7 @@ BriskLayer::halfsample(const cv::Mat& srcimg, cv::Mat& dstimg)
    CV_Assert(srcimg.cols / 2 == dstimg.cols);
    CV_Assert(srcimg.rows / 2 == dstimg.rows);

    // handle non-SSE case
    resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
}

@@ -51,7 +51,7 @@ namespace cv
FeatureDetector::~FeatureDetector()
{}

-void FeatureDetector::detect( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void FeatureDetector::detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask ) const
{
    keypoints.clear();

@@ -63,11 +63,29 @@ void FeatureDetector::detect( const Mat& image, std::vector<KeyPoint>& keypoints
    detectImpl( image, keypoints, mask );
}

-void FeatureDetector::detect(const std::vector<Mat>& imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, const std::vector<Mat>& masks ) const
+void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection,
+                             InputArrayOfArrays _masks ) const
{
+    if (_imageCollection.isUMatVector())
+    {
+        std::vector<UMat> uimageCollection, umasks;
+        _imageCollection.getUMatVector(uimageCollection);
+        _masks.getUMatVector(umasks);
+
+        pointCollection.resize( uimageCollection.size() );
+        for( size_t i = 0; i < uimageCollection.size(); i++ )
+            detect( uimageCollection[i], pointCollection[i], umasks.empty() ? noArray() : umasks[i] );
+
+        return;
+    }
+
+    std::vector<Mat> imageCollection, masks;
+    _imageCollection.getMatVector(imageCollection);
+    _masks.getMatVector(masks);
+
    pointCollection.resize( imageCollection.size() );
    for( size_t i = 0; i < imageCollection.size(); i++ )
-        detect( imageCollection[i], pointCollection[i], masks.empty() ? Mat() : masks[i] );
+        detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] );
}

/*void FeatureDetector::read( const FileNode& )
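A sketch of the new image-set path (illustrative, not part of the commit; it assumes a std::vector<UMat> binds to InputArrayOfArrays, which is what the isUMatVector() branch above relies on):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    void detectOnSet(const std::vector<UMat>& images)
    {
        Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");

        std::vector<std::vector<KeyPoint> > keypoints;
        detector->detect(images, keypoints);       // dispatched to the UMat branch; masks default to noArray()
    }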
@@ -125,21 +143,37 @@ GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
{
}

-void GFTTDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
-    Mat grayImage = image;
-    if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );

    std::vector<Point2f> corners;
-    goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, mask,
-                         blockSize, useHarrisDetector, k );
+    if (_image.isUMat())
+    {
+        UMat ugrayImage;
+        if( _image.type() != CV_8U )
+            cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
+        else
+            ugrayImage = _image.getUMat();
+
+        goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
+                             blockSize, useHarrisDetector, k );
+    }
+    else
+    {
+        Mat image = _image.getMat(), grayImage = image;
+        if( image.type() != CV_8U )
+            cvtColor( image, grayImage, COLOR_BGR2GRAY );
+
+        goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
+                             blockSize, useHarrisDetector, k );
+    }

    keypoints.resize(corners.size());
    std::vector<Point2f>::const_iterator corner_it = corners.begin();
    std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
-    {
        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
-    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
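A usage sketch of GFTTDetector with a UMat input, which is the case the new branch above targets (illustrative, not part of the commit; the constructor parameters are arbitrary example values):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    void trackCorners(const Mat& frame)            // frame: 8-bit grayscale
    {
        UMat uframe;
        frame.copyTo(uframe);                      // keep the data on the T-API side

        GFTTDetector gftt(500, 0.01, 10);          // maxCorners, qualityLevel, minDistance
        std::vector<KeyPoint> corners;
        gftt.detect(uframe, corners);              // goodFeaturesToTrack runs on the UMat path
    }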
@@ -157,8 +191,10 @@ DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featur
{}


-void DenseFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void DenseFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
+    Mat image = _image.getMat(), mask = _mask.getMat();
+
    float curScale = static_cast<float>(initFeatureScale);
    int curStep = initXyStep;
    int curBound = initImgBound;
@@ -271,9 +307,9 @@ public:
};
} // namepace

-void GridAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void GridAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
-    if (image.empty() || maxTotalKeypoints < gridRows * gridCols)
+    if (_image.empty() || maxTotalKeypoints < gridRows * gridCols)
    {
        keypoints.clear();
        return;
@@ -281,6 +317,8 @@ void GridAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPo
    keypoints.reserve(maxTotalKeypoints);
    int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);

+    Mat image = _image.getMat(), mask = _mask.getMat();
+
    cv::Mutex kptLock;
    cv::parallel_for_(cv::Range(0, gridRows * gridCols),
                      GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
@@ -298,8 +336,9 @@ bool PyramidAdaptedFeatureDetector::empty() const
    return !detector || detector->empty();
}

-void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void PyramidAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
+    Mat image = _image.getMat(), mask = _mask.getMat();
    Mat src = image;
    Mat src_mask = mask;

@@ -54,8 +54,10 @@ bool DynamicAdaptedFeatureDetector::empty() const
    return !adjuster_ || adjuster_->empty();
}

-void DynamicAdaptedFeatureDetector::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void DynamicAdaptedFeatureDetector::detectImpl(InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
+    Mat image = _image.getMat(), mask = _mask.getMat();
+
    //for oscillation testing
    bool down = false;
    bool up = false;
@@ -98,7 +100,7 @@ FastAdjuster::FastAdjuster( int init_thresh, bool nonmax, int min_thresh, int ma
    min_thresh_(min_thresh), max_thresh_(max_thresh)
{}

-void FastAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void FastAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
    FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
}
@@ -133,7 +135,7 @@ StarAdjuster::StarAdjuster(double initial_thresh, double min_thresh, double max_
    min_thresh_(min_thresh), max_thresh_(max_thresh)
{}

-void StarAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void StarAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
    StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
    detector_tmp.detect(image, keypoints, mask);
@@ -167,7 +169,7 @@ SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max
    min_thresh_(min_thresh), max_thresh_(max_thresh)
{}

-void SurfAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const cv::Mat& mask) const
+void SurfAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
    Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
    surf->set("hessianThreshold", thresh_);
@@ -283,10 +283,11 @@ FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppressio
    : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}

-void FastFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
-    Mat grayImage = image;
-    if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
+    Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
+    if( image.type() != CV_8U )
+        cvtColor( image, grayImage, COLOR_BGR2GRAY );
    FAST( grayImage, keypoints, threshold, nonmaxSuppression, type );
    KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
@@ -891,21 +891,25 @@ Ptr<DescriptorMatcher> BFMatcher::clone( bool emptyTrainData ) const
    return matcher;
}

-bool BFMatcher::ocl_match(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches, int dstType)
+static bool ocl_match(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches, int dstType)
{
    UMat trainIdx, distance;
-    if(!ocl_matchSingle(query, _train, trainIdx, distance, dstType)) return false;
-    if(!ocl_matchDownload(trainIdx, distance, matches)) return false;
+    if (!ocl_matchSingle(query, _train, trainIdx, distance, dstType))
+        return false;
+    if (!ocl_matchDownload(trainIdx, distance, matches))
+        return false;
    return true;
}

-bool BFMatcher::ocl_knnMatch(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches, int k, int dstType, bool compactResult)
+static bool ocl_knnMatch(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches, int k, int dstType, bool compactResult)
{
    UMat trainIdx, distance;
    if (k != 2)
        return false;
-    if (!ocl_knnMatchSingle(query, _train, trainIdx, distance, dstType)) return false;
-    if( !ocl_knnMatchDownload(trainIdx, distance, matches, compactResult) ) return false;
+    if (!ocl_knnMatchSingle(query, _train, trainIdx, distance, dstType))
+        return false;
+    if (!ocl_knnMatchDownload(trainIdx, distance, matches, compactResult) )
+        return false;
    return true;
}

@@ -1033,12 +1037,14 @@ void BFMatcher::knnMatchImpl( InputArray _queryDescriptors, std::vector<std::vec
    }
}

-bool BFMatcher::ocl_radiusMatch(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches,
+static bool ocl_radiusMatch(InputArray query, InputArray _train, std::vector< std::vector<DMatch> > &matches,
                  float maxDistance, int dstType, bool compactResult)
{
    UMat trainIdx, distance, nMatches;
-    if(!ocl_radiusMatchSingle(query, _train, trainIdx, distance, nMatches, maxDistance, dstType)) return false;
-    if(!ocl_radiusMatchDownload(trainIdx, distance, nMatches, matches, compactResult)) return false;
+    if (!ocl_radiusMatchSingle(query, _train, trainIdx, distance, nMatches, maxDistance, dstType))
+        return false;
+    if (!ocl_radiusMatchDownload(trainIdx, distance, nMatches, matches, compactResult))
+        return false;
    return true;
}

@@ -1076,14 +1082,14 @@ void BFMatcher::radiusMatchImpl( InputArray _queryDescriptors, std::vector<std::
        _queryDescriptors.type() == CV_32FC1 && _queryDescriptors.offset() == 0 && trainDescOffset == 0 &&
        trainDescSize.width == _queryDescriptors.size().width && masks.size() == 1 && masks[0].total() == 0 )
    {
-        if(trainDescCollection.empty())
+        if (trainDescCollection.empty())
        {
            if(ocl_radiusMatch(_queryDescriptors, utrainDescCollection[0], matches, maxDistance, normType, compactResult) )
                return;
        }
        else
        {
-            if(ocl_radiusMatch(_queryDescriptors, trainDescCollection[0], matches, maxDistance, normType, compactResult) )
+            if (ocl_radiusMatch(_queryDescriptors, trainDescCollection[0], matches, maxDistance, normType, compactResult) )
                return;
        }
    }
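A brief radiusMatch sketch to round out the matcher changes above (illustrative, not part of the commit; the distance threshold of 40 is an arbitrary Hamming-distance example, not a pixel distance):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    using namespace cv;

    void matchWithinRadius(const Mat& desc1, const Mat& desc2)
    {
        BFMatcher matcher(NORM_HAMMING);
        std::vector<std::vector<DMatch> > matches;
        matcher.radiusMatch(desc1, desc2, matches, /*maxDistance=*/40.0f);
        // matches[i] holds every train descriptor within maxDistance of query i,
        // sorted by increasing distance; it may be empty when compactResult is false.
    }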
@@ -1284,8 +1284,9 @@ void MSER::operator()( const Mat& image, std::vector<std::vector<Point> >& dstco
}


-void MserFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void MserFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
+    Mat image = _image.getMat(), mask = _mask.getMat();
    std::vector<std::vector<Point> > msers;

    (*this)(image, msers, mask);
@@ -943,9 +943,9 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>
    }
}

-void ORB::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void ORB::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
-    (*this)(image, mask, keypoints, noArray(), false);
+    (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
}

void ORB::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
@@ -426,9 +426,9 @@ StarDetector::StarDetector(int _maxSize, int _responseThreshold,
{}


-void StarDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
+void StarDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
-    Mat grayImage = image;
+    Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
    if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );

    (*this)(grayImage, keypoints);
@@ -87,7 +87,7 @@ public:
                        std::vector<KeyPoint>& keypoints ) const;

protected:
-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask = Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
    void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;

    CV_PROP_RW int nfeatures;
@@ -143,7 +143,7 @@ public:

protected:

-    void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask = Mat() ) const;
+    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
    void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
};

@@ -818,9 +818,9 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
    }
}

-void SIFT::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void SIFT::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
-    (*this)(image, mask, keypoints, noArray());
+    (*this)(image.getMat(), mask.getMat(), keypoints, noArray());
}

void SIFT::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
@@ -979,9 +979,9 @@ void SURF::operator()(InputArray _img, InputArray _mask,
}


-void SURF::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
+void SURF::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
-    (*this)(image, mask, keypoints, noArray(), false);
+    (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
}

void SURF::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const