Merge pull request #3078 from vpisarev:refactor_features2d_take3

This commit is contained in:
Vadim Pisarevsky 2014-08-12 13:56:17 +00:00
commit 5505aa28ce
74 changed files with 152 additions and 16967 deletions

View File

@ -33,7 +33,7 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
endforeach()
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching nonfree contrib legacy)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching)
list(REMOVE_ITEM BASE_MODULES ${FIXED_ORDER_MODULES})

View File

@ -23,7 +23,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
@ -32,9 +32,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
@ -50,25 +51,19 @@ This tutorial code's is shown lines below. You can also download it from `here <
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create();
detector->setMinHessian(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
detector->detectAndCompute( img_1, noArray(), keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, noArray(), keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
//-- Step 2: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L2);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
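For quick reference, here is a minimal self-contained sketch of the refactored pipeline (assuming OpenCV is built with the opencv_contrib ``xfeatures2d`` module; the image file names are placeholders):
.. code-block:: cpp
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
int main()
{
Mat img_1 = imread( "box.png", IMREAD_GRAYSCALE );
Mat img_2 = imread( "box_in_scene.png", IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- detect keypoints and compute descriptors in a single call
Ptr<SURF> detector = SURF::create( 400 );
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
detector->detectAndCompute( img_1, noArray(), keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, noArray(), keypoints_2, descriptors_2 );
//-- brute-force matching with the L2 norm (SURF descriptors are float vectors)
BFMatcher matcher( NORM_L2 );
std::vector<DMatch> matches;
matcher.match( descriptors_1, descriptors_2, matches );
return 0;
}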

View File

@ -22,7 +22,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
@ -30,11 +30,11 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
@ -53,12 +53,12 @@ This tutorial code's is shown lines below. You can also download it from `here <
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
detector->detect( img_1, keypoints_1 );
detector->detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
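The tutorial then draws and shows the keypoints; a minimal sketch of the remaining steps (continuing with the names above) might be:
.. code-block:: cpp
//-- draw keypoints in the default color on each image
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- show the detected keypoints
imshow( "Keypoints 1", img_keypoints_1 );
imshow( "Keypoints 2", img_keypoints_2 );
waitKey(0);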

View File

@ -19,10 +19,116 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
/**
* @file SURF_FlannMatcher
* @brief SURF detector + descriptor + FLANN Matcher
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector->detect( img_1, keypoints_1 );
detector->detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_1, descriptors_2;
detector->compute( img_1, keypoints_1, descriptors_1 );
detector->compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
.. literalinclude:: ../../../../samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp
:language: cpp
Explanation
============

View File

@ -20,7 +20,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
@ -30,9 +30,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();

View File

@ -3050,13 +3050,13 @@ The class provides the following features for all derived classes:
Here is an example of SIFT use in your application via the Algorithm interface: ::
#include "opencv2/opencv.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;
...
initModule_nonfree(); // to load SURF/SIFT etc.
Ptr<Feature2D> sift = Algorithm::create<Feature2D>("Feature2D.SIFT");
Ptr<Feature2D> sift = SIFT::create();
FileStorage fs("sift_params.xml", FileStorage::READ);
if( fs.isOpened() ) // if we have file with parameters, read them
@ -3066,7 +3066,7 @@ Here is example of SIFT use in your application via Algorithm interface: ::
}
else // else modify the parameters and store them; user can later edit the file to use different parameters
{
sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, compared to the default value
sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
{
WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
@ -3076,7 +3076,7 @@ Here is example of SIFT use in your application via Algorithm interface: ::
Mat image = imread("myimage.png", 0), descriptors;
vector<KeyPoint> keypoints;
(*sift)(image, noArray(), keypoints, descriptors);
sift->detectAndCompute(image, noArray(), keypoints, descriptors);
Algorithm::name
---------------

View File

@ -84,38 +84,5 @@ Creates a descriptor extractor by name.
The current implementation supports the following types of descriptor extractors:
* ``"SIFT"`` -- :ocv:class:`SIFT`
* ``"SURF"`` -- :ocv:class:`SURF`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"FREAK"`` -- :ocv:class:`FREAK`
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
:ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above),
for example: ``"OpponentSIFT"`` .
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
(refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
The input RGB image is transformed into the Opponent Color Space. Then, an unadapted descriptor extractor
(set in the constructor) computes descriptors on each of the three channels and concatenates
them into a single color descriptor. ::
class OpponentColorDescriptorExtractor : public DescriptorExtractor
{
public:
OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
protected:
...
};
* ``"ORB"`` -- :ocv:class:`ORB`

View File

@ -72,22 +72,13 @@ Creates a feature detector by its name.
The following detector types are supported:
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"STAR"`` -- :ocv:class:`StarFeatureDetector`
* ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
* ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"Dense"`` -- :ocv:class:`DenseFeatureDetector`
* ``"SimpleBlob"`` -- :ocv:class:`SimpleBlobDetector`
Also a combined format is supported: feature detector adapter name ( ``"Grid"`` --
:ocv:class:`GridAdaptedFeatureDetector`, ``"Pyramid"`` --
:ocv:class:`PyramidAdaptedFeatureDetector` ) + feature detector name (see above),
for example: ``"GridFAST"``, ``"PyramidSTAR"`` .
FastFeatureDetector
-------------------
.. ocv:class:: FastFeatureDetector : public FeatureDetector
@ -164,55 +155,6 @@ Wrapping class for feature detection using the
...
};
StarFeatureDetector
-------------------
.. ocv:class:: StarFeatureDetector : public FeatureDetector
The class implements the keypoint detector introduced by [Agrawal08]_, synonym of ``StarDetector``. ::
class StarFeatureDetector : public FeatureDetector
{
public:
StarFeatureDetector( int maxSize=16, int responseThreshold=30,
int lineThresholdProjected = 10,
int lineThresholdBinarized=8, int suppressNonmaxSize=5 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
.. [Agrawal08] Agrawal, M., Konolige, K., & Blas, M. R. (2008). CenSurE: Center Surround Extremas for realtime feature detection and matching. In Computer Vision -- ECCV 2008 (pp. 102-115). Springer Berlin Heidelberg.
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector : public FeatureDetector
Class for generation of image features which are distributed densely and regularly over the image. ::
class DenseFeatureDetector : public FeatureDetector
{
public:
DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
float featureScaleMul=0.1f,
int initXyStep=6, int initImgBound=0,
bool varyXyStepWithScale=true,
bool varyImgBoundWithScale=false );
protected:
...
};
The detector generates several levels (``featureScaleLevels`` of them) of features. Features of each level are located at the nodes of a regular grid over the image (excluding the image boundary of the given size). The level parameters (the feature scale, the grid node size, and the boundary size) are multiplied by ``featureScaleMul`` as the level index grows, depending on the input flags (see the sketch after this list):
* Feature scale is multiplied always.
* The grid node size is multiplied if ``varyXyStepWithScale`` is ``true``.
* Size of image boundary is multiplied if ``varyImgBoundWithScale`` is ``true``.
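A short illustrative sketch of how the level parameters evolve (this mirrors the documented behavior using the constructor defaults, not the actual implementation):
.. code-block:: cpp
float scale = 1.f; // initFeatureScale
float step = 6.f, bound = 0.f; // initXyStep, initImgBound
float mul = 0.1f; // featureScaleMul
bool varyXyStepWithScale = true, varyImgBoundWithScale = false;
for( int level = 0; level < 3 /* featureScaleLevels */; level++ )
{
// ... place features on a regular grid with the current scale/step/bound ...
scale *= mul; // the feature scale is always multiplied
if( varyXyStepWithScale ) step *= mul;
if( varyImgBoundWithScale ) bound *= mul;
}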
SimpleBlobDetector
-------------------
.. ocv:class:: SimpleBlobDetector : public FeatureDetector
@ -277,226 +219,3 @@ This class performs several filtrations of returned blobs. You should set ``filt
Default values of parameters are tuned to extract dark circular blobs.
GridAdaptedFeatureDetector
--------------------------
.. ocv:class:: GridAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to partition the source image into a grid and detect points in each cell. ::
class GridAdaptedFeatureDetector : public FeatureDetector
{
public:
/*
* detector Detector that will be adapted.
* maxTotalKeypoints Maximum count of keypoints detected on the image.
* Only the strongest keypoints will be kept.
* gridRows Grid row count.
* gridCols Grid column count.
*/
GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int maxTotalKeypoints, int gridRows=4,
int gridCols=4 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
PyramidAdaptedFeatureDetector
-----------------------------
.. ocv:class:: PyramidAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to detect points over multiple levels of a Gaussian pyramid. Consider using this class for detectors that are not inherently scaled. ::
class PyramidAdaptedFeatureDetector : public FeatureDetector
{
public:
PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int levels=2 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
DynamicAdaptedFeatureDetector
-----------------------------
.. ocv:class:: DynamicAdaptedFeatureDetector : public FeatureDetector
Adaptively adjusting detector that iteratively detects features until the desired number is found. ::
class DynamicAdaptedFeatureDetector: public FeatureDetector
{
public:
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster,
int min_features=400, int max_features=500, int max_iters=5 );
...
};
If the detector is persisted, it "remembers" the parameters
used for the last detection. In this case, the detector may be used for consistent numbers
of keypoints in a set of temporally related images, such as video streams or
panorama series.
``DynamicAdaptedFeatureDetector`` uses another detector, such as FAST or SURF, to do the dirty work,
with the help of ``AdjusterAdapter`` .
If the detected number of features is outside the desired range,
``AdjusterAdapter`` adjusts the detection parameters so that the next detection
results in a bigger or smaller number of features. This is repeated until either the desired number of features is found
or the parameters are maxed out.
Adapters can be easily implemented for any detector via the
``AdjusterAdapter`` interface.
Beware that this is not thread-safe since the adjustment of parameters requires modification of the feature detector class instance.
Example of creating ``DynamicAdaptedFeatureDetector`` : ::
//sample usage:
//will create a detector that attempts to find
//100 - 110 FAST Keypoints, and will at most run
//FAST feature detection 10 times until that
//number of keypoints are found
Ptr<FeatureDetector> detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),
100, 110, 10));
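Conceptually, the detection loop the class runs looks roughly like this (an illustrative sketch of the documented behavior, not the actual implementation; the input image is a placeholder):
.. code-block:: cpp
Mat image = imread( "scene.png", IMREAD_GRAYSCALE );
Ptr<AdjusterAdapter> adjuster( new FastAdjuster(20, true) );
int min_features = 100, max_features = 110, max_iters = 10;
std::vector<KeyPoint> keypoints;
for( int i = 0; i < max_iters && adjuster->good(); i++ )
{
keypoints.clear();
adjuster->detect( image, keypoints ); // an AdjusterAdapter is itself a FeatureDetector
int n = (int)keypoints.size();
if( n < min_features ) adjuster->tooFew( min_features, n );
else if( n > max_features ) adjuster->tooMany( max_features, n );
else break; // the desired keypoint count was reached
}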
DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
------------------------------------------------------------
The constructor
.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
:param adjuster: :ocv:class:`AdjusterAdapter` that detects features and adjusts parameters.
:param min_features: Minimum desired number of features.
:param max_features: Maximum desired number of features.
:param max_iters: Maximum number of times to try adjusting the feature detector parameters. For :ocv:class:`FastAdjuster` , this number can be high, but with ``Star`` or ``Surf`` many iterations can be time-consuming. At each iteration the detector is rerun.
AdjusterAdapter
---------------
.. ocv:class:: AdjusterAdapter : public FeatureDetector
Class providing an interface for adjusting parameters of a feature detector. This interface is used by :ocv:class:`DynamicAdaptedFeatureDetector` . It is a wrapper for :ocv:class:`FeatureDetector` that enables adjusting parameters after feature detection. ::
class AdjusterAdapter: public FeatureDetector
{
public:
virtual ~AdjusterAdapter() {}
virtual void tooFew(int min, int n_detected) = 0;
virtual void tooMany(int max, int n_detected) = 0;
virtual bool good() const = 0;
virtual Ptr<AdjusterAdapter> clone() const = 0;
static Ptr<AdjusterAdapter> create( const String& detectorType );
};
See
:ocv:class:`FastAdjuster`,
:ocv:class:`StarAdjuster`, and
:ocv:class:`SurfAdjuster` for concrete implementations.
AdjusterAdapter::tooFew
---------------------------
Adjusts the detector parameters to detect more features.
.. ocv:function:: void AdjusterAdapter::tooFew(int min, int n_detected)
:param min: Minimum desired number of features.
:param n_detected: Number of features detected during the latest run.
Example: ::
void FastAdjuster::tooFew(int min, int n_detected)
{
thresh_--;
}
AdjusterAdapter::tooMany
----------------------------
Adjusts the detector parameters to detect fewer features.
.. ocv:function:: void AdjusterAdapter::tooMany(int max, int n_detected)
:param max: Maximum desired number of features.
:param n_detected: Number of features detected during the latest run.
Example: ::
void FastAdjuster::tooMany(int max, int n_detected)
{
thresh_++;
}
AdjusterAdapter::good
---------------------
Returns false if the detector parameters cannot be adjusted any more.
.. ocv:function:: bool AdjusterAdapter::good() const
Example: ::
bool FastAdjuster::good() const
{
return (thresh_ > 1) && (thresh_ < 200);
}
AdjusterAdapter::create
-----------------------
Creates an adjuster adapter by name
.. ocv:function:: Ptr<AdjusterAdapter> AdjusterAdapter::create( const String& detectorType )
Creates an adjuster adapter by name ``detectorType``. The detector name is the same as in :ocv:func:`FeatureDetector::create`, but only ``"FAST"``, ``"STAR"``, and ``"SURF"`` are currently supported.
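For example, a one-line sketch of the factory (the empty-``Ptr`` behavior for unrecognized names is an assumption):
.. code-block:: cpp
// creates a FastAdjuster; an empty Ptr is assumed to be returned for unrecognized names
Ptr<AdjusterAdapter> adapter = AdjusterAdapter::create( "FAST" );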
FastAdjuster
------------
.. ocv:class:: FastAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`FastFeatureDetector`. This class decreases or increases the threshold value by 1. ::
class FastAdjuster: public AdjusterAdapter
{
public:
FastAdjuster(int init_thresh = 20, bool nonmax = true);
...
};
StarAdjuster
------------
.. ocv:class:: StarAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`StarFeatureDetector`. This class adjusts the ``responseThreshold`` of ``StarFeatureDetector``. ::
class StarAdjuster: public AdjusterAdapter
{
StarAdjuster(double initial_thresh = 30.0);
...
};
SurfAdjuster
------------
.. ocv:class:: SurfAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for ``SurfFeatureDetector``. ::
class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
{
public:
SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
virtual void tooFew(int minv, int n_detected);
virtual void tooMany(int maxv, int n_detected);
virtual bool good() const;
virtual Ptr<AdjusterAdapter> clone() const;
...
};

View File

@ -1,276 +0,0 @@
Common Interfaces of Generic Descriptor Matchers
================================================
.. highlight:: cpp
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. This section is devoted to matching descriptors
that cannot be represented as vectors in a multidimensional space. ``GenericDescriptorMatcher`` is a more generic interface for descriptors. It does not make any assumptions about descriptor representation.
Every descriptor with the
:ocv:class:`DescriptorExtractor` interface has a wrapper with the ``GenericDescriptorMatcher`` interface (see
:ocv:class:`VectorDescriptorMatcher` ).
There are descriptors such as the One-way descriptor and Ferns that have the ``GenericDescriptorMatcher`` interface implemented but do not support ``DescriptorExtractor``.
.. note::
* An example explaining keypoint description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
* An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
* An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp
GenericDescriptorMatcher
------------------------
.. ocv:class:: GenericDescriptorMatcher
Abstract interface for extracting and matching a keypoint descriptor. There are also :ocv:class:`DescriptorExtractor` and :ocv:class:`DescriptorMatcher` for these purposes but their interfaces are intended for descriptors represented as vectors in a multidimensional space. ``GenericDescriptorMatcher`` is a more generic interface for descriptors. ``DescriptorMatcher`` and ``GenericDescriptorMatcher`` have two groups of match methods: for matching keypoints of an image with another image or with an image set. ::
class GenericDescriptorMatcher
{
public:
GenericDescriptorMatcher();
virtual ~GenericDescriptorMatcher();
virtual void add( InputArrayOfArrays images,
vector<vector<KeyPoint> >& keypoints );
const vector<Mat>& getTrainImages() const;
const vector<vector<KeyPoint> >& getTrainKeypoints() const;
virtual void clear();
virtual void train() = 0;
virtual bool isMaskSupported() = 0;
void classify( InputArray queryImage,
vector<KeyPoint>& queryKeypoints,
InputArray trainImage,
vector<KeyPoint>& trainKeypoints ) const;
void classify( InputArray queryImage,
vector<KeyPoint>& queryKeypoints );
/*
* Group of methods to match keypoints from an image pair.
*/
void match( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<DMatch>& matches, InputArray mask=noArray() ) const;
void knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, int k,
InputArray mask=noArray(), bool compactResult=false ) const;
void radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
InputArray mask=noArray(), bool compactResult=false ) const;
/*
* Group of methods to match keypoints from one image to an image set.
*/
void match( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<DMatch>& matches, InputArrayOfArrays masks=noArray() );
void knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, int k,
InputArrayOfArrays masks=noArray(), bool compactResult=false );
void radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks=noArray(), bool compactResult=false );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
protected:
...
};
GenericDescriptorMatcher::add
---------------------------------
Adds images and their keypoints to the training collection stored in the class instance.
.. ocv:function:: void GenericDescriptorMatcher::add( InputArrayOfArrays images, vector<vector<KeyPoint> >& keypoints )
:param images: Image collection.
:param keypoints: Point collection. It is assumed that ``keypoints[i]`` are keypoints detected in the image ``images[i]`` .
GenericDescriptorMatcher::getTrainImages
--------------------------------------------
Returns a train image collection.
.. ocv:function:: const vector<Mat>& GenericDescriptorMatcher::getTrainImages() const
GenericDescriptorMatcher::getTrainKeypoints
-----------------------------------------------
Returns a train keypoints collection.
.. ocv:function:: const vector<vector<KeyPoint> >& GenericDescriptorMatcher::getTrainKeypoints() const
GenericDescriptorMatcher::clear
-----------------------------------
Clears a train collection (images and keypoints).
.. ocv:function:: void GenericDescriptorMatcher::clear()
GenericDescriptorMatcher::train
-----------------------------------
Trains a descriptor matcher.
.. ocv:function:: void GenericDescriptorMatcher::train()
Prepares the descriptor matcher (for example, builds a tree-based structure) to extract descriptors or to optimize descriptor matching.
GenericDescriptorMatcher::isMaskSupported
---------------------------------------------
Returns ``true`` if a generic descriptor matcher supports masking permissible matches.
.. ocv:function:: bool GenericDescriptorMatcher::isMaskSupported()
GenericDescriptorMatcher::classify
--------------------------------------
Classifies keypoints from a query set.
.. ocv:function:: void GenericDescriptorMatcher::classify( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints ) const
.. ocv:function:: void GenericDescriptorMatcher::classify( InputArray queryImage, vector<KeyPoint>& queryKeypoints )
:param queryImage: Query image.
:param queryKeypoints: Keypoints from a query image.
:param trainImage: Train image.
:param trainKeypoints: Keypoints from a train image.
The method classifies each keypoint from a query set. The first variant of the method takes a train image and its keypoints as an input argument. The second variant uses the internally stored training collection that can be built using the ``GenericDescriptorMatcher::add`` method.
The methods do the following (see the sketch after this list):
#.
Call the ``GenericDescriptorMatcher::match`` method to find correspondence between the query set and the training set.
#.
Set the ``class_id`` field of each keypoint from the query set to ``class_id`` of the corresponding keypoint from the training set.
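In other words, given a concrete matcher and the images/keypoints named above, the first variant is roughly equivalent to this sketch:
.. code-block:: cpp
std::vector<DMatch> matches;
matcher.match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
// propagate the class id of each matched train keypoint to its query keypoint
for( size_t i = 0; i < matches.size(); i++ )
queryKeypoints[matches[i].queryIdx].class_id = trainKeypoints[matches[i].trainIdx].class_id;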
GenericDescriptorMatcher::match
-----------------------------------
Finds the best match in the training set for each keypoint from the query set.
.. ocv:function:: void GenericDescriptorMatcher::match(InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<DMatch>& matches, InputArray mask=noArray() ) const
.. ocv:function:: void GenericDescriptorMatcher::match( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<DMatch>& matches, InputArrayOfArrays masks=noArray() )
:param queryImage: Query image.
:param queryKeypoints: Keypoints detected in ``queryImage`` .
:param trainImage: Train image. It is not added to a train image collection stored in the class object.
:param trainKeypoints: Keypoints detected in ``trainImage`` . They are not added to a train points collection stored in the class object.
:param matches: Matches. If a query descriptor (keypoint) is masked out in ``mask`` , no match is added for this descriptor. So, the ``matches`` size may be smaller than the query keypoints count.
:param mask: Mask specifying permissible matches between an input query and train keypoints.
:param masks: Set of masks. Each ``masks[i]`` specifies permissible matches between input query keypoints and stored train keypoints from the i-th image.
The methods find the best match for each query keypoint. In the first variant of the method, a train image and its keypoints are the input arguments. In the second variant, query keypoints are matched to the internally stored training collection that can be built using the ``GenericDescriptorMatcher::add`` method. Optional mask (or masks) can be passed to specify which query and training descriptors can be matched. Namely, ``queryKeypoints[i]`` can be matched with ``trainKeypoints[j]`` only if ``mask.at<uchar>(i,j)`` is non-zero.
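For instance, a mask permitting only spatially close matches can be built with the library's ``windowedMatchingMask`` helper and passed to the first variant (a sketch; ``matcher`` is an assumed concrete ``GenericDescriptorMatcher``):
.. code-block:: cpp
// allow queryKeypoints[i] to match trainKeypoints[j] only if their
// coordinates differ by less than 25 pixels in x and in y
Mat mask = windowedMatchingMask( queryKeypoints, trainKeypoints, 25.f, 25.f );
std::vector<DMatch> matches;
matcher.match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches, mask );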
GenericDescriptorMatcher::knnMatch
--------------------------------------
Finds the ``k`` best matches for each query keypoint.
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, int k, InputArray mask=noArray(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, int k, InputArrayOfArrays masks=noArray(), bool compactResult=false )
The methods are extended variants of ``GenericDescriptorMatch::match``. The parameters are similar, and the semantics are similar to ``DescriptorMatcher::knnMatch``, but this class does not require explicitly computed keypoint descriptors.
GenericDescriptorMatcher::radiusMatch
-----------------------------------------
For each query keypoint, finds the training keypoints not farther than the specified distance.
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, float maxDistance, InputArray mask=noArray(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, float maxDistance, InputArrayOfArrays masks=noArray(), bool compactResult=false )
The methods are similar to ``DescriptorMatcher::radiusMatch``, but this class does not require explicitly computed keypoint descriptors.
GenericDescriptorMatcher::read
----------------------------------
Reads a matcher object from a file node.
.. ocv:function:: void GenericDescriptorMatcher::read( const FileNode& fn )
GenericDescriptorMatcher::write
-----------------------------------
Writes a match object to a file storage.
.. ocv:function:: void GenericDescriptorMatcher::write( FileStorage& fs ) const
GenericDescriptorMatcher::clone
-----------------------------------
Clones the matcher.
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData=false ) const
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies
both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters
but with empty train data.
VectorDescriptorMatcher
-----------------------
.. ocv:class:: VectorDescriptorMatcher : public GenericDescriptorMatcher
Class used for matching descriptors that can be described as vectors in a finite-dimensional space. ::
class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
{
public:
VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
virtual ~VectorDescriptorMatcher();
virtual void add( InputArrayOfArrays imgCollection,
vector<vector<KeyPoint> >& pointCollection );
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
Example: ::
VectorDescriptorMatcher matcher( new SurfDescriptorExtractor,
new BruteForceMatcher<L2<float> > );

View File

@ -36,38 +36,6 @@ Detects corners using the FAST algorithm by [Rosten06]_.
.. [Rosten06] E. Rosten and T. Drummond. Machine Learning for High-speed Corner Detection. In European Conference on Computer Vision (ECCV), 2006.
BriefDescriptorExtractor
------------------------
.. ocv:class:: BriefDescriptorExtractor : public DescriptorExtractor
Class for computing BRIEF descriptors described in the paper by M. Calonder, V. Lepetit,
C. Strecha, and P. Fua, *BRIEF: Binary Robust Independent Elementary Features*,
11th European Conference on Computer Vision (ECCV), Heraklion, Crete, LNCS Springer, September 2010. ::
class BriefDescriptorExtractor : public DescriptorExtractor
{
public:
static const int PATCH_SIZE = 48;
static const int KERNEL_SIZE = 9;
// bytes is the length of the descriptor in bytes. It can be 16, 32 or 64.
BriefDescriptorExtractor( int bytes = 32 );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
protected:
...
};
.. note::
* A complete BRIEF extractor sample can be found at opencv_source_code/samples/cpp/brief_match_test.cpp
MSER
----
.. ocv:class:: MSER : public FeatureDetector
@ -213,43 +181,6 @@ Finds keypoints in an image and computes their descriptors
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
FREAK
-----
.. ocv:class:: FREAK : public DescriptorExtractor
Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in [AOV12]_. The algorithm proposes a novel keypoint descriptor inspired by the human visual system, and more precisely the retina, coined Fast Retina Keypoint (FREAK). A cascade of binary strings is computed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in general faster to compute with lower memory load and also more robust than SIFT, SURF or BRISK. They are competitive alternatives to existing keypoints, in particular for embedded applications.
.. [AOV12] A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award Winner.
.. note::
* An example on how to use the FREAK descriptor can be found at opencv_source_code/samples/cpp/freak_demo.cpp
FREAK::FREAK
------------
The FREAK constructor
.. ocv:function:: FREAK::FREAK( bool orientationNormalized=true, bool scaleNormalized=true, float patternScale=22.0f, int nOctaves=4, const vector<int>& selectedPairs=vector<int>() )
:param orientationNormalized: Enable orientation normalization.
:param scaleNormalized: Enable scale normalization.
:param patternScale: Scaling of the description pattern.
:param nOctaves: Number of octaves covered by the detected keypoints.
:param selectedPairs: (Optional) user-defined indexes of selected pairs.
FREAK::selectPairs
------------------
Select the 512 best description pair indexes from an input (grayscale) image set. FREAK ships with a set of pairs learned off-line; researchers can run a training process to learn their own set of pairs. For more details, read section 4.2 in: A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012.
We notice that for keypoint matching applications, image content has little effect on the selected pairs unless it is very specific; what does matter is the detector type (blobs, corners, ...) and the options used (scale/rotation invariance, ...). Reduce ``corrThresh`` if not enough pairs are selected (43 points --> 903 possible pairs).
.. ocv:function:: vector<int> FREAK::selectPairs(const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const double corrThresh = 0.7, bool verbose = true)
:param images: Grayscale image input set.
:param keypoints: Set of detected keypoints.
:param corrThresh: Correlation threshold.
:param verbose: Prints pair selection information.
KAZE
----
.. ocv:class:: KAZE : public Feature2D
@ -312,3 +243,10 @@ The AKAZE constructor
:param octaves: Maximum octave evolution of the image
:param sublevels: Default number of sublevels per scale level
:param diffusivity: Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or DIFF_CHARBONNIER
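A minimal usage sketch, assuming AKAZE follows the same ``create()`` factory pattern this refactoring introduces, using default parameters (the image file name is a placeholder):
.. code-block:: cpp
Mat image = imread( "scene.png", IMREAD_GRAYSCALE );
Ptr<AKAZE> akaze = AKAZE::create();
std::vector<KeyPoint> keypoints;
Mat descriptors;
akaze->detectAndCompute( image, noArray(), keypoints, descriptors );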
SIFT
----
.. ocv:class:: SIFT : public Feature2D
The SIFT algorithm has been moved to the opencv_contrib/xfeatures2d module.
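A minimal sketch of using it from there (assuming OpenCV is built with the opencv_contrib modules; the image file name is a placeholder):
.. code-block:: cpp
#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;
Mat image = imread( "myimage.png", IMREAD_GRAYSCALE );
Ptr<SIFT> sift = SIFT::create();
std::vector<KeyPoint> keypoints;
Mat descriptors;
sift->detectAndCompute( image, noArray(), keypoints, descriptors );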

View File

@ -11,6 +11,5 @@ features2d. 2D Features Framework
common_interfaces_of_feature_detectors
common_interfaces_of_descriptor_extractors
common_interfaces_of_descriptor_matchers
common_interfaces_of_generic_descriptor_matchers
drawing_function_of_keypoints_and_matches
object_categorization

View File

@ -49,7 +49,7 @@
namespace cv
{
CV_EXPORTS bool initModule_features2d();
CV_EXPORTS bool initModule_features2d(void);
// //! writes vector of keypoints to the file storage
// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
@ -353,107 +353,6 @@ protected:
typedef ORB OrbFeatureDetector;
typedef ORB OrbDescriptorExtractor;
/*!
FREAK implementation
*/
class CV_EXPORTS FREAK : public DescriptorExtractor
{
public:
/** Constructor
* @param orientationNormalized enable orientation normalization
* @param scaleNormalized enable scale normalization
* @param patternScale scaling of the description pattern
* @param nOctaves number of octaves covered by the detected keypoints
* @param selectedPairs (optional) user defined selected pairs
*/
explicit FREAK( bool orientationNormalized = true,
bool scaleNormalized = true,
float patternScale = 22.0f,
int nOctaves = 4,
const std::vector<int>& selectedPairs = std::vector<int>());
FREAK( const FREAK& rhs );
FREAK& operator=( const FREAK& );
virtual ~FREAK();
/** returns the descriptor length in bytes */
virtual int descriptorSize() const;
/** returns the descriptor type */
virtual int descriptorType() const;
/** returns the default norm type */
virtual int defaultNorm() const;
/** select the 512 "best description pairs"
* @param images grayscale images set
* @param keypoints set of detected keypoints
* @param corrThresh correlation threshold
* @param verbose print construction information
* @return list of best pair indexes
*/
std::vector<int> selectPairs( const std::vector<Mat>& images, std::vector<std::vector<KeyPoint> >& keypoints,
const double corrThresh = 0.7, bool verbose = true );
AlgorithmInfo* info() const;
enum
{
NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45
};
protected:
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
void buildPattern();
template <typename imgType, typename iiType>
imgType meanIntensity( InputArray image, InputArray integral, const float kp_x, const float kp_y,
const unsigned int scale, const unsigned int rot, const unsigned int point ) const;
template <typename srcMatType, typename iiMatType>
void computeDescriptors( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
template <typename srcMatType>
void extractDescriptor(srcMatType *pointsValue, void ** ptr) const;
bool orientationNormalized; //true if the orientation is normalized, false otherwise
bool scaleNormalized; //true if the scale is normalized, false otherwise
double patternScale; //scaling of the pattern
int nOctaves; //number of octaves
bool extAll; // true if all pairs need to be extracted for pairs selection
double patternScale0;
int nOctaves0;
std::vector<int> selectedPairs0;
struct PatternPoint
{
float x; // x coordinate relative to center
float y; // y coordinate relative to center
float sigma; // Gaussian smoothing sigma
};
struct DescriptionPair
{
uchar i; // index of the first point
uchar j; // index of the second point
};
struct OrientationPair
{
uchar i; // index of the first point
uchar j; // index of the second point
int weight_dx; // (dx/norm_sq)*4096
int weight_dy; // (dy/norm_sq)*4096
};
std::vector<PatternPoint> patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation)
int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries)
DescriptionPair descriptionPairs[NB_PAIRS];
OrientationPair orientationPairs[NB_ORIENPAIRS];
};
/*!
Maximal Stable Extremal Regions class.
@ -493,36 +392,6 @@ protected:
typedef MSER MserFeatureDetector;
/*!
The "Star" Detector.
The class implements the keypoint detector introduced by K. Konolige.
*/
class CV_EXPORTS_W StarDetector : public FeatureDetector
{
public:
//! the full constructor
CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30,
int _lineThresholdProjected=10,
int _lineThresholdBinarized=8,
int _suppressNonmaxSize=5);
//! finds the keypoints in the image
CV_WRAP_AS(detect) void operator()(const Mat& image,
CV_OUT std::vector<KeyPoint>& keypoints) const;
AlgorithmInfo* info() const;
protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
int maxSize;
int responseThreshold;
int lineThresholdProjected;
int lineThresholdBinarized;
int suppressNonmaxSize;
};
//! detects corners using FAST algorithm by E. Rosten
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
int threshold, bool nonmaxSuppression=true );
@ -570,7 +439,6 @@ protected:
};
typedef GFTTDetector GoodFeaturesToTrackDetector;
typedef StarDetector StarFeatureDetector;
class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector
{
@ -624,277 +492,6 @@ protected:
};
class CV_EXPORTS_W DenseFeatureDetector : public FeatureDetector
{
public:
CV_WRAP explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
float featureScaleMul=0.1f,
int initXyStep=6, int initImgBound=0,
bool varyXyStepWithScale=true,
bool varyImgBoundWithScale=false );
AlgorithmInfo* info() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
double initFeatureScale;
int featureScaleLevels;
double featureScaleMul;
int initXyStep;
int initImgBound;
bool varyXyStepWithScale;
bool varyImgBoundWithScale;
};
/*
* Adapts a detector to partition the source image into a grid and detect
* points in each cell.
*/
class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector
{
public:
/*
* detector Detector that will be adapted.
* maxTotalKeypoints Maximum count of keypoints detected on the image. Only the strongest keypoints
* will be kept.
* gridRows Grid rows count.
* gridCols Grid column count.
*/
CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector=Ptr<FeatureDetector>(),
int maxTotalKeypoints=1000,
int gridRows=4, int gridCols=4 );
// TODO implement read/write
virtual bool empty() const;
AlgorithmInfo* info() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
Ptr<FeatureDetector> detector;
int maxTotalKeypoints;
int gridRows;
int gridCols;
};
/*
* Adapts a detector to detect points over multiple levels of a Gaussian
* pyramid. Useful for detectors that are not inherently scaled.
*/
class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector
{
public:
// maxLevel - The 0-based index of the last pyramid layer
CV_WRAP PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector, int maxLevel=2 );
// TODO implement read/write
virtual bool empty() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
Ptr<FeatureDetector> detector;
int maxLevel;
};
/** \brief A feature detector parameter adjuster, this is used by the DynamicAdaptedFeatureDetector
* and is a wrapper for FeatureDetector that allows it to be adjusted after a detection
*/
class CV_EXPORTS AdjusterAdapter: public FeatureDetector
{
public:
/** pure virtual interface
*/
virtual ~AdjusterAdapter() {}
/** too few features were detected, so adjust the detector params accordingly
* \param min the minimum number of desired features
* \param n_detected the number previously detected
*/
virtual void tooFew(int min, int n_detected) = 0;
/** too many features were detected, so adjust the detector params accordingly
* \param max the maximum number of desired features
* \param n_detected the number previously detected
*/
virtual void tooMany(int max, int n_detected) = 0;
/** are params maxed out or still valid?
* \return false if the parameters can't be adjusted any more
*/
virtual bool good() const = 0;
virtual Ptr<AdjusterAdapter> clone() const = 0;
static Ptr<AdjusterAdapter> create( const String& detectorType );
};
/** \brief an adaptively adjusting detector that iteratively detects until the desired number
* of features is detected.
* Beware that this is not thread safe - as the adjustment of parameters breaks the const
* of the detection routine...
* /TODO Make this const correct and thread safe
*
* sample usage:
//will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run
//FAST feature detection 10 times until that number of keypoints are found
Ptr<FeatureDetector> detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10));
*/
class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
{
public:
/** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
* \param max_features the maximum desired number of features
* \param max_iters the maximum number of times to try to adjust the feature detector params
* for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
* \param min_features the minimum desired features
*/
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
virtual bool empty() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
private:
DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&);
DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&);
int escape_iters_;
int min_features_, max_features_;
const Ptr<AdjusterAdapter> adjuster_;
};
/**\brief an adjuster for the FAST detector. This will basically decrement or increment the
* threshold by 1
*/
class CV_EXPORTS FastAdjuster: public AdjusterAdapter
{
public:
/**\param init_thresh the initial threshold to start with, default = 20
* \param nonmax whether to use non-max suppression for FAST feature detection
*/
FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200);
virtual void tooFew(int minv, int n_detected);
virtual void tooMany(int maxv, int n_detected);
virtual bool good() const;
virtual Ptr<AdjusterAdapter> clone() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
int thresh_;
bool nonmax_;
int init_thresh_, min_thresh_, max_thresh_;
};
/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now
* TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams
*/
class CV_EXPORTS StarAdjuster: public AdjusterAdapter
{
public:
StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.);
virtual void tooFew(int minv, int n_detected);
virtual void tooMany(int maxv, int n_detected);
virtual bool good() const;
virtual Ptr<AdjusterAdapter> clone() const;
protected:
virtual void detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
double thresh_, init_thresh_, min_thresh_, max_thresh_;
};
class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
{
public:
SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
virtual void tooFew(int minv, int n_detected);
virtual void tooMany(int maxv, int n_detected);
virtual bool good() const;
virtual Ptr<AdjusterAdapter> clone() const;
protected:
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
double thresh_, init_thresh_, min_thresh_, max_thresh_;
};
CV_EXPORTS Mat windowedMatchingMask( const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
float maxDeltaX, float maxDeltaY );
/*
* OpponentColorDescriptorExtractor
*
* Adapts a descriptor extractor to compute descriptors in Opponent Color Space
* (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition").
* Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor
* (set in constructor) computes descriptors on each of the three channels and concatenates
* them into a single color descriptor.
*/
class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor
{
public:
OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& descriptorExtractor );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
virtual bool empty() const;
protected:
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
Ptr<DescriptorExtractor> descriptorExtractor;
};
/*
* BRIEF Descriptor
*/
class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor
{
public:
static const int PATCH_SIZE = 48;
static const int KERNEL_SIZE = 9;
// bytes is the length of the descriptor in bytes. It can be 16, 32 or 64.
BriefDescriptorExtractor( int bytes = 32 );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
/// @todo read and write for brief
AlgorithmInfo* info() const;
protected:
virtual void computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const;
typedef void(*PixelTestFn)(InputArray, const std::vector<KeyPoint>&, OutputArray);
int bytes_;
PixelTestFn test_fn_;
};
// KAZE/AKAZE diffusivity
enum {
DIFF_PM_G1 = 0,
@ -1293,208 +890,6 @@ protected:
int addedDescCount;
};
/****************************************************************************************\
* GenericDescriptorMatcher *
\****************************************************************************************/
/*
* Abstract interface for a keypoint descriptor and matcher
*/
class GenericDescriptorMatcher;
typedef GenericDescriptorMatcher GenericDescriptorMatch;
class CV_EXPORTS GenericDescriptorMatcher
{
public:
GenericDescriptorMatcher();
virtual ~GenericDescriptorMatcher();
/*
* Add train collection: images and keypoints from them.
* images A set of train images.
* keypoints Keypoint collection that has been detected on train images.
*
* Keypoints for which a descriptor cannot be computed are removed. Such keypoints
* must be filtered in this method before being added to the train collection "trainPointCollection".
* If an inheriting class needs to perform such prefiltering, it must override the add() method.
* In the other class methods, the programmer has access to the train keypoints via a constant reference.
*/
virtual void add( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints );
const std::vector<Mat>& getTrainImages() const;
const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const;
/*
* Clear images and keypoints stored in the train collection.
*/
virtual void clear();
/*
* Returns true if the matcher supports a mask for matching descriptors.
*/
virtual bool isMaskSupported() = 0;
/*
* Train some inner structures (e.g. flann index or decision trees).
* The train() method is run every time inside the matching methods, so the implementation
* should check whether these inner structures need to be trained/retrained or not.
*/
virtual void train();
/*
* Classifies query keypoints.
* queryImage The query image
* queryKeypoints Keypoints from the query image
* trainImage The train image
* trainKeypoints Keypoints from the train image
*/
// Classify keypoints from query image under one train image.
void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const;
// Classify keypoints from query image under train image collection.
void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints );
/*
* Group of methods to match keypoints from image pair.
* Keypoints for which a descriptor cannot be computed are removed.
* The train() method is called here.
*/
// Find one best match for each query descriptor (if mask is empty).
void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<DMatch>& matches, InputArray mask=noArray() ) const;
// Find k best matches for each query keypoint (in increasing order of distances).
// compactResult is used when mask is not empty. If compactResult is false matches
// vector will have the same size as queryDescriptors rows.
// If compactResult is true matches vector will not contain matches for fully masked out query descriptors.
void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
InputArray mask=noArray(), bool compactResult=false ) const;
// Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances).
void radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArray mask=noArray(), bool compactResult=false ) const;
/*
* Group of methods to match keypoints from one image to image set.
* See description of similar methods for matching image pair above.
*/
void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<DMatch>& matches, InputArrayOfArrays masks=noArray() );
void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
InputArrayOfArrays masks=noArray(), bool compactResult=false );
void radiusMatch(InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks=noArray(), bool compactResult=false );
// Reads matcher object from a file node
virtual void read( const FileNode& fn );
// Writes matcher object to a file storage
virtual void write( FileStorage& fs ) const;
// Returns true if the matching object is empty (e.g. the feature detector or descriptor matcher is empty)
virtual bool empty() const;
// Clone the matcher. If emptyTrainData is false, the method creates a deep copy of the object, i.e. copies
// both parameters and train data. If emptyTrainData is true, the method creates an object copy with the current parameters
// but with empty train data.
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
static Ptr<GenericDescriptorMatcher> create( const String& genericDescritptorMatcherType,
const String &paramsFilename=String() );
protected:
// In fact the matching is implemented only by the following two methods. These methods suppose
// that the class object has been trained already. Public match methods call these methods
// after calling train().
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
InputArrayOfArrays masks, bool compactResult ) = 0;
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks, bool compactResult ) = 0;
/*
* A storage for sets of keypoints together with corresponding images and class IDs
*/
class CV_EXPORTS KeyPointCollection
{
public:
KeyPointCollection();
KeyPointCollection( const KeyPointCollection& collection );
void add( const std::vector<Mat>& images, const std::vector<std::vector<KeyPoint> >& keypoints );
void clear();
// Returns the total number of keypoints in the collection
size_t keypointCount() const;
size_t imageCount() const;
const std::vector<std::vector<KeyPoint> >& getKeypoints() const;
const std::vector<KeyPoint>& getKeypoints( int imgIdx ) const;
const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const;
const KeyPoint& getKeyPoint( int globalPointIdx ) const;
void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const;
const std::vector<Mat>& getImages() const;
const Mat& getImage( int imgIdx ) const;
protected:
int pointCount;
std::vector<Mat> images;
std::vector<std::vector<KeyPoint> > keypoints;
// global indices of the first points in each image, startIndices.size() = keypoints.size()
std::vector<int> startIndices;
private:
static Mat clone_op( Mat m ) { return m.clone(); }
};
KeyPointCollection trainPointCollection;
};
/****************************************************************************************\
* VectorDescriptorMatcher *
\****************************************************************************************/
/*
* A class used for matching descriptors that can be described as vectors in a finite-dimensional space
*/
class VectorDescriptorMatcher;
typedef VectorDescriptorMatcher VectorDescriptorMatch;
class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
{
public:
VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
virtual ~VectorDescriptorMatcher();
virtual void add( InputArrayOfArrays imgCollection,
std::vector<std::vector<KeyPoint> >& pointCollection );
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
virtual bool empty() const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
InputArrayOfArrays masks, bool compactResult );
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks, bool compactResult );
Ptr<DescriptorExtractor> extractor;
Ptr<DescriptorMatcher> matcher;
};
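// Usage sketch (illustrative, not part of this header): pair a vector descriptor
// extractor with a conventional matcher behind the GenericDescriptorMatcher
// interface; img1/img2 stand in for any pair of 8-bit images.
static inline void exampleVectorDescriptorMatcher( const Mat& img1, const Mat& img2 )
{
    Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");
    std::vector<KeyPoint> kp1, kp2;
    detector->detect( img1, kp1 );
    detector->detect( img2, kp2 );
    VectorDescriptorMatcher vdm( DescriptorExtractor::create("ORB"),
                                 DescriptorMatcher::create("BruteForce-Hamming") );
    std::vector<DMatch> matches;
    vdm.match( img1, kp1, img2, kp2, matches );  // extracts descriptors internally
}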
/****************************************************************************************\
* Drawing functions *
@ -1547,13 +942,6 @@ CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatc
CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
std::vector<KeyPoint>& keypoints1, std::vector<KeyPoint>& keypoints2,
std::vector<std::vector<DMatch> >* matches1to2, std::vector<std::vector<uchar> >* correctMatches1to2Mask,
std::vector<Point2f>& recallPrecisionCurve,
const Ptr<GenericDescriptorMatcher>& dmatch=Ptr<GenericDescriptorMatcher>() );
/****************************************************************************************\
* Bag of visual words *
\****************************************************************************************/


@ -1,184 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <algorithm>
#include <vector>
#include <iostream>
#include <iomanip>
using namespace cv;
inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
{
static const int HALF_KERNEL = BriefDescriptorExtractor::KERNEL_SIZE / 2;
int img_y = (int)(pt.pt.y + 0.5) + y;
int img_x = (int)(pt.pt.x + 0.5) + x;
return sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1)
- sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
- sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
static void pixelTests16(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_16.i"
}
}
static void pixelTests32(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_32.i"
}
}
static void pixelTests64(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_64.i"
}
}
namespace cv
{
BriefDescriptorExtractor::BriefDescriptorExtractor(int bytes) :
bytes_(bytes), test_fn_(NULL)
{
switch (bytes)
{
case 16:
test_fn_ = pixelTests16;
break;
case 32:
test_fn_ = pixelTests32;
break;
case 64:
test_fn_ = pixelTests64;
break;
default:
CV_Error(Error::StsBadArg, "bytes must be 16, 32, or 64");
}
}
int BriefDescriptorExtractor::descriptorSize() const
{
return bytes_;
}
int BriefDescriptorExtractor::descriptorType() const
{
return CV_8UC1;
}
int BriefDescriptorExtractor::defaultNorm() const
{
return NORM_HAMMING;
}
void BriefDescriptorExtractor::read( const FileNode& fn)
{
int dSize = fn["descriptorSize"];
switch (dSize)
{
case 16:
test_fn_ = pixelTests16;
break;
case 32:
test_fn_ = pixelTests32;
break;
case 64:
test_fn_ = pixelTests64;
break;
default:
CV_Error(Error::StsBadArg, "descriptorSize must be 16, 32, or 64");
}
bytes_ = dSize;
}
void BriefDescriptorExtractor::write( FileStorage& fs) const
{
fs << "descriptorSize" << bytes_;
}
void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
// Construct integral image for fast smoothing (box filter)
Mat sum;
Mat grayImage = image.getMat();
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
///TODO allow the user to pass in a precomputed integral image
//if(image.type() == CV_32S)
// sum = image;
//else
integral( grayImage, sum, CV_32S);
//Remove keypoints very close to the border
KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
descriptors.create((int)keypoints.size(), bytes_, CV_8U);
descriptors.setTo(Scalar::all(0));
test_fn_(sum, keypoints, descriptors);
}
} // namespace cv
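// Usage sketch for the extractor above (2.4-style API removed by this refactor):
// FAST keypoints + 32-byte BRIEF descriptors, matched with Hamming distance.
// img1/img2 are placeholder 8-bit grayscale images; numbers are illustrative.
static void exampleBrief( const Mat& img1, const Mat& img2 )
{
    FastFeatureDetector detector(20);        // FAST threshold
    BriefDescriptorExtractor extractor(32);  // 16, 32 or 64 bytes
    std::vector<KeyPoint> kp1, kp2;
    detector.detect( img1, kp1 );
    detector.detect( img2, kp2 );
    Mat d1, d2;
    extractor.compute( img1, kp1, d1 );
    extractor.compute( img2, kp2, d2 );
    BFMatcher matcher( extractor.defaultNorm() );  // NORM_HAMMING
    std::vector<DMatch> matches;
    matcher.match( d1, d2, matches );
}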


@ -98,13 +98,6 @@ void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoint
Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
{
if( descriptorExtractorType.find("Opponent") == 0 )
{
size_t pos = String("Opponent").size();
String type = descriptorExtractorType.substr(pos);
return makePtr<OpponentColorDescriptorExtractor>(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
@ -114,151 +107,4 @@ CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<
DescriptorExtractor::compute(image, keypoints, descriptors);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/****************************************************************************************\
* OpponentColorDescriptorExtractor *
\****************************************************************************************/
OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& _descriptorExtractor ) :
descriptorExtractor(_descriptorExtractor)
{
CV_Assert( descriptorExtractor );
}
static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, std::vector<Mat>& opponentChannels )
{
if( bgrImage.type() != CV_8UC3 )
CV_Error( Error::StsBadArg, "input image must be a BGR image of type CV_8UC3" );
// Prepare opponent color space storage matrices.
opponentChannels.resize( 3 );
opponentChannels[0] = cv::Mat(bgrImage.size(), CV_8UC1); // R-G RED-GREEN
opponentChannels[1] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G-2B YELLOW-BLUE
opponentChannels[2] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G+B
for(int y = 0; y < bgrImage.rows; ++y)
for(int x = 0; x < bgrImage.cols; ++x)
{
Vec3b v = bgrImage.at<Vec3b>(y, x);
uchar& b = v[0];
uchar& g = v[1];
uchar& r = v[2];
opponentChannels[0].at<uchar>(y, x) = saturate_cast<uchar>(0.5f * (255 + g - r)); // (R - G)/sqrt(2), but converted to the destination data type
opponentChannels[1].at<uchar>(y, x) = saturate_cast<uchar>(0.25f * (510 + r + g - 2*b)); // (R + G - 2B)/sqrt(6), but converted to the destination data type
opponentChannels[2].at<uchar>(y, x) = saturate_cast<uchar>(1.f/3.f * (r + g + b)); // (R + G + B)/sqrt(3), but converted to the destination data type
}
}
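// Sanity sketch (not in the original file): the mapping above sends a pure-red
// pixel (B,G,R) = (0,0,255) to O1 = 0 (red end of the red-green axis),
// O2 = 191 (191.25 rounded) and O3 = 85.
static void opponentPixelExample()
{
    const uchar b = 0, g = 0, r = 255;
    uchar o1 = saturate_cast<uchar>(0.5f  * (255 + g - r));        // 0
    uchar o2 = saturate_cast<uchar>(0.25f * (510 + r + g - 2*b));  // 191
    uchar o3 = saturate_cast<uchar>(1.f/3.f * (r + g + b));        // 85
    (void)o1; (void)o2; (void)o3;
}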
struct KP_LessThan
{
KP_LessThan(const std::vector<KeyPoint>& _kp) : kp(&_kp) {}
bool operator()(int i, int j) const
{
return (*kp)[i].class_id < (*kp)[j].class_id;
}
const std::vector<KeyPoint>* kp;
};
void OpponentColorDescriptorExtractor::computeImpl( InputArray _bgrImage, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
Mat bgrImage = _bgrImage.getMat();
std::vector<Mat> opponentChannels;
convertBGRImageToOpponentColorSpace( bgrImage, opponentChannels );
const int N = 3; // channel count
std::vector<KeyPoint> channelKeypoints[N];
Mat channelDescriptors[N];
std::vector<int> idxs[N];
// Compute descriptors three times, once for each Opponent channel to concatenate into a single color descriptor
int maxKeypointsCount = 0;
for( int ci = 0; ci < N; ci++ )
{
channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
// Use class_id member to get indices into initial keypoints vector
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
channelKeypoints[ci][ki].class_id = (int)ki;
descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
idxs[ci].resize( channelKeypoints[ci].size() );
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
{
idxs[ci][ki] = (int)ki;
}
std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
maxKeypointsCount = std::max( maxKeypointsCount, (int)channelKeypoints[ci].size());
}
std::vector<KeyPoint> outKeypoints;
outKeypoints.reserve( keypoints.size() );
int dSize = descriptorExtractor->descriptorSize();
Mat mergedDescriptors( maxKeypointsCount, 3*dSize, descriptorExtractor->descriptorType() );
int mergedCount = 0;
// cp - current channel position
size_t cp[] = {0, 0, 0};
while( cp[0] < channelKeypoints[0].size() &&
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )
{
const int maxInitIdx = std::max( 0, std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
channelKeypoints[2][idxs[2][cp[2]]].class_id ) ) );
// note: check the bound before indexing to avoid reading past the end of idxs
while( cp[0] < channelKeypoints[0].size() && channelKeypoints[0][idxs[0][cp[0]]].class_id < maxInitIdx ) { cp[0]++; }
while( cp[1] < channelKeypoints[1].size() && channelKeypoints[1][idxs[1][cp[1]]].class_id < maxInitIdx ) { cp[1]++; }
while( cp[2] < channelKeypoints[2].size() && channelKeypoints[2][idxs[2][cp[2]]].class_id < maxInitIdx ) { cp[2]++; }
if( cp[0] >= channelKeypoints[0].size() || cp[1] >= channelKeypoints[1].size() || cp[2] >= channelKeypoints[2].size() )
break;
if( channelKeypoints[0][idxs[0][cp[0]]].class_id == maxInitIdx &&
channelKeypoints[1][idxs[1][cp[1]]].class_id == maxInitIdx &&
channelKeypoints[2][idxs[2][cp[2]]].class_id == maxInitIdx )
{
outKeypoints.push_back( keypoints[maxInitIdx] );
// merge descriptors
for( int ci = 0; ci < N; ci++ )
{
Mat dst = mergedDescriptors(Range(mergedCount, mergedCount+1), Range(ci*dSize, (ci+1)*dSize));
channelDescriptors[ci].row( idxs[ci][cp[ci]] ).copyTo( dst );
cp[ci]++;
}
mergedCount++;
}
}
mergedDescriptors.rowRange(0, mergedCount).copyTo( descriptors );
std::swap( outKeypoints, keypoints );
}
void OpponentColorDescriptorExtractor::read( const FileNode& fn )
{
descriptorExtractor->read(fn);
}
void OpponentColorDescriptorExtractor::write( FileStorage& fs ) const
{
descriptorExtractor->write(fs);
}
int OpponentColorDescriptorExtractor::descriptorSize() const
{
return 3*descriptorExtractor->descriptorSize();
}
int OpponentColorDescriptorExtractor::descriptorType() const
{
return descriptorExtractor->descriptorType();
}
int OpponentColorDescriptorExtractor::defaultNorm() const
{
return descriptorExtractor->defaultNorm();
}
bool OpponentColorDescriptorExtractor::empty() const
{
return !descriptorExtractor || descriptorExtractor->empty();
}
}
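// Usage sketch for the wrapper above (2.4-style, removed by this refactor):
// compute a base descriptor on each opponent channel of a BGR image and
// concatenate, tripling the descriptor length. 'bgr' stands in for a CV_8UC3 image.
static void exampleOpponent( const cv::Mat& bgr )
{
    cv::OpponentColorDescriptorExtractor opp( cv::DescriptorExtractor::create("ORB") );
    std::vector<cv::KeyPoint> kp;
    cv::FeatureDetector::create("FAST")->detect( bgr, kp );
    cv::Mat desc;                       // 3 * base descriptorSize() columns
    opp.compute( bgr, kp, desc );
}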


@ -106,24 +106,6 @@ void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint
Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
{
if( detectorType.find("Grid") == 0 )
{
return makePtr<GridAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Grid"))));
}
if( detectorType.find("Pyramid") == 0 )
{
return makePtr<PyramidAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Pyramid"))));
}
if( detectorType.find("Dynamic") == 0 )
{
return makePtr<DynamicAdaptedFeatureDetector>(AdjusterAdapter::create(
detectorType.substr(strlen("Dynamic"))));
}
if( detectorType.compare( "HARRIS" ) == 0 )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
@ -176,212 +158,4 @@ void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoin
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
* DenseFeatureDetector
*/
DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
float _featureScaleMul, int _initXyStep,
int _initImgBound, bool _varyXyStepWithScale,
bool _varyImgBoundWithScale ) :
initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
{}
void DenseFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
float curScale = static_cast<float>(initFeatureScale);
int curStep = initXyStep;
int curBound = initImgBound;
for( int curLevel = 0; curLevel < featureScaleLevels; curLevel++ )
{
for( int x = curBound; x < image.cols - curBound; x += curStep )
{
for( int y = curBound; y < image.rows - curBound; y += curStep )
{
keypoints.push_back( KeyPoint(static_cast<float>(x), static_cast<float>(y), curScale) );
}
}
curScale = static_cast<float>(curScale * featureScaleMul);
if( varyXyStepWithScale ) curStep = static_cast<int>( curStep * featureScaleMul + 0.5f );
if( varyImgBoundWithScale ) curBound = static_cast<int>( curBound * featureScaleMul + 0.5f );
}
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
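// Usage sketch: a regular grid of keypoints every 6 px at a single scale.
// Arguments mirror the constructor above; the trailing bool parameters keep
// their header defaults here, and 'image' is a placeholder cv::Mat.
static void exampleDense( const Mat& image )
{
    DenseFeatureDetector dense( 1.f /*initFeatureScale*/, 1 /*featureScaleLevels*/,
                                0.1f /*featureScaleMul*/, 6 /*initXyStep*/,
                                0 /*initImgBound*/ );
    std::vector<KeyPoint> kps;
    dense.detect( image, kps );
}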
/*
* GridAdaptedFeatureDetector
*/
GridAdaptedFeatureDetector::GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector,
int _maxTotalKeypoints, int _gridRows, int _gridCols )
: detector(_detector), maxTotalKeypoints(_maxTotalKeypoints), gridRows(_gridRows), gridCols(_gridCols)
{}
bool GridAdaptedFeatureDetector::empty() const
{
return !detector || detector->empty();
}
struct ResponseComparator
{
bool operator() (const KeyPoint& a, const KeyPoint& b)
{
return std::abs(a.response) > std::abs(b.response);
}
};
static void keepStrongest( int N, std::vector<KeyPoint>& keypoints )
{
if( (int)keypoints.size() > N )
{
std::vector<KeyPoint>::iterator nth = keypoints.begin() + N;
std::nth_element( keypoints.begin(), nth, keypoints.end(), ResponseComparator() );
keypoints.erase( nth, keypoints.end() );
}
}
namespace {
class GridAdaptedFeatureDetectorInvoker : public ParallelLoopBody
{
private:
int gridRows_, gridCols_;
int maxPerCell_;
std::vector<KeyPoint>& keypoints_;
const Mat& image_;
const Mat& mask_;
const Ptr<FeatureDetector>& detector_;
Mutex* kptLock_;
GridAdaptedFeatureDetectorInvoker& operator=(const GridAdaptedFeatureDetectorInvoker&); // to quiet MSVC
public:
GridAdaptedFeatureDetectorInvoker(const Ptr<FeatureDetector>& detector, const Mat& image, const Mat& mask,
std::vector<KeyPoint>& keypoints, int maxPerCell, int gridRows, int gridCols,
cv::Mutex* kptLock)
: gridRows_(gridRows), gridCols_(gridCols), maxPerCell_(maxPerCell),
keypoints_(keypoints), image_(image), mask_(mask), detector_(detector),
kptLock_(kptLock)
{
}
void operator() (const Range& range) const
{
for (int i = range.start; i < range.end; ++i)
{
int celly = i / gridCols_;
int cellx = i - celly * gridCols_;
Range row_range((celly*image_.rows)/gridRows_, ((celly+1)*image_.rows)/gridRows_);
Range col_range((cellx*image_.cols)/gridCols_, ((cellx+1)*image_.cols)/gridCols_);
Mat sub_image = image_(row_range, col_range);
Mat sub_mask;
if (!mask_.empty()) sub_mask = mask_(row_range, col_range);
std::vector<KeyPoint> sub_keypoints;
sub_keypoints.reserve(maxPerCell_);
detector_->detect( sub_image, sub_keypoints, sub_mask );
keepStrongest( maxPerCell_, sub_keypoints );
std::vector<cv::KeyPoint>::iterator it = sub_keypoints.begin(),
end = sub_keypoints.end();
for( ; it != end; ++it )
{
it->pt.x += col_range.start;
it->pt.y += row_range.start;
}
cv::AutoLock join_keypoints(*kptLock_);
keypoints_.insert( keypoints_.end(), sub_keypoints.begin(), sub_keypoints.end() );
}
}
};
} // namespace
void GridAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
if (_image.empty() || maxTotalKeypoints < gridRows * gridCols)
{
keypoints.clear();
return;
}
keypoints.reserve(maxTotalKeypoints);
int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);
Mat image = _image.getMat(), mask = _mask.getMat();
cv::Mutex kptLock;
cv::parallel_for_(cv::Range(0, gridRows * gridCols),
GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
}
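// Usage sketch: cap FAST at ~500 keypoints spread over a 4x4 grid
// (at most 500/16 per cell; the strongest responses survive keepStrongest).
// 'image' is a placeholder cv::Mat; numbers are illustrative.
static void exampleGrid( const Mat& image )
{
    GridAdaptedFeatureDetector grid( FeatureDetector::create("FAST"), 500, 4, 4 );
    std::vector<KeyPoint> kps;
    grid.detect( image, kps );
}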
/*
* PyramidAdaptedFeatureDetector
*/
PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector, int _maxLevel )
: detector(_detector), maxLevel(_maxLevel)
{}
bool PyramidAdaptedFeatureDetector::empty() const
{
return !detector || detector->empty();
}
void PyramidAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
Mat src = image;
Mat src_mask = mask;
Mat dilated_mask;
if( !mask.empty() )
{
dilate( mask, dilated_mask, Mat() );
Mat mask255( mask.size(), CV_8UC1, Scalar(0) );
mask255.setTo( Scalar(255), dilated_mask != 0 );
dilated_mask = mask255;
}
for( int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2 )
{
// Detect on current level of the pyramid
std::vector<KeyPoint> new_pts;
detector->detect( src, new_pts, src_mask );
std::vector<KeyPoint>::iterator it = new_pts.begin(),
end = new_pts.end();
for( ; it != end; ++it)
{
it->pt.x *= multiplier;
it->pt.y *= multiplier;
it->size *= multiplier;
it->octave = l;
}
keypoints.insert( keypoints.end(), new_pts.begin(), new_pts.end() );
// Downsample
if( l < maxLevel )
{
Mat dst;
pyrDown( src, dst );
src = dst;
if( !mask.empty() )
resize( dilated_mask, src_mask, src.size(), 0, 0, INTER_AREA );
}
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
}
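// Usage sketch for the detector above: run a single-scale detector over a
// 3-level image pyramid; coordinates and sizes are mapped back to the original
// resolution as shown in detectImpl. 'image' is a placeholder cv::Mat.
static void examplePyramid( const cv::Mat& image )
{
    cv::PyramidAdaptedFeatureDetector pyr( cv::FeatureDetector::create("FAST"), 2 /*maxLevel*/ );
    std::vector<cv::KeyPoint> kps;
    pyr.detect( image, kps );
}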


@ -44,181 +44,4 @@
namespace cv
{
DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector(const Ptr<AdjusterAdapter>& a,
int min_features, int max_features, int max_iters ) :
escape_iters_(max_iters), min_features_(min_features), max_features_(max_features), adjuster_(a)
{}
bool DynamicAdaptedFeatureDetector::empty() const
{
return !adjuster_ || adjuster_->empty();
}
void DynamicAdaptedFeatureDetector::detectImpl(InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
//for oscillation testing
bool down = false;
bool up = false;
//flag for whether the correct threshold has been reached
bool thresh_good = false;
Ptr<AdjusterAdapter> adjuster = adjuster_->clone();
//iteration budget: give up after this many tries, even if the desired count hasn't been reached
int iter_count = escape_iters_;
while( iter_count > 0 && !(down && up) && !thresh_good && adjuster->good() )
{
keypoints.clear();
//the adjuster takes care of calling the detector with updated parameters
adjuster->detect(image, keypoints,mask);
if( int(keypoints.size()) < min_features_ )
{
down = true;
adjuster->tooFew(min_features_, (int)keypoints.size());
}
else if( int(keypoints.size()) > max_features_ )
{
up = true;
adjuster->tooMany(max_features_, (int)keypoints.size());
}
else
thresh_good = true;
iter_count--;
}
}
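// Usage sketch: retune FAST until 400..500 keypoints come back, giving up
// after 5 iterations. All numbers are illustrative; 'image' is a placeholder.
static void exampleDynamic( const Mat& image )
{
    DynamicAdaptedFeatureDetector detector( makePtr<FastAdjuster>(20, true),
                                            400 /*min*/, 500 /*max*/, 5 /*max_iters*/ );
    std::vector<KeyPoint> kps;
    detector.detect( image, kps );
}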
FastAdjuster::FastAdjuster( int init_thresh, bool nonmax, int min_thresh, int max_thresh ) :
thresh_(init_thresh), nonmax_(nonmax), init_thresh_(init_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void FastAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
}
void FastAdjuster::tooFew(int, int)
{
//fast is easy to adjust
thresh_--;
}
void FastAdjuster::tooMany(int, int)
{
//fast is easy to adjust
thresh_++;
}
//return whether or not the threshold is still within
//a useful range
bool FastAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> FastAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new FastAdjuster( init_thresh_, nonmax_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
StarAdjuster::StarAdjuster(double initial_thresh, double min_thresh, double max_thresh) :
thresh_(initial_thresh), init_thresh_(initial_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void StarAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
detector_tmp.detect(image, keypoints, mask);
}
void StarAdjuster::tooFew(int, int)
{
thresh_ *= 0.9;
if (thresh_ < 1.1)
thresh_ = 1.1;
}
void StarAdjuster::tooMany(int, int)
{
thresh_ *= 1.1;
}
bool StarAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> StarAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new StarAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max_thresh ) :
thresh_(initial_thresh), init_thresh_(initial_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void SurfAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
surf->set("hessianThreshold", thresh_);
surf->detect(image, keypoints, mask);
}
void SurfAdjuster::tooFew(int, int)
{
thresh_ *= 0.9;
if (thresh_ < 1.1)
thresh_ = 1.1;
}
void SurfAdjuster::tooMany(int, int)
{
thresh_ *= 1.1;
}
//return whether or not the threshold is still within
//a useful range
bool SurfAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> SurfAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new SurfAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
Ptr<AdjusterAdapter> AdjusterAdapter::create( const String& detectorType )
{
Ptr<AdjusterAdapter> adapter;
if( !detectorType.compare( "FAST" ) )
{
adapter = makePtr<FastAdjuster>();
}
else if( !detectorType.compare( "STAR" ) )
{
adapter = makePtr<StarAdjuster>();
}
else if( !detectorType.compare( "SURF" ) )
{
adapter = makePtr<SurfAdjuster>();
}
return adapter;
}
}


@ -556,56 +556,3 @@ int cv::getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float
return nearestPointIndex;
}
void cv::evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
std::vector<KeyPoint>& keypoints1, std::vector<KeyPoint>& keypoints2,
std::vector<std::vector<DMatch> >* _matches1to2, std::vector<std::vector<uchar> >* _correctMatches1to2Mask,
std::vector<Point2f>& recallPrecisionCurve,
const Ptr<GenericDescriptorMatcher>& _dmatcher )
{
Ptr<GenericDescriptorMatcher> dmatcher = _dmatcher;
dmatcher->clear();
std::vector<std::vector<DMatch> > *matches1to2, buf1;
matches1to2 = _matches1to2 != 0 ? _matches1to2 : &buf1;
std::vector<std::vector<uchar> > *correctMatches1to2Mask, buf2;
correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? _correctMatches1to2Mask : &buf2;
if( keypoints1.empty() )
CV_Error( Error::StsBadArg, "keypoints1 must not be empty" );
if( matches1to2->empty() && !dmatcher )
CV_Error( Error::StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
bool computeKeypoints2ByPrj = keypoints2.empty();
if( computeKeypoints2ByPrj )
{
CV_Error(Error::StsNotImplemented, "");
// TODO: add computing keypoints2 from keypoints1 using H1to2
}
if( matches1to2->empty() || computeKeypoints2ByPrj )
{
dmatcher->clear();
dmatcher->radiusMatch( img1, keypoints1, img2, keypoints2, *matches1to2, std::numeric_limits<float>::max() );
}
float repeatability;
int correspCount;
Mat thresholdedOverlapMask; // thresholded allOverlapErrors
calculateRepeatability( img1, img2, H1to2, keypoints1, keypoints2, repeatability, correspCount, &thresholdedOverlapMask );
correctMatches1to2Mask->resize(matches1to2->size());
for( size_t i = 0; i < matches1to2->size(); i++ )
{
(*correctMatches1to2Mask)[i].resize((*matches1to2)[i].size());
for( size_t j = 0;j < (*matches1to2)[i].size(); j++ )
{
int indexQuery = (*matches1to2)[i][j].queryIdx;
int indexTrain = (*matches1to2)[i][j].trainIdx;
(*correctMatches1to2Mask)[i][j] = thresholdedOverlapMask.at<uchar>( indexQuery, indexTrain );
}
}
computeRecallPrecisionCurve( *matches1to2, *correctMatches1to2Mask, recallPrecisionCurve );
}
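// Usage sketch: score a matcher on an image pair related by a known ground-truth
// homography H1to2 and read recall at 80% precision off the resulting curve.
// All inputs are placeholders; "FAST" is just one convenient detector choice.
static void exampleEvaluate( const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& H1to2,
                             const cv::Ptr<cv::GenericDescriptorMatcher>& dmatcher )
{
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::FeatureDetector::create("FAST")->detect( img1, kp1 );
    cv::FeatureDetector::create("FAST")->detect( img2, kp2 );
    std::vector<cv::Point2f> curve;
    cv::evaluateGenericDescriptorMatcher( img1, img2, H1to2, kp1, kp2,
                                          0, 0, curve, dmatcher );
    float recallAt80 = cv::getRecall( curve, 0.8f );
    (void)recallAt80;
}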


@ -62,24 +62,11 @@ CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
obj.info()->addParam(obj, "bytes", obj.bytes_))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
obj.info()->addParam(obj, "type", obj.type))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
obj.info()->addParam(obj, "maxSize", obj.maxSize);
obj.info()->addParam(obj, "responseThreshold", obj.responseThreshold);
obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -108,14 +95,6 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FREAK, "Feature2D.FREAK",
obj.info()->addParam(obj, "orientationNormalized", obj.orientationNormalized);
obj.info()->addParam(obj, "scaleNormalized", obj.scaleNormalized);
obj.info()->addParam(obj, "patternScale", obj.patternScale);
obj.info()->addParam(obj, "nbOctave", obj.nOctaves))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
@ -181,23 +160,6 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
obj.info()->addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
obj.info()->addParam(obj, "featureScaleMul", obj.featureScaleMul);
obj.info()->addParam(obj, "initXyStep", obj.initXyStep);
obj.info()->addParam(obj, "initImgBound", obj.initImgBound);
obj.info()->addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale))
CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
obj.info()->addParam<FeatureDetector>(obj, "detector", obj.detector, false, 0, 0); // Extra params added to avoid VS2013 fatal error in opencv2/core.hpp (decl. of addParam)
obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
obj.info()->addParam(obj, "gridRows", obj.gridRows);
obj.info()->addParam(obj, "gridCols", obj.gridCols))
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher",
obj.info()->addParam(obj, "normType", obj.normType);
obj.info()->addParam(obj, "crossCheck", obj.crossCheck))
@ -209,19 +171,14 @@ CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",)
bool cv::initModule_features2d(void)
{
bool all = true;
all &= !BriefDescriptorExtractor_info_auto.name().empty();
all &= !BRISK_info_auto.name().empty();
all &= !FastFeatureDetector_info_auto.name().empty();
all &= !StarDetector_info_auto.name().empty();
all &= !MSER_info_auto.name().empty();
all &= !FREAK_info_auto.name().empty();
all &= !ORB_info_auto.name().empty();
all &= !GFTTDetector_info_auto.name().empty();
all &= !KAZE_info_auto.name().empty();
all &= !AKAZE_info_auto.name().empty();
all &= !HarrisDetector_info_auto.name().empty();
all &= !DenseFeatureDetector_info_auto.name().empty();
all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
all &= !HarrisDetector_info_auto.name().empty();
all &= !BFMatcher_info_auto.name().empty();
all &= !FlannBasedMatcher_info_auto.name().empty();


@ -1,733 +0,0 @@
// freak.cpp
//
// Copyright (C) 2011-2012 Signal processing laboratory 2, EPFL,
// Kirell Benzi (kirell.benzi@epfl.ch),
// Raphael Ortiz (raphael.ortiz@a3.epfl.ch)
// Alexandre Alahi (alexandre.alahi@epfl.ch)
// and Pierre Vandergheynst (pierre.vandergheynst@epfl.ch)
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
#include "precomp.hpp"
#include <fstream>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
#include <bitset>
#include <sstream>
#include <algorithm>
#include <iomanip>
#include <string.h>
namespace cv
{
static const double FREAK_SQRT2 = 1.4142135623731;
static const double FREAK_LOG2 = 0.693147180559945;
static const int FREAK_NB_ORIENTATION = 256;
static const int FREAK_NB_POINTS = 43;
static const int FREAK_SMALLEST_KP_SIZE = 7; // smallest size of keypoints
static const int FREAK_NB_SCALES = FREAK::NB_SCALES;
static const int FREAK_NB_PAIRS = FREAK::NB_PAIRS;
static const int FREAK_NB_ORIENPAIRS = FREAK::NB_ORIENPAIRS;
// default pairs
static const int FREAK_DEF_PAIRS[FREAK::NB_PAIRS] =
{
404,431,818,511,181,52,311,874,774,543,719,230,417,205,11,
560,149,265,39,306,165,857,250,8,61,15,55,717,44,412,
592,134,761,695,660,782,625,487,549,516,271,665,762,392,178,
796,773,31,672,845,548,794,677,654,241,831,225,238,849,83,
691,484,826,707,122,517,583,731,328,339,571,475,394,472,580,
381,137,93,380,327,619,729,808,218,213,459,141,806,341,95,
382,568,124,750,193,749,706,843,79,199,317,329,768,198,100,
466,613,78,562,783,689,136,838,94,142,164,679,219,419,366,
418,423,77,89,523,259,683,312,555,20,470,684,123,458,453,833,
72,113,253,108,313,25,153,648,411,607,618,128,305,232,301,84,
56,264,371,46,407,360,38,99,176,710,114,578,66,372,653,
129,359,424,159,821,10,323,393,5,340,891,9,790,47,0,175,346,
236,26,172,147,574,561,32,294,429,724,755,398,787,288,299,
769,565,767,722,757,224,465,723,498,467,235,127,802,446,233,
544,482,800,318,16,532,801,441,554,173,60,530,713,469,30,
212,630,899,170,266,799,88,49,512,399,23,500,107,524,90,
194,143,135,192,206,345,148,71,119,101,563,870,158,254,214,
276,464,332,725,188,385,24,476,40,231,620,171,258,67,109,
844,244,187,388,701,690,50,7,850,479,48,522,22,154,12,659,
736,655,577,737,830,811,174,21,237,335,353,234,53,270,62,
182,45,177,245,812,673,355,556,612,166,204,54,248,365,226,
242,452,700,685,573,14,842,481,468,781,564,416,179,405,35,
819,608,624,367,98,643,448,2,460,676,440,240,130,146,184,
185,430,65,807,377,82,121,708,239,310,138,596,730,575,477,
851,797,247,27,85,586,307,779,326,494,856,324,827,96,748,
13,397,125,688,702,92,293,716,277,140,112,4,80,855,839,1,
413,347,584,493,289,696,19,751,379,76,73,115,6,590,183,734,
197,483,217,344,330,400,186,243,587,220,780,200,793,246,824,
41,735,579,81,703,322,760,720,139,480,490,91,814,813,163,
152,488,763,263,425,410,576,120,319,668,150,160,302,491,515,
260,145,428,97,251,395,272,252,18,106,358,854,485,144,550,
131,133,378,68,102,104,58,361,275,209,697,582,338,742,589,
325,408,229,28,304,191,189,110,126,486,211,547,533,70,215,
670,249,36,581,389,605,331,518,442,822
};
// used to sort pairs during pairs selection
struct PairStat
{
double mean;
int idx;
};
struct sortMean
{
bool operator()( const PairStat& a, const PairStat& b ) const
{
return a.mean < b.mean;
}
};
void FREAK::buildPattern()
{
if( patternScale == patternScale0 && nOctaves == nOctaves0 && !patternLookup.empty() )
return;
nOctaves0 = nOctaves;
patternScale0 = patternScale;
patternLookup.resize(FREAK_NB_SCALES*FREAK_NB_ORIENTATION*FREAK_NB_POINTS);
double scaleStep = std::pow(2.0, (double)(nOctaves)/FREAK_NB_SCALES ); // 2 ^ (nOctaves / nbScales)
double scalingFactor, alpha, beta, theta = 0;
// pattern definition, radius normalized to 1.0 (outer point position+sigma=1.0)
const int n[8] = {6,6,6,6,6,6,6,1}; // number of points on each concentric circle (from outer to inner)
const double bigR(2.0/3.0); // bigger radius
const double smallR(2.0/24.0); // smaller radius
const double unitSpace( (bigR-smallR)/21.0 ); // define spaces between concentric circles (from center to outer: 1,2,3,4,5,6)
// radii of the concentric circles (from outer to inner)
const double radius[8] = {bigR, bigR-6*unitSpace, bigR-11*unitSpace, bigR-15*unitSpace, bigR-18*unitSpace, bigR-20*unitSpace, smallR, 0.0};
// sigma of pattern points (each group of 6 points on a concentric circle has the same sigma)
const double sigma[8] = {radius[0]/2.0, radius[1]/2.0, radius[2]/2.0,
radius[3]/2.0, radius[4]/2.0, radius[5]/2.0,
radius[6]/2.0, radius[6]/2.0
};
// fill the lookup table
for( int scaleIdx=0; scaleIdx < FREAK_NB_SCALES; ++scaleIdx )
{
patternSizes[scaleIdx] = 0; // proper initialization
scalingFactor = std::pow(scaleStep,scaleIdx); //scale of the pattern, scaleStep ^ scaleIdx
for( int orientationIdx = 0; orientationIdx < FREAK_NB_ORIENTATION; ++orientationIdx )
{
theta = double(orientationIdx)* 2*CV_PI/double(FREAK_NB_ORIENTATION); // orientation of the pattern
int pointIdx = 0;
PatternPoint* patternLookupPtr = &patternLookup[0];
for( size_t i = 0; i < 8; ++i )
{
for( int k = 0 ; k < n[i]; ++k )
{
beta = CV_PI/n[i] * (i%2); // orientation offset so that groups of points on each circle are staggered
alpha = double(k)* 2*CV_PI/double(n[i])+beta+theta;
// add the point to the look-up table
PatternPoint& point = patternLookupPtr[ scaleIdx*FREAK_NB_ORIENTATION*FREAK_NB_POINTS+orientationIdx*FREAK_NB_POINTS+pointIdx ];
point.x = static_cast<float>(radius[i] * cos(alpha) * scalingFactor * patternScale);
point.y = static_cast<float>(radius[i] * sin(alpha) * scalingFactor * patternScale);
point.sigma = static_cast<float>(sigma[i] * scalingFactor * patternScale);
// adapt the sizeList if necessary
const int sizeMax = static_cast<int>(ceil((radius[i]+sigma[i])*scalingFactor*patternScale)) + 1;
if( patternSizes[scaleIdx] < sizeMax )
patternSizes[scaleIdx] = sizeMax;
++pointIdx;
}
}
}
}
// build the list of orientation pairs
orientationPairs[0].i=0; orientationPairs[0].j=3; orientationPairs[1].i=1; orientationPairs[1].j=4; orientationPairs[2].i=2; orientationPairs[2].j=5;
orientationPairs[3].i=0; orientationPairs[3].j=2; orientationPairs[4].i=1; orientationPairs[4].j=3; orientationPairs[5].i=2; orientationPairs[5].j=4;
orientationPairs[6].i=3; orientationPairs[6].j=5; orientationPairs[7].i=4; orientationPairs[7].j=0; orientationPairs[8].i=5; orientationPairs[8].j=1;
orientationPairs[9].i=6; orientationPairs[9].j=9; orientationPairs[10].i=7; orientationPairs[10].j=10; orientationPairs[11].i=8; orientationPairs[11].j=11;
orientationPairs[12].i=6; orientationPairs[12].j=8; orientationPairs[13].i=7; orientationPairs[13].j=9; orientationPairs[14].i=8; orientationPairs[14].j=10;
orientationPairs[15].i=9; orientationPairs[15].j=11; orientationPairs[16].i=10; orientationPairs[16].j=6; orientationPairs[17].i=11; orientationPairs[17].j=7;
orientationPairs[18].i=12; orientationPairs[18].j=15; orientationPairs[19].i=13; orientationPairs[19].j=16; orientationPairs[20].i=14; orientationPairs[20].j=17;
orientationPairs[21].i=12; orientationPairs[21].j=14; orientationPairs[22].i=13; orientationPairs[22].j=15; orientationPairs[23].i=14; orientationPairs[23].j=16;
orientationPairs[24].i=15; orientationPairs[24].j=17; orientationPairs[25].i=16; orientationPairs[25].j=12; orientationPairs[26].i=17; orientationPairs[26].j=13;
orientationPairs[27].i=18; orientationPairs[27].j=21; orientationPairs[28].i=19; orientationPairs[28].j=22; orientationPairs[29].i=20; orientationPairs[29].j=23;
orientationPairs[30].i=18; orientationPairs[30].j=20; orientationPairs[31].i=19; orientationPairs[31].j=21; orientationPairs[32].i=20; orientationPairs[32].j=22;
orientationPairs[33].i=21; orientationPairs[33].j=23; orientationPairs[34].i=22; orientationPairs[34].j=18; orientationPairs[35].i=23; orientationPairs[35].j=19;
orientationPairs[36].i=24; orientationPairs[36].j=27; orientationPairs[37].i=25; orientationPairs[37].j=28; orientationPairs[38].i=26; orientationPairs[38].j=29;
orientationPairs[39].i=30; orientationPairs[39].j=33; orientationPairs[40].i=31; orientationPairs[40].j=34; orientationPairs[41].i=32; orientationPairs[41].j=35;
orientationPairs[42].i=36; orientationPairs[42].j=39; orientationPairs[43].i=37; orientationPairs[43].j=40; orientationPairs[44].i=38; orientationPairs[44].j=41;
for( unsigned m = FREAK_NB_ORIENPAIRS; m--; )
{
const float dx = patternLookup[orientationPairs[m].i].x-patternLookup[orientationPairs[m].j].x;
const float dy = patternLookup[orientationPairs[m].i].y-patternLookup[orientationPairs[m].j].y;
const float norm_sq = (dx*dx+dy*dy);
orientationPairs[m].weight_dx = int((dx/(norm_sq))*4096.0+0.5);
orientationPairs[m].weight_dy = int((dy/(norm_sq))*4096.0+0.5);
}
// build the list of description pairs
std::vector<DescriptionPair> allPairs;
for( unsigned int i = 1; i < (unsigned int)FREAK_NB_POINTS; ++i )
{
// (generate all the pairs)
for( unsigned int j = 0; (unsigned int)j < i; ++j )
{
DescriptionPair pair = {(uchar)i,(uchar)j};
allPairs.push_back(pair);
}
}
// Input vector provided
if( !selectedPairs0.empty() )
{
if( (int)selectedPairs0.size() == FREAK_NB_PAIRS )
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[selectedPairs0.at(i)];
}
else
{
CV_Error(Error::StsVecLengthErr, "Input vector does not match the required size");
}
}
else // default selected pairs
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[FREAK_DEF_PAIRS[i]];
}
}
void FREAK::computeImpl( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const
{
Mat image = _image.getMat();
if( image.empty() )
return;
if( keypoints.empty() )
return;
((FREAK*)this)->buildPattern();
// Convert to gray if not already
Mat grayImage = image;
// if( image.channels() > 1 )
// cvtColor( image, grayImage, COLOR_BGR2GRAY );
// Use 32-bit integers if we won't overflow in the integral image
if ((image.depth() == CV_8U || image.depth() == CV_8S) &&
(image.rows * image.cols) < 8388608 ) // 8388608 = 2 ^ (32 - 8(bit depth) - 1(sign bit))
{
// Create the integral image appropriate for our type & usage
if (image.depth() == CV_8U)
computeDescriptors<uchar, int>(grayImage, keypoints, _descriptors);
else if (image.depth() == CV_8S)
computeDescriptors<char, int>(grayImage, keypoints, _descriptors);
else
CV_Error( Error::StsUnsupportedFormat, "" );
} else {
// Create the integral image appropriate for our type & usage
if ( image.depth() == CV_8U )
computeDescriptors<uchar, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_8S )
computeDescriptors<char, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_16U )
computeDescriptors<ushort, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_16S )
computeDescriptors<short, double>(grayImage, keypoints, _descriptors);
else
CV_Error( Error::StsUnsupportedFormat, "" );
}
}
template <typename srcMatType>
void FREAK::extractDescriptor(srcMatType *pointsValue, void ** ptr) const
{
std::bitset<FREAK_NB_PAIRS>** ptrScalar = (std::bitset<FREAK_NB_PAIRS>**) ptr;
// extracting descriptor preserving the order of SSE version
int cnt = 0;
for( int n = 7; n < FREAK_NB_PAIRS; n += 128)
{
for( int m = 8; m--; )
{
int nm = n-m;
for(int kk = nm+15*8; kk >= nm; kk-=8, ++cnt)
{
(*ptrScalar)->set(kk, pointsValue[descriptionPairs[cnt].i] >= pointsValue[descriptionPairs[cnt].j]);
}
}
}
--(*ptrScalar);
}
#if CV_SSE2
template <>
void FREAK::extractDescriptor(uchar *pointsValue, void ** ptr) const
{
__m128i** ptrSSE = (__m128i**) ptr;
// note that the comparison order is modified in each block (but the first 128 comparisons remain globally the same --> does not affect the 128/384-bit segmented matching strategy)
int cnt = 0;
for( int n = FREAK_NB_PAIRS/128; n-- ; )
{
__m128i result128 = _mm_setzero_si128();
for( int m = 128/16; m--; cnt += 16 )
{
__m128i operand1 = _mm_set_epi8(pointsValue[descriptionPairs[cnt+0].i],
pointsValue[descriptionPairs[cnt+1].i],
pointsValue[descriptionPairs[cnt+2].i],
pointsValue[descriptionPairs[cnt+3].i],
pointsValue[descriptionPairs[cnt+4].i],
pointsValue[descriptionPairs[cnt+5].i],
pointsValue[descriptionPairs[cnt+6].i],
pointsValue[descriptionPairs[cnt+7].i],
pointsValue[descriptionPairs[cnt+8].i],
pointsValue[descriptionPairs[cnt+9].i],
pointsValue[descriptionPairs[cnt+10].i],
pointsValue[descriptionPairs[cnt+11].i],
pointsValue[descriptionPairs[cnt+12].i],
pointsValue[descriptionPairs[cnt+13].i],
pointsValue[descriptionPairs[cnt+14].i],
pointsValue[descriptionPairs[cnt+15].i]);
__m128i operand2 = _mm_set_epi8(pointsValue[descriptionPairs[cnt+0].j],
pointsValue[descriptionPairs[cnt+1].j],
pointsValue[descriptionPairs[cnt+2].j],
pointsValue[descriptionPairs[cnt+3].j],
pointsValue[descriptionPairs[cnt+4].j],
pointsValue[descriptionPairs[cnt+5].j],
pointsValue[descriptionPairs[cnt+6].j],
pointsValue[descriptionPairs[cnt+7].j],
pointsValue[descriptionPairs[cnt+8].j],
pointsValue[descriptionPairs[cnt+9].j],
pointsValue[descriptionPairs[cnt+10].j],
pointsValue[descriptionPairs[cnt+11].j],
pointsValue[descriptionPairs[cnt+12].j],
pointsValue[descriptionPairs[cnt+13].j],
pointsValue[descriptionPairs[cnt+14].j],
pointsValue[descriptionPairs[cnt+15].j]);
__m128i workReg = _mm_min_epu8(operand1, operand2); // emulated "not less than" for 8-bit UNSIGNED integers
workReg = _mm_cmpeq_epi8(workReg, operand2); // emulated "not less than" for 8-bit UNSIGNED integers
workReg = _mm_and_si128(_mm_set1_epi16(short(0x8080 >> m)), workReg); // merge the last 16 bits with the 128bits std::vector until full
result128 = _mm_or_si128(result128, workReg);
}
(**ptrSSE) = result128;
++(*ptrSSE);
}
(*ptrSSE) -= 8;
}
#endif
template <typename srcMatType, typename iiMatType>
void FREAK::computeDescriptors( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const {
Mat image = _image.getMat();
Mat imgIntegral;
integral(image, imgIntegral, DataType<iiMatType>::type);
std::vector<int> kpScaleIdx(keypoints.size()); // used to save pattern scale index corresponding to each keypoints
const std::vector<int>::iterator ScaleIdxBegin = kpScaleIdx.begin(); // used in std::vector erase function
const std::vector<cv::KeyPoint>::iterator kpBegin = keypoints.begin(); // used in std::vector erase function
const float sizeCst = static_cast<float>(FREAK_NB_SCALES/(FREAK_LOG2* nOctaves));
srcMatType pointsValue[FREAK_NB_POINTS];
int thetaIdx = 0;
int direction0;
int direction1;
// compute the scale index corresponding to the keypoint size and remove keypoints close to the border
if( scaleNormalized )
{
for( size_t k = keypoints.size(); k--; )
{
//loop idiom: is k non-zero? if so, decrement it and continue
kpScaleIdx[k] = std::max( (int)(std::log(keypoints[k].size/FREAK_SMALLEST_KP_SIZE)*sizeCst+0.5) ,0);
if( kpScaleIdx[k] >= FREAK_NB_SCALES )
kpScaleIdx[k] = FREAK_NB_SCALES-1;
if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] || //check if the description at this specific position and scale fits inside the image
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
)
{
keypoints.erase(kpBegin+k);
kpScaleIdx.erase(ScaleIdxBegin+k);
}
}
}
else
{
const int scIdx = std::max( (int)(1.0986122886681*sizeCst+0.5) ,0);
for( size_t k = keypoints.size(); k--; )
{
kpScaleIdx[k] = scIdx; // equivalent to the formula when the scale is normalized with a constant keypoint size of keypoints[k].size = 3*SMALLEST_KP_SIZE
if( kpScaleIdx[k] >= FREAK_NB_SCALES )
{
kpScaleIdx[k] = FREAK_NB_SCALES-1;
}
if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
)
{
keypoints.erase(kpBegin+k);
kpScaleIdx.erase(ScaleIdxBegin+k);
}
}
}
// allocate descriptor memory, estimate orientations, extract descriptors
if( !extAll )
{
// extract the best comparisons only
_descriptors.create((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
void *ptr = descriptors.data+(keypoints.size()-1)*descriptors.step[0];
for( size_t k = keypoints.size(); k--; ) {
// estimate orientation (gradient)
if( !orientationNormalized )
{
thetaIdx = 0; // assign 0° to all keypoints
keypoints[k].angle = 0.0;
}
else
{
// get the points intensity value in the un-rotated pattern
for( int i = FREAK_NB_POINTS; i--; ) {
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
keypoints[k].pt.x, keypoints[k].pt.y,
kpScaleIdx[k], 0, i);
}
direction0 = 0;
direction1 = 0;
for( int m = 45; m--; )
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
direction0 += delta*(orientationPairs[m].weight_dx)/2048;
direction1 += delta*(orientationPairs[m].weight_dy)/2048;
}
keypoints[k].angle = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI));//estimate orientation
thetaIdx = int(FREAK_NB_ORIENTATION*keypoints[k].angle*(1/360.0)+0.5);
if( thetaIdx < 0 )
thetaIdx += FREAK_NB_ORIENTATION;
if( thetaIdx >= FREAK_NB_ORIENTATION )
thetaIdx -= FREAK_NB_ORIENTATION;
}
// extract descriptor at the computed orientation
for( int i = FREAK_NB_POINTS; i--; ) {
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
keypoints[k].pt.x, keypoints[k].pt.y,
kpScaleIdx[k], thetaIdx, i);
}
// Extract descriptor
extractDescriptor<srcMatType>(pointsValue, &ptr);
}
}
else // extract all possible comparisons for selection
{
_descriptors.create((int)keypoints.size(), 128, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
for( size_t k = keypoints.size(); k--; )
{
//estimate orientation (gradient)
if( !orientationNormalized )
{
thetaIdx = 0;//assign 0° to all keypoints
keypoints[k].angle = 0.0;
}
else
{
//get the points intensity value in the un-rotated pattern
for( int i = FREAK_NB_POINTS;i--; )
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
keypoints[k].pt.x,keypoints[k].pt.y,
kpScaleIdx[k], 0, i);
direction0 = 0;
direction1 = 0;
for( int m = 45; m--; )
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
direction0 += delta*(orientationPairs[m].weight_dx)/2048;
direction1 += delta*(orientationPairs[m].weight_dy)/2048;
}
keypoints[k].angle = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI)); //estimate orientation
thetaIdx = int(FREAK_NB_ORIENTATION*keypoints[k].angle*(1/360.0)+0.5);
if( thetaIdx < 0 )
thetaIdx += FREAK_NB_ORIENTATION;
if( thetaIdx >= FREAK_NB_ORIENTATION )
thetaIdx -= FREAK_NB_ORIENTATION;
}
// get the points intensity value in the rotated pattern
for( int i = FREAK_NB_POINTS; i--; ) {
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
keypoints[k].pt.x, keypoints[k].pt.y,
kpScaleIdx[k], thetaIdx, i);
}
int cnt(0);
for( int i = 1; i < FREAK_NB_POINTS; ++i )
{
//(generate all the pairs)
for( int j = 0; j < i; ++j )
{
ptr->set(cnt, pointsValue[i] >= pointsValue[j] );
++cnt;
}
}
--ptr;
}
}
}
// simply takes the average over a square patch, not even a Gaussian approximation
template <typename imgType, typename iiType>
imgType FREAK::meanIntensity( InputArray _image, InputArray _integral,
const float kp_x,
const float kp_y,
const unsigned int scale,
const unsigned int rot,
const unsigned int point) const {
Mat image = _image.getMat(), integral = _integral.getMat();
// get point position in image
const PatternPoint& FreakPoint = patternLookup[scale*FREAK_NB_ORIENTATION*FREAK_NB_POINTS + rot*FREAK_NB_POINTS + point];
const float xf = FreakPoint.x+kp_x;
const float yf = FreakPoint.y+kp_y;
const int x = int(xf);
const int y = int(yf);
// get the sigma:
const float radius = FreakPoint.sigma;
// calculate output:
if( radius < 0.5 )
{
// interpolation multipliers:
const int r_x = static_cast<int>((xf-x)*1024);
const int r_y = static_cast<int>((yf-y)*1024);
const int r_x_1 = (1024-r_x);
const int r_y_1 = (1024-r_y);
unsigned int ret_val;
// linear interpolation:
ret_val = r_x_1*r_y_1*int(image.at<imgType>(y , x ))
+ r_x *r_y_1*int(image.at<imgType>(y , x+1))
+ r_x_1*r_y *int(image.at<imgType>(y+1, x ))
+ r_x *r_y *int(image.at<imgType>(y+1, x+1));
//return the rounded mean
ret_val += 2 * 1024 * 1024;
return static_cast<imgType>(ret_val / (4 * 1024 * 1024));
}
// expected case:
// calculate borders
const int x_left = int(xf-radius+0.5);
const int y_top = int(yf-radius+0.5);
const int x_right = int(xf+radius+1.5);//integral image is 1px wider
const int y_bottom = int(yf+radius+1.5);//integral image is 1px higher
iiType ret_val;
ret_val = integral.at<iiType>(y_bottom,x_right);//bottom right corner
ret_val -= integral.at<iiType>(y_bottom,x_left);
ret_val += integral.at<iiType>(y_top,x_left);
ret_val -= integral.at<iiType>(y_top,x_right);
ret_val = ret_val/( (x_right-x_left)* (y_bottom-y_top) );
//~ std::cout<<integral.step[1]<<std::endl;
return static_cast<imgType>(ret_val);
}
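// Box-sum sanity sketch (illustrative helper, not used by FREAK): with S the
// CV_32S integral image of I (one pixel wider/taller), the mean of I over
// rows [y0,y1) x cols [x0,x1) is the same four-corner expression used above:
static double boxMean( const Mat& S, int x0, int y0, int x1, int y1 )
{
    int sum = S.at<int>(y1, x1) - S.at<int>(y1, x0)
            - S.at<int>(y0, x1) + S.at<int>(y0, x0);
    return double(sum) / ((x1 - x0) * (y1 - y0));
}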
// pair selection algorithm from a set of training images and corresponding keypoints
std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
, std::vector<std::vector<KeyPoint> >& keypoints
, const double corrThresh
, bool verbose )
{
extAll = true;
// compute descriptors with all pairs
Mat descriptors;
if( verbose )
std::cout << "Number of images: " << images.size() << std::endl;
for( size_t i = 0;i < images.size(); ++i )
{
Mat descriptorsTmp;
computeImpl(images[i],keypoints[i],descriptorsTmp);
descriptors.push_back(descriptorsTmp);
}
if( verbose )
std::cout << "number of keypoints: " << descriptors.rows << std::endl;
//descriptors in floating-point format (each bit stored as a float)
Mat descriptorsFloat = Mat::zeros(descriptors.rows, 903, CV_32F);
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(descriptors.rows-1)*descriptors.step[0]);
for( int m = descriptors.rows; m--; )
{
for( int n = 903; n--; )
{
if( ptr->test(n) == true )
descriptorsFloat.at<float>(m,n)=1.0f;
}
--ptr;
}
std::vector<PairStat> pairStat;
for( int n = 903; n--; )
{
// the higher the variance, the better --> mean = 0.5
PairStat tmp = { fabs( mean(descriptorsFloat.col(n))[0]-0.5 ) ,n};
pairStat.push_back(tmp);
}
std::sort( pairStat.begin(),pairStat.end(), sortMean() );
std::vector<PairStat> bestPairs;
for( int m = 0; m < 903; ++m )
{
if( verbose )
std::cout << m << ":" << bestPairs.size() << " " << std::flush;
double corrMax(0);
for( size_t n = 0; n < bestPairs.size(); ++n )
{
int idxA = bestPairs[n].idx;
int idxB = pairStat[m].idx;
double corr(0);
// compute correlation between 2 pairs
corr = fabs(compareHist(descriptorsFloat.col(idxA), descriptorsFloat.col(idxB), HISTCMP_CORREL));
if( corr > corrMax )
{
corrMax = corr;
if( corrMax >= corrTresh )
break;
}
}
if( corrMax < corrTresh/*0.7*/ )
bestPairs.push_back(pairStat[m]);
if( bestPairs.size() >= 512 )
{
if( verbose )
std::cout << m << std::endl;
break;
}
}
std::vector<int> idxBestPairs;
if( (int)bestPairs.size() >= FREAK_NB_PAIRS )
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
idxBestPairs.push_back(bestPairs[i].idx);
}
else
{
if( verbose )
std::cout << "correlation threshold too small (restrictive)" << std::endl;
CV_Error(Error::StsError, "correlation threshold too small (restrictive)");
}
extAll = false;
return idxBestPairs;
}
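// A hedged usage sketch for the training step above: detect keypoints on a few
// representative grayscale images, then let selectPairs() keep the 512 least
// correlated pairs. FAST and the 0.7 correlation threshold are illustrative
// assumptions, not requirements of this module:
static std::vector<int> trainFreakPairs( const std::vector<Mat>& trainImages )
{
std::vector<std::vector<KeyPoint> > trainKeypoints( trainImages.size() );
for( size_t i = 0; i < trainImages.size(); ++i )
FAST( trainImages[i], trainKeypoints[i], 20 ); // any keypoint detector works
FREAK freak;
return freak.selectPairs( trainImages, trainKeypoints, 0.7, true );
}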
/*
// create an image showing the FREAK sampling pattern
void FREAKImpl::drawPattern()
{
Mat pattern = Mat::zeros(1000, 1000, CV_8UC3) + Scalar(255,255,255);
int sFac = 500 / patternScale;
for( int n = 0; n < kNB_POINTS; ++n )
{
PatternPoint& pt = patternLookup[n];
circle(pattern, Point( pt.x*sFac,pt.y*sFac)+Point(500,500), pt.sigma*sFac, Scalar(0,0,255),2);
// rectangle(pattern, Point( (pt.x-pt.sigma)*sFac,(pt.y-pt.sigma)*sFac)+Point(500,500), Point( (pt.x+pt.sigma)*sFac,(pt.y+pt.sigma)*sFac)+Point(500,500), Scalar(0,0,255),2);
circle(pattern, Point( pt.x*sFac,pt.y*sFac)+Point(500,500), 1, Scalar(0,0,0),3);
std::ostringstream oss;
oss << n;
putText( pattern, oss.str(), Point( pt.x*sFac,pt.y*sFac)+Point(500,500), FONT_HERSHEY_SIMPLEX,0.5, Scalar(0,0,0), 1);
}
imshow( "FreakDescriptorExtractor pattern", pattern );
waitKey(0);
}
*/
// -------------------------------------------------
/* FREAK interface implementation */
FREAK::FREAK( bool _orientationNormalized, bool _scaleNormalized
, float _patternScale, int _nOctaves, const std::vector<int>& _selectedPairs )
: orientationNormalized(_orientationNormalized), scaleNormalized(_scaleNormalized),
patternScale(_patternScale), nOctaves(_nOctaves), extAll(false), nOctaves0(0), selectedPairs0(_selectedPairs)
{
}
FREAK::~FREAK()
{
}
int FREAK::descriptorSize() const
{
return FREAK_NB_PAIRS / 8; // descriptor length in bytes
}
int FREAK::descriptorType() const
{
return CV_8U;
}
int FREAK::defaultNorm() const
{
return NORM_HAMMING;
}
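// Since defaultNorm() reports NORM_HAMMING, FREAK descriptors are meant to be
// matched with a Hamming-distance brute-force matcher. A minimal sketch; the
// two descriptor matrices are assumed to come from FREAK::compute():
static void matchFreak( const Mat& descriptors1, const Mat& descriptors2, std::vector<DMatch>& matches )
{
BFMatcher matcher( NORM_HAMMING ); // bit-count distance on packed CV_8U rows
matcher.match( descriptors1, descriptors2, matches );
}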
} // END NAMESPACE CV

View File

@ -1,19 +0,0 @@
// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 16'
#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
#undef SMOOTHED
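// The SMOOTHED(y,x) tests above compare box-smoothed intensities around the
// keypoint 'pt' using the integral image 'sum'. A sketch of the smoothedSum()
// helper they rely on, assuming a 9x9 smoothing box; the kernel size and exact
// indexing are assumptions, not this file's literal code:
static inline int smoothedSumSketch( const cv::Mat& sum, const cv::KeyPoint& pt, int y, int x )
{
static const int HALF_KERNEL = 9 / 2; // 9x9 box filter
const int img_y = (int)(pt.pt.y + 0.5f) + y; // test-point row
const int img_x = (int)(pt.pt.x + 0.5f) + x; // test-point column
return sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1)
- sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
- sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}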

View File

@ -1,35 +0,0 @@
// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 32'
#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
#undef SMOOTHED

View File

@ -1,67 +0,0 @@
// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 64'
#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
desc[32] = (uchar)(((SMOOTHED(16, 12) < SMOOTHED(0, 19)) << 7) + ((SMOOTHED(4, 3) < SMOOTHED(6, 0)) << 6) + ((SMOOTHED(-2, -1) < SMOOTHED(2, 17)) << 5) + ((SMOOTHED(8, 1) < SMOOTHED(3, 1)) << 4) + ((SMOOTHED(-12, -1) < SMOOTHED(-11, 0)) << 3) + ((SMOOTHED(-11, 2) < SMOOTHED(7, 9)) << 2) + ((SMOOTHED(-1, 3) < SMOOTHED(-19, 4)) << 1) + ((SMOOTHED(-1, -11) < SMOOTHED(-1, 3)) << 0));
desc[33] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-10, -4)) << 7) + ((SMOOTHED(-2, 3) < SMOOTHED(6, 11)) << 6) + ((SMOOTHED(3, 7) < SMOOTHED(-9, -8)) << 5) + ((SMOOTHED(24, -14) < SMOOTHED(-2, -10)) << 4) + ((SMOOTHED(-3, -3) < SMOOTHED(-18, -6)) << 3) + ((SMOOTHED(-13, -10) < SMOOTHED(-7, -1)) << 2) + ((SMOOTHED(2, -7) < SMOOTHED(9, -6)) << 1) + ((SMOOTHED(2, -4) < SMOOTHED(6, -13)) << 0));
desc[34] = (uchar)(((SMOOTHED(4, -4) < SMOOTHED(-2, 3)) << 7) + ((SMOOTHED(-4, 2) < SMOOTHED(9, 13)) << 6) + ((SMOOTHED(-11, 5) < SMOOTHED(-6, -11)) << 5) + ((SMOOTHED(4, -2) < SMOOTHED(11, -9)) << 4) + ((SMOOTHED(-19, 0) < SMOOTHED(-23, -5)) << 3) + ((SMOOTHED(-5, -7) < SMOOTHED(-3, -6)) << 2) + ((SMOOTHED(-6, -4) < SMOOTHED(12, 14)) << 1) + ((SMOOTHED(12, -11) < SMOOTHED(-8, -16)) << 0));
desc[35] = (uchar)(((SMOOTHED(-21, 15) < SMOOTHED(-12, 6)) << 7) + ((SMOOTHED(-2, -1) < SMOOTHED(-8, 16)) << 6) + ((SMOOTHED(6, -1) < SMOOTHED(-8, -2)) << 5) + ((SMOOTHED(1, -1) < SMOOTHED(-9, 8)) << 4) + ((SMOOTHED(3, -4) < SMOOTHED(-2, -2)) << 3) + ((SMOOTHED(-7, 0) < SMOOTHED(4, -8)) << 2) + ((SMOOTHED(11, -11) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(2, 3) < SMOOTHED(11, 7)) << 0));
desc[36] = (uchar)(((SMOOTHED(-7, -4) < SMOOTHED(-9, -6)) << 7) + ((SMOOTHED(3, -7) < SMOOTHED(-5, 0)) << 6) + ((SMOOTHED(3, -7) < SMOOTHED(-10, -5)) << 5) + ((SMOOTHED(-3, -1) < SMOOTHED(8, -10)) << 4) + ((SMOOTHED(0, 8) < SMOOTHED(5, 1)) << 3) + ((SMOOTHED(9, 0) < SMOOTHED(1, 16)) << 2) + ((SMOOTHED(8, 4) < SMOOTHED(-11, -3)) << 1) + ((SMOOTHED(-15, 9) < SMOOTHED(8, 17)) << 0));
desc[37] = (uchar)(((SMOOTHED(0, 2) < SMOOTHED(-9, 17)) << 7) + ((SMOOTHED(-6, -11) < SMOOTHED(-10, -3)) << 6) + ((SMOOTHED(1, 1) < SMOOTHED(15, -8)) << 5) + ((SMOOTHED(-12, -13) < SMOOTHED(-2, 4)) << 4) + ((SMOOTHED(-6, 4) < SMOOTHED(-6, -10)) << 3) + ((SMOOTHED(5, -7) < SMOOTHED(7, -5)) << 2) + ((SMOOTHED(10, 6) < SMOOTHED(8, 9)) << 1) + ((SMOOTHED(-5, 7) < SMOOTHED(-18, -3)) << 0));
desc[38] = (uchar)(((SMOOTHED(-6, 3) < SMOOTHED(5, 4)) << 7) + ((SMOOTHED(-10, -13) < SMOOTHED(-5, -3)) << 6) + ((SMOOTHED(-11, 2) < SMOOTHED(-16, 0)) << 5) + ((SMOOTHED(7, -21) < SMOOTHED(-5, -13)) << 4) + ((SMOOTHED(-14, -14) < SMOOTHED(-4, -4)) << 3) + ((SMOOTHED(4, 9) < SMOOTHED(7, -3)) << 2) + ((SMOOTHED(4, 11) < SMOOTHED(10, -4)) << 1) + ((SMOOTHED(6, 17) < SMOOTHED(9, 17)) << 0));
desc[39] = (uchar)(((SMOOTHED(-10, 8) < SMOOTHED(0, -11)) << 7) + ((SMOOTHED(-6, -16) < SMOOTHED(-6, 8)) << 6) + ((SMOOTHED(-13, 5) < SMOOTHED(10, -5)) << 5) + ((SMOOTHED(3, 2) < SMOOTHED(12, 16)) << 4) + ((SMOOTHED(13, -8) < SMOOTHED(0, -6)) << 3) + ((SMOOTHED(10, 0) < SMOOTHED(4, -11)) << 2) + ((SMOOTHED(8, 5) < SMOOTHED(10, -2)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(-13, 3)) << 0));
desc[40] = (uchar)(((SMOOTHED(2, 4) < SMOOTHED(-7, -3)) << 7) + ((SMOOTHED(-14, -2) < SMOOTHED(-11, 16)) << 6) + ((SMOOTHED(11, -6) < SMOOTHED(7, 6)) << 5) + ((SMOOTHED(-3, 15) < SMOOTHED(8, -10)) << 4) + ((SMOOTHED(-3, 8) < SMOOTHED(12, -12)) << 3) + ((SMOOTHED(-13, 6) < SMOOTHED(-14, 7)) << 2) + ((SMOOTHED(-11, -5) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(7, -6) < SMOOTHED(6, 3)) << 0));
desc[41] = (uchar)(((SMOOTHED(-4, 10) < SMOOTHED(5, 1)) << 7) + ((SMOOTHED(9, 16) < SMOOTHED(10, 13)) << 6) + ((SMOOTHED(-17, 10) < SMOOTHED(2, 8)) << 5) + ((SMOOTHED(-5, 1) < SMOOTHED(4, -4)) << 4) + ((SMOOTHED(-14, 8) < SMOOTHED(-5, 2)) << 3) + ((SMOOTHED(4, -9) < SMOOTHED(-6, -3)) << 2) + ((SMOOTHED(3, -7) < SMOOTHED(-10, 0)) << 1) + ((SMOOTHED(-2, -8) < SMOOTHED(-10, 4)) << 0));
desc[42] = (uchar)(((SMOOTHED(-8, 5) < SMOOTHED(-9, 24)) << 7) + ((SMOOTHED(2, -8) < SMOOTHED(8, -9)) << 6) + ((SMOOTHED(-4, 17) < SMOOTHED(-5, 2)) << 5) + ((SMOOTHED(14, 0) < SMOOTHED(-9, 9)) << 4) + ((SMOOTHED(11, 15) < SMOOTHED(-6, 5)) << 3) + ((SMOOTHED(-8, 1) < SMOOTHED(-3, 4)) << 2) + ((SMOOTHED(9, -21) < SMOOTHED(10, 2)) << 1) + ((SMOOTHED(2, -1) < SMOOTHED(4, 11)) << 0));
desc[43] = (uchar)(((SMOOTHED(24, 3) < SMOOTHED(2, -2)) << 7) + ((SMOOTHED(-8, 17) < SMOOTHED(-14, -10)) << 6) + ((SMOOTHED(6, 5) < SMOOTHED(-13, 7)) << 5) + ((SMOOTHED(11, 10) < SMOOTHED(0, -1)) << 4) + ((SMOOTHED(4, 6) < SMOOTHED(-10, 6)) << 3) + ((SMOOTHED(-12, -2) < SMOOTHED(5, 6)) << 2) + ((SMOOTHED(3, -1) < SMOOTHED(8, -15)) << 1) + ((SMOOTHED(1, -4) < SMOOTHED(-7, 11)) << 0));
desc[44] = (uchar)(((SMOOTHED(1, 11) < SMOOTHED(5, 0)) << 7) + ((SMOOTHED(6, -12) < SMOOTHED(10, 1)) << 6) + ((SMOOTHED(-3, -2) < SMOOTHED(-1, 4)) << 5) + ((SMOOTHED(-2, -11) < SMOOTHED(-1, 12)) << 4) + ((SMOOTHED(7, -8) < SMOOTHED(-20, -18)) << 3) + ((SMOOTHED(2, 0) < SMOOTHED(-9, 2)) << 2) + ((SMOOTHED(-13, -1) < SMOOTHED(-16, 2)) << 1) + ((SMOOTHED(3, -1) < SMOOTHED(-5, -17)) << 0));
desc[45] = (uchar)(((SMOOTHED(15, 8) < SMOOTHED(3, -14)) << 7) + ((SMOOTHED(-13, -12) < SMOOTHED(6, 15)) << 6) + ((SMOOTHED(2, -8) < SMOOTHED(2, 6)) << 5) + ((SMOOTHED(6, 22) < SMOOTHED(-3, -23)) << 4) + ((SMOOTHED(-2, -7) < SMOOTHED(-6, 0)) << 3) + ((SMOOTHED(13, -10) < SMOOTHED(-6, 6)) << 2) + ((SMOOTHED(6, 7) < SMOOTHED(-10, 12)) << 1) + ((SMOOTHED(-6, 7) < SMOOTHED(-2, 11)) << 0));
desc[46] = (uchar)(((SMOOTHED(0, -22) < SMOOTHED(-2, -17)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-11, -14)) << 6) + ((SMOOTHED(-2, -8) < SMOOTHED(7, 12)) << 5) + ((SMOOTHED(12, -5) < SMOOTHED(7, -13)) << 4) + ((SMOOTHED(2, -2) < SMOOTHED(-7, 6)) << 3) + ((SMOOTHED(0, 8) < SMOOTHED(-3, 23)) << 2) + ((SMOOTHED(6, 12) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(-21, -10) < SMOOTHED(10, 8)) << 0));
desc[47] = (uchar)(((SMOOTHED(-3, 0) < SMOOTHED(7, 15)) << 7) + ((SMOOTHED(7, -6) < SMOOTHED(-5, -12)) << 6) + ((SMOOTHED(-21, -10) < SMOOTHED(12, -11)) << 5) + ((SMOOTHED(-5, -11) < SMOOTHED(8, -11)) << 4) + ((SMOOTHED(5, 0) < SMOOTHED(-11, -1)) << 3) + ((SMOOTHED(8, -9) < SMOOTHED(7, -1)) << 2) + ((SMOOTHED(11, -23) < SMOOTHED(21, -5)) << 1) + ((SMOOTHED(0, -5) < SMOOTHED(-8, 6)) << 0));
desc[48] = (uchar)(((SMOOTHED(-6, 8) < SMOOTHED(8, 12)) << 7) + ((SMOOTHED(-7, 5) < SMOOTHED(3, -2)) << 6) + ((SMOOTHED(-5, -20) < SMOOTHED(-12, 9)) << 5) + ((SMOOTHED(-6, 12) < SMOOTHED(-11, 3)) << 4) + ((SMOOTHED(4, 5) < SMOOTHED(13, 11)) << 3) + ((SMOOTHED(2, 12) < SMOOTHED(13, -12)) << 2) + ((SMOOTHED(-4, -13) < SMOOTHED(4, 7)) << 1) + ((SMOOTHED(0, 15) < SMOOTHED(-3, -16)) << 0));
desc[49] = (uchar)(((SMOOTHED(-3, 2) < SMOOTHED(-2, 14)) << 7) + ((SMOOTHED(4, -14) < SMOOTHED(16, -11)) << 6) + ((SMOOTHED(-13, 3) < SMOOTHED(23, 10)) << 5) + ((SMOOTHED(9, -19) < SMOOTHED(2, 5)) << 4) + ((SMOOTHED(5, 3) < SMOOTHED(14, -7)) << 3) + ((SMOOTHED(19, -13) < SMOOTHED(-11, 15)) << 2) + ((SMOOTHED(14, 0) < SMOOTHED(-2, -5)) << 1) + ((SMOOTHED(11, -4) < SMOOTHED(0, -6)) << 0));
desc[50] = (uchar)(((SMOOTHED(-2, 5) < SMOOTHED(-13, -8)) << 7) + ((SMOOTHED(-11, -15) < SMOOTHED(-7, -17)) << 6) + ((SMOOTHED(1, 3) < SMOOTHED(-10, -8)) << 5) + ((SMOOTHED(-13, -10) < SMOOTHED(7, -12)) << 4) + ((SMOOTHED(0, -13) < SMOOTHED(23, -6)) << 3) + ((SMOOTHED(2, -17) < SMOOTHED(-7, -3)) << 2) + ((SMOOTHED(1, 3) < SMOOTHED(4, -10)) << 1) + ((SMOOTHED(13, 4) < SMOOTHED(14, -6)) << 0));
desc[51] = (uchar)(((SMOOTHED(-19, -2) < SMOOTHED(-1, 5)) << 7) + ((SMOOTHED(9, -8) < SMOOTHED(10, -5)) << 6) + ((SMOOTHED(7, -1) < SMOOTHED(5, 7)) << 5) + ((SMOOTHED(9, -10) < SMOOTHED(19, 0)) << 4) + ((SMOOTHED(7, 5) < SMOOTHED(-4, -7)) << 3) + ((SMOOTHED(-11, 1) < SMOOTHED(-1, -11)) << 2) + ((SMOOTHED(2, -1) < SMOOTHED(-4, 11)) << 1) + ((SMOOTHED(-1, 7) < SMOOTHED(2, -2)) << 0));
desc[52] = (uchar)(((SMOOTHED(1, -20) < SMOOTHED(-9, -6)) << 7) + ((SMOOTHED(-4, -18) < SMOOTHED(8, -18)) << 6) + ((SMOOTHED(-16, -2) < SMOOTHED(7, -6)) << 5) + ((SMOOTHED(-3, -6) < SMOOTHED(-1, -4)) << 4) + ((SMOOTHED(0, -16) < SMOOTHED(24, -5)) << 3) + ((SMOOTHED(-4, -2) < SMOOTHED(-1, 9)) << 2) + ((SMOOTHED(-8, 2) < SMOOTHED(-6, 15)) << 1) + ((SMOOTHED(11, 4) < SMOOTHED(0, -3)) << 0));
desc[53] = (uchar)(((SMOOTHED(7, 6) < SMOOTHED(2, -10)) << 7) + ((SMOOTHED(-7, -9) < SMOOTHED(12, -6)) << 6) + ((SMOOTHED(24, 15) < SMOOTHED(-8, -1)) << 5) + ((SMOOTHED(15, -9) < SMOOTHED(-3, -15)) << 4) + ((SMOOTHED(17, -5) < SMOOTHED(11, -10)) << 3) + ((SMOOTHED(-2, 13) < SMOOTHED(-15, 4)) << 2) + ((SMOOTHED(-2, -1) < SMOOTHED(4, -23)) << 1) + ((SMOOTHED(-16, 3) < SMOOTHED(-7, -14)) << 0));
desc[54] = (uchar)(((SMOOTHED(-3, -5) < SMOOTHED(-10, -9)) << 7) + ((SMOOTHED(-5, 3) < SMOOTHED(-2, -1)) << 6) + ((SMOOTHED(-1, 4) < SMOOTHED(1, 8)) << 5) + ((SMOOTHED(12, 9) < SMOOTHED(9, -14)) << 4) + ((SMOOTHED(-9, 17) < SMOOTHED(-3, 0)) << 3) + ((SMOOTHED(5, 4) < SMOOTHED(13, -6)) << 2) + ((SMOOTHED(-1, -8) < SMOOTHED(19, 10)) << 1) + ((SMOOTHED(8, -5) < SMOOTHED(-15, 2)) << 0));
desc[55] = (uchar)(((SMOOTHED(-12, -9) < SMOOTHED(-4, -5)) << 7) + ((SMOOTHED(12, 0) < SMOOTHED(24, 4)) << 6) + ((SMOOTHED(8, -2) < SMOOTHED(14, 4)) << 5) + ((SMOOTHED(8, -4) < SMOOTHED(-7, 16)) << 4) + ((SMOOTHED(5, -1) < SMOOTHED(-8, -4)) << 3) + ((SMOOTHED(-2, 18) < SMOOTHED(-5, 17)) << 2) + ((SMOOTHED(8, -2) < SMOOTHED(-9, -2)) << 1) + ((SMOOTHED(3, -7) < SMOOTHED(1, -6)) << 0));
desc[56] = (uchar)(((SMOOTHED(-5, -22) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, -10) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(-3, -13) < SMOOTHED(3, 9)) << 5) + ((SMOOTHED(-4, -1) < SMOOTHED(-1, 0)) << 4) + ((SMOOTHED(-7, -21) < SMOOTHED(12, -19)) << 3) + ((SMOOTHED(-8, 8) < SMOOTHED(24, 8)) << 2) + ((SMOOTHED(12, -6) < SMOOTHED(-2, 3)) << 1) + ((SMOOTHED(-5, -11) < SMOOTHED(-22, -4)) << 0));
desc[57] = (uchar)(((SMOOTHED(-3, 5) < SMOOTHED(-4, 4)) << 7) + ((SMOOTHED(-16, 24) < SMOOTHED(7, -9)) << 6) + ((SMOOTHED(-10, 23) < SMOOTHED(-9, 18)) << 5) + ((SMOOTHED(1, 12) < SMOOTHED(17, 21)) << 4) + ((SMOOTHED(24, -6) < SMOOTHED(-3, -11)) << 3) + ((SMOOTHED(-7, 17) < SMOOTHED(1, -6)) << 2) + ((SMOOTHED(4, 4) < SMOOTHED(2, -7)) << 1) + ((SMOOTHED(14, 6) < SMOOTHED(-12, 3)) << 0));
desc[58] = (uchar)(((SMOOTHED(-6, 0) < SMOOTHED(-16, 13)) << 7) + ((SMOOTHED(-10, 5) < SMOOTHED(7, 12)) << 6) + ((SMOOTHED(5, 2) < SMOOTHED(6, -3)) << 5) + ((SMOOTHED(7, 0) < SMOOTHED(-23, 1)) << 4) + ((SMOOTHED(15, -5) < SMOOTHED(1, 14)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(6, 6)) << 2) + ((SMOOTHED(6, -9) < SMOOTHED(-9, 12)) << 1) + ((SMOOTHED(4, -2) < SMOOTHED(-4, 7)) << 0));
desc[59] = (uchar)(((SMOOTHED(-4, -5) < SMOOTHED(4, 4)) << 7) + ((SMOOTHED(-13, 0) < SMOOTHED(6, -10)) << 6) + ((SMOOTHED(2, -12) < SMOOTHED(-6, -3)) << 5) + ((SMOOTHED(16, 0) < SMOOTHED(-3, 3)) << 4) + ((SMOOTHED(5, -14) < SMOOTHED(6, 11)) << 3) + ((SMOOTHED(5, 11) < SMOOTHED(0, -13)) << 2) + ((SMOOTHED(7, 5) < SMOOTHED(-1, -5)) << 1) + ((SMOOTHED(12, 4) < SMOOTHED(6, 10)) << 0));
desc[60] = (uchar)(((SMOOTHED(-10, 4) < SMOOTHED(-1, -11)) << 7) + ((SMOOTHED(4, 10) < SMOOTHED(-14, 5)) << 6) + ((SMOOTHED(11, -14) < SMOOTHED(-13, 0)) << 5) + ((SMOOTHED(2, 8) < SMOOTHED(12, 24)) << 4) + ((SMOOTHED(-1, 3) < SMOOTHED(-1, 2)) << 3) + ((SMOOTHED(9, -14) < SMOOTHED(-23, 3)) << 2) + ((SMOOTHED(-8, -6) < SMOOTHED(0, 9)) << 1) + ((SMOOTHED(-15, 14) < SMOOTHED(10, -10)) << 0));
desc[61] = (uchar)(((SMOOTHED(-10, -6) < SMOOTHED(-7, -5)) << 7) + ((SMOOTHED(11, 5) < SMOOTHED(-3, -15)) << 6) + ((SMOOTHED(1, 0) < SMOOTHED(1, 8)) << 5) + ((SMOOTHED(-11, -6) < SMOOTHED(-4, -18)) << 4) + ((SMOOTHED(9, 0) < SMOOTHED(22, -4)) << 3) + ((SMOOTHED(-5, -1) < SMOOTHED(-9, 4)) << 2) + ((SMOOTHED(-20, 2) < SMOOTHED(1, 6)) << 1) + ((SMOOTHED(1, 2) < SMOOTHED(-9, -12)) << 0));
desc[62] = (uchar)(((SMOOTHED(5, 15) < SMOOTHED(4, -6)) << 7) + ((SMOOTHED(19, 4) < SMOOTHED(4, 11)) << 6) + ((SMOOTHED(17, -4) < SMOOTHED(-8, -1)) << 5) + ((SMOOTHED(-8, -12) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(11, 9) < SMOOTHED(8, 1)) << 3) + ((SMOOTHED(9, 22) < SMOOTHED(-15, 15)) << 2) + ((SMOOTHED(-7, -7) < SMOOTHED(1, -23)) << 1) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 2)) << 0));
desc[63] = (uchar)(((SMOOTHED(3, -5) < SMOOTHED(11, -11)) << 7) + ((SMOOTHED(3, -18) < SMOOTHED(14, -5)) << 6) + ((SMOOTHED(-20, 7) < SMOOTHED(-10, -23)) << 5) + ((SMOOTHED(-2, -5) < SMOOTHED(6, 0)) << 4) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 3) + ((SMOOTHED(-6, -1) < SMOOTHED(14, -2)) << 2) + ((SMOOTHED(-12, -16) < SMOOTHED(15, 6)) << 1) + ((SMOOTHED(-12, -2) < SMOOTHED(3, -19)) << 0));
#undef SMOOTHED

View File

@ -50,26 +50,7 @@
namespace cv
{
Mat windowedMatchingMask( const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
float maxDeltaX, float maxDeltaY )
{
if( keypoints1.empty() || keypoints2.empty() )
return Mat();
int n1 = (int)keypoints1.size(), n2 = (int)keypoints2.size();
Mat mask( n1, n2, CV_8UC1 );
for( int i = 0; i < n1; i++ )
{
for( int j = 0; j < n2; j++ )
{
Point2f diff = keypoints2[j].pt - keypoints1[i].pt;
mask.at<uchar>(i, j) = std::abs(diff.x) < maxDeltaX && std::abs(diff.y) < maxDeltaY;
}
}
return mask;
}
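// A hedged usage sketch for the mask above: restrict brute-force matching to
// keypoint pairs whose positions differ by at most 25 pixels in x and y, which
// suits near-aligned image pairs (the window size is an illustrative choice):
static void matchWithWindow( const Mat& descriptors1, const Mat& descriptors2,
const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
std::vector<DMatch>& matches )
{
Mat mask = windowedMatchingMask( keypoints1, keypoints2, 25.f, 25.f );
BFMatcher matcher( NORM_L2 );
matcher.match( descriptors1, descriptors2, matches, mask );
}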
//////////////////////////////////////////////////////////////////ocl functions for BFMatcher ///////////////////////////////////////////////////////////////
/////////////////////// ocl functions for BFMatcher ///////////////////////////
static void ensureSizeIsEnough(int rows, int cols, int type, UMat &m)
{
@ -1507,382 +1488,4 @@ void FlannBasedMatcher::radiusMatchImpl( InputArray _queryDescriptors, std::vect
convertToDMatches( mergedDescriptors, indices, dists, matches );
}
/****************************************************************************************\
* GenericDescriptorMatcher *
\****************************************************************************************/
/*
* KeyPointCollection
*/
GenericDescriptorMatcher::KeyPointCollection::KeyPointCollection() : pointCount(0)
{}
GenericDescriptorMatcher::KeyPointCollection::KeyPointCollection( const KeyPointCollection& collection )
{
pointCount = collection.pointCount;
// copy into the empty members with back_inserter / assignment; writing through
// begin() of an empty destination vector would be undefined behaviour
images.reserve( collection.images.size() );
std::transform( collection.images.begin(), collection.images.end(), std::back_inserter( images ), clone_op );
keypoints = collection.keypoints;
startIndices = collection.startIndices;
}
void GenericDescriptorMatcher::KeyPointCollection::add( const std::vector<Mat>& _images,
const std::vector<std::vector<KeyPoint> >& _points )
{
CV_Assert( !_images.empty() );
CV_Assert( _images.size() == _points.size() );
images.insert( images.end(), _images.begin(), _images.end() );
keypoints.insert( keypoints.end(), _points.begin(), _points.end() );
for( size_t i = 0; i < _points.size(); i++ )
pointCount += (int)_points[i].size();
size_t prevSize = startIndices.size(), addSize = _images.size();
startIndices.resize( prevSize + addSize );
if( prevSize == 0 )
startIndices[prevSize] = 0; //first
else
startIndices[prevSize] = (int)(startIndices[prevSize-1] + keypoints[prevSize-1].size());
for( size_t i = prevSize + 1; i < prevSize + addSize; i++ )
{
startIndices[i] = (int)(startIndices[i - 1] + keypoints[i - 1].size());
}
}
void GenericDescriptorMatcher::KeyPointCollection::clear()
{
pointCount = 0;
images.clear();
keypoints.clear();
startIndices.clear();
}
size_t GenericDescriptorMatcher::KeyPointCollection::keypointCount() const
{
return pointCount;
}
size_t GenericDescriptorMatcher::KeyPointCollection::imageCount() const
{
return images.size();
}
const std::vector<std::vector<KeyPoint> >& GenericDescriptorMatcher::KeyPointCollection::getKeypoints() const
{
return keypoints;
}
const std::vector<KeyPoint>& GenericDescriptorMatcher::KeyPointCollection::getKeypoints( int imgIdx ) const
{
CV_Assert( imgIdx < (int)imageCount() );
return keypoints[imgIdx];
}
const KeyPoint& GenericDescriptorMatcher::KeyPointCollection::getKeyPoint( int imgIdx, int localPointIdx ) const
{
CV_Assert( imgIdx < (int)images.size() );
CV_Assert( localPointIdx < (int)keypoints[imgIdx].size() );
return keypoints[imgIdx][localPointIdx];
}
const KeyPoint& GenericDescriptorMatcher::KeyPointCollection::getKeyPoint( int globalPointIdx ) const
{
int imgIdx, localPointIdx;
getLocalIdx( globalPointIdx, imgIdx, localPointIdx );
return keypoints[imgIdx][localPointIdx];
}
void GenericDescriptorMatcher::KeyPointCollection::getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const
{
imgIdx = -1;
CV_Assert( globalPointIdx < (int)keypointCount() );
for( size_t i = 1; i < startIndices.size(); i++ )
{
if( globalPointIdx < startIndices[i] )
{
imgIdx = (int)(i - 1);
break;
}
}
imgIdx = imgIdx == -1 ? (int)(startIndices.size() - 1) : imgIdx;
localPointIdx = globalPointIdx - startIndices[imgIdx];
}
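// Because startIndices is non-decreasing, the linear scan in getLocalIdx() can
// equivalently be written as a binary search with std::upper_bound (requires
// <algorithm>); a sketch under the same layout, not the class's actual code:
static void getLocalIdxFast( const std::vector<int>& startIndices, int globalPointIdx, int& imgIdx, int& localPointIdx )
{
// first entry strictly greater than globalPointIdx, then step back one image
std::vector<int>::const_iterator it = std::upper_bound( startIndices.begin(), startIndices.end(), globalPointIdx );
imgIdx = (int)(it - startIndices.begin()) - 1;
localPointIdx = globalPointIdx - startIndices[imgIdx];
}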
const std::vector<Mat>& GenericDescriptorMatcher::KeyPointCollection::getImages() const
{
return images;
}
const Mat& GenericDescriptorMatcher::KeyPointCollection::getImage( int imgIdx ) const
{
CV_Assert( imgIdx < (int)imageCount() );
return images[imgIdx];
}
/*
* GenericDescriptorMatcher
*/
GenericDescriptorMatcher::GenericDescriptorMatcher()
{}
GenericDescriptorMatcher::~GenericDescriptorMatcher()
{}
void GenericDescriptorMatcher::add( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints )
{
std::vector<Mat> images;
_images.getMatVector(images);
CV_Assert( !images.empty() );
CV_Assert( images.size() == keypoints.size() );
for( size_t i = 0; i < images.size(); i++ )
{
CV_Assert( !images[i].empty() );
KeyPointsFilter::runByImageBorder( keypoints[i], images[i].size(), 0 );
KeyPointsFilter::runByKeypointSize( keypoints[i], std::numeric_limits<float>::epsilon() );
}
trainPointCollection.add( images, keypoints );
}
const std::vector<Mat>& GenericDescriptorMatcher::getTrainImages() const
{
return trainPointCollection.getImages();
}
const std::vector<std::vector<KeyPoint> >& GenericDescriptorMatcher::getTrainKeypoints() const
{
return trainPointCollection.getKeypoints();
}
void GenericDescriptorMatcher::clear()
{
trainPointCollection.clear();
}
void GenericDescriptorMatcher::train()
{}
void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const
{
std::vector<DMatch> matches;
match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
// propagate the matched train keypoints' class_id to the query keypoints
for( size_t i = 0; i < matches.size(); i++ )
queryKeypoints[matches[i].queryIdx].class_id = trainKeypoints[matches[i].trainIdx].class_id;
}
void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints )
{
std::vector<DMatch> matches;
match( queryImage, queryKeypoints, matches );
// propagate the matched train keypoints' class_id to the query keypoints;
// (imgIdx, trainIdx) selects the train image and its local keypoint index
for( size_t i = 0; i < matches.size(); i++ )
queryKeypoints[matches[i].queryIdx].class_id = trainPointCollection.getKeyPoint( matches[i].imgIdx, matches[i].trainIdx ).class_id;
}
void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<DMatch>& matches, InputArray mask ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
tempMatcher->match( queryImage, queryKeypoints, matches, std::vector<Mat>(1, mask.getMat()) );
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn, InputArray mask, bool compactResult ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
tempMatcher->knnMatch( queryImage, queryKeypoints, matches, knn, std::vector<Mat>(1, mask.getMat()), compactResult );
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArray mask, bool compactResult ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
tempMatcher->radiusMatch( queryImage, queryKeypoints, matches, maxDistance, std::vector<Mat>(1, mask.getMat()), compactResult );
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<DMatch>& matches, InputArrayOfArrays masks )
{
std::vector<std::vector<DMatch> > knnMatches;
knnMatch( queryImage, queryKeypoints, knnMatches, 1, masks, false );
convertMatches( knnMatches, matches );
}
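// match() above is knnMatch() with knn = 1 followed by convertMatches(), which
// flattens the per-query candidate lists keeping only the best hit. A sketch
// of what that helper is assumed to do (not necessarily its literal code):
static void convertMatchesSketch( const std::vector<std::vector<DMatch> >& knnMatches, std::vector<DMatch>& matches )
{
matches.clear();
matches.reserve( knnMatches.size() );
for( size_t i = 0; i < knnMatches.size(); i++ )
if( !knnMatches[i].empty() )
matches.push_back( knnMatches[i][0] ); // nearest neighbour only
}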
void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
InputArrayOfArrays masks, bool compactResult )
{
matches.clear();
if( queryImage.empty() || queryKeypoints.empty() )
return;
KeyPointsFilter::runByImageBorder( queryKeypoints, queryImage.size(), 0 );
KeyPointsFilter::runByKeypointSize( queryKeypoints, std::numeric_limits<float>::epsilon() );
train();
knnMatchImpl( queryImage, queryKeypoints, matches, knn, masks, compactResult );
}
void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks, bool compactResult )
{
matches.clear();
if( queryImage.empty() || queryKeypoints.empty() )
return;
KeyPointsFilter::runByImageBorder( queryKeypoints, queryImage.size(), 0 );
KeyPointsFilter::runByKeypointSize( queryKeypoints, std::numeric_limits<float>::epsilon() );
train();
radiusMatchImpl( queryImage, queryKeypoints, matches, maxDistance, masks, compactResult );
}
void GenericDescriptorMatcher::read( const FileNode& )
{}
void GenericDescriptorMatcher::write( FileStorage& ) const
{}
bool GenericDescriptorMatcher::empty() const
{
return true;
}
/*
* Factory function for creating a GenericDescriptorMatcher
*/
Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::create( const String& genericDescriptorMatcherType,
const String &paramsFilename )
{
Ptr<GenericDescriptorMatcher> descriptorMatcher =
Algorithm::create<GenericDescriptorMatcher>("DescriptorMatcher." + genericDescriptorMatcherType);
if( !paramsFilename.empty() && descriptorMatcher )
{
FileStorage fs = FileStorage( paramsFilename, FileStorage::READ );
if( fs.isOpened() )
{
descriptorMatcher->read( fs.root() );
fs.release();
}
}
return descriptorMatcher;
}
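// A hedged usage sketch for the factory above: look a matcher up by name in
// the Algorithm registry and, when a parameter file is supplied and opens,
// restore its state from the file's root node. The matcher name "Vector" and
// the file name are illustrative assumptions:
static Ptr<GenericDescriptorMatcher> loadMatcherSketch()
{
return GenericDescriptorMatcher::create( "Vector", "matcher_params.yml" );
}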
/****************************************************************************************\
* VectorDescriptorMatcher *
\****************************************************************************************/
VectorDescriptorMatcher::VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& _extractor,
const Ptr<DescriptorMatcher>& _matcher )
: extractor( _extractor ), matcher( _matcher )
{
CV_Assert( extractor && matcher );
}
VectorDescriptorMatcher::~VectorDescriptorMatcher()
{}
void VectorDescriptorMatcher::add( InputArrayOfArrays _imgCollection,
std::vector<std::vector<KeyPoint> >& pointCollection )
{
std::vector<Mat> imgCollection, descriptors;
_imgCollection.getMatVector(imgCollection);
extractor->compute( imgCollection, pointCollection, descriptors );
matcher->add( descriptors );
trainPointCollection.add( imgCollection, pointCollection );
}
void VectorDescriptorMatcher::clear()
{
//extractor->clear();
matcher->clear();
GenericDescriptorMatcher::clear();
}
void VectorDescriptorMatcher::train()
{
matcher->train();
}
bool VectorDescriptorMatcher::isMaskSupported()
{
return matcher->isMaskSupported();
}
void VectorDescriptorMatcher::knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
InputArrayOfArrays masks, bool compactResult )
{
Mat queryDescriptors;
extractor->compute( queryImage, queryKeypoints, queryDescriptors );
matcher->knnMatch( queryDescriptors, matches, knn, masks, compactResult );
}
void VectorDescriptorMatcher::radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
InputArrayOfArrays masks, bool compactResult )
{
Mat queryDescriptors;
extractor->compute( queryImage, queryKeypoints, queryDescriptors );
matcher->radiusMatch( queryDescriptors, matches, maxDistance, masks, compactResult );
}
void VectorDescriptorMatcher::read( const FileNode& fn )
{
GenericDescriptorMatcher::read(fn);
extractor->read(fn);
}
void VectorDescriptorMatcher::write (FileStorage& fs) const
{
GenericDescriptorMatcher::write(fs);
extractor->write (fs);
}
bool VectorDescriptorMatcher::empty() const
{
return !extractor || extractor->empty() ||
!matcher || matcher->empty();
}
Ptr<GenericDescriptorMatcher> VectorDescriptorMatcher::clone( bool emptyTrainData ) const
{
// TODO clone extractor
return makePtr<VectorDescriptorMatcher>( extractor, matcher->clone(emptyTrainData) );
}
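// A minimal usage sketch (illustrative only; ORB from features2d serves as the
// extractor, paired with a Hamming-distance brute-force matcher):
//
//     Ptr<DescriptorExtractor> extractor = ORB::create();
//     Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
//     VectorDescriptorMatcher vdm(extractor, matcher);
//     std::vector<DMatch> matches;
//     vdm.match(queryImg, queryKps, trainImg, trainKps, matches);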
}

View File

@ -1,472 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
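// Computes three integral images in a single pass: the ordinary prefix sum S, the
// 45-degree rotated ("tilted") sum T, and the flat-tilted sum FT that the star-shaped
// (CenSurE-like) filter below samples through the StarFeature pointers.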
template <typename inMatType, typename outMatType> static void
computeIntegralImages( const Mat& matI, Mat& matS, Mat& matT, Mat& _FT,
int iiType )
{
int x, y, rows = matI.rows, cols = matI.cols;
matS.create(rows + 1, cols + 1, iiType );
matT.create(rows + 1, cols + 1, iiType );
_FT.create(rows + 1, cols + 1, iiType );
const inMatType* I = matI.ptr<inMatType>();
outMatType *S = matS.ptr<outMatType>();
outMatType *T = matT.ptr<outMatType>();
outMatType *FT = _FT.ptr<outMatType>();
int istep = (int)(matI.step/matI.elemSize());
int step = (int)(matS.step/matS.elemSize());
for( x = 0; x <= cols; x++ )
S[x] = T[x] = FT[x] = 0;
S += step; T += step; FT += step;
S[0] = T[0] = 0;
FT[0] = I[0];
for( x = 1; x < cols; x++ )
{
S[x] = S[x-1] + I[x-1];
T[x] = I[x-1];
FT[x] = I[x] + I[x-1];
}
S[cols] = S[cols-1] + I[cols-1];
T[cols] = FT[cols] = I[cols-1];
for( y = 2; y <= rows; y++ )
{
I += istep, S += step, T += step, FT += step;
S[0] = S[-step]; S[1] = S[-step+1] + I[0];
T[0] = T[-step + 1];
T[1] = FT[0] = T[-step + 2] + I[-istep] + I[0];
FT[1] = FT[-step + 2] + I[-istep] + I[1] + I[0];
for( x = 2; x < cols; x++ )
{
S[x] = S[x - 1] + S[-step + x] - S[-step + x - 1] + I[x - 1];
T[x] = T[-step + x - 1] + T[-step + x + 1] - T[-step*2 + x] + I[-istep + x - 1] + I[x - 1];
FT[x] = FT[-step + x - 1] + FT[-step + x + 1] - FT[-step*2 + x] + I[x] + I[x-1];
}
S[cols] = S[cols - 1] + S[-step + cols] - S[-step + cols - 1] + I[cols - 1];
T[cols] = FT[cols] = T[-step + cols - 1] + I[-istep + cols - 1] + I[cols - 1];
}
}
template <typename iiMatType> static int
StarDetectorComputeResponses( const Mat& img, Mat& responses, Mat& sizes,
int maxSize, int iiType )
{
const int MAX_PATTERN = 17;
static const int sizes0[] = {1, 2, 3, 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128, -1};
static const int pairs[][2] = {{1, 0}, {3, 1}, {4, 2}, {5, 3}, {7, 4}, {8, 5}, {9, 6},
{11, 8}, {13, 10}, {14, 11}, {15, 12}, {16, 14}, {-1, -1}};
float invSizes[MAX_PATTERN][2];
int sizes1[MAX_PATTERN];
#if CV_SSE2
__m128 invSizes4[MAX_PATTERN][2];
__m128 sizes1_4[MAX_PATTERN];
union { int i; float f; } absmask;
absmask.i = 0x7fffffff;
volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2) && iiType == CV_32S;
#endif
struct StarFeature
{
int area;
iiMatType* p[8];
};
StarFeature f[MAX_PATTERN];
Mat sum, tilted, flatTilted;
int y, rows = img.rows, cols = img.cols;
int border, npatterns=0, maxIdx=0;
responses.create( img.size(), CV_32F );
sizes.create( img.size(), CV_16S );
while( pairs[npatterns][0] >= 0 && !
( sizes0[pairs[npatterns][0]] >= maxSize
|| sizes0[pairs[npatterns+1][0]] + sizes0[pairs[npatterns+1][0]]/2 >= std::min(rows, cols) ) )
{
++npatterns;
}
npatterns += (pairs[npatterns-1][0] >= 0);
maxIdx = pairs[npatterns-1][0];
// Create the integral image appropriate for our type & usage
if ( img.type() == CV_8U )
computeIntegralImages<uchar, iiMatType>( img, sum, tilted, flatTilted, iiType );
else if ( img.type() == CV_8S )
computeIntegralImages<char, iiMatType>( img, sum, tilted, flatTilted, iiType );
else if ( img.type() == CV_16U )
computeIntegralImages<ushort, iiMatType>( img, sum, tilted, flatTilted, iiType );
else if ( img.type() == CV_16S )
computeIntegralImages<short, iiMatType>( img, sum, tilted, flatTilted, iiType );
else
CV_Error( Error::StsUnsupportedFormat, "" );
int step = (int)(sum.step/sum.elemSize());
for(int i = 0; i <= maxIdx; i++ )
{
int ur_size = sizes0[i], t_size = sizes0[i] + sizes0[i]/2;
int ur_area = (2*ur_size + 1)*(2*ur_size + 1);
int t_area = t_size*t_size + (t_size + 1)*(t_size + 1);
f[i].p[0] = sum.ptr<iiMatType>() + (ur_size + 1)*step + ur_size + 1;
f[i].p[1] = sum.ptr<iiMatType>() - ur_size*step + ur_size + 1;
f[i].p[2] = sum.ptr<iiMatType>() + (ur_size + 1)*step - ur_size;
f[i].p[3] = sum.ptr<iiMatType>() - ur_size*step - ur_size;
f[i].p[4] = tilted.ptr<iiMatType>() + (t_size + 1)*step + 1;
f[i].p[5] = flatTilted.ptr<iiMatType>() - t_size;
f[i].p[6] = flatTilted.ptr<iiMatType>() + t_size + 1;
f[i].p[7] = tilted.ptr<iiMatType>() - t_size*step + 1;
f[i].area = ur_area + t_area;
sizes1[i] = sizes0[i];
}
// negate end points of the size range
// for a faster rejection of very small or very large features in non-maxima suppression.
sizes1[0] = -sizes1[0];
sizes1[1] = -sizes1[1];
sizes1[maxIdx] = -sizes1[maxIdx];
border = sizes0[maxIdx] + sizes0[maxIdx]/2;
for(int i = 0; i < npatterns; i++ )
{
int innerArea = f[pairs[i][1]].area;
int outerArea = f[pairs[i][0]].area - innerArea;
invSizes[i][0] = 1.f/outerArea;
invSizes[i][1] = 1.f/innerArea;
}
#if CV_SSE2
if( useSIMD )
{
for(int i = 0; i < npatterns; i++ )
{
_mm_store_ps((float*)&invSizes4[i][0], _mm_set1_ps(invSizes[i][0]));
_mm_store_ps((float*)&invSizes4[i][1], _mm_set1_ps(invSizes[i][1]));
}
for(int i = 0; i <= maxIdx; i++ )
_mm_store_ps((float*)&sizes1_4[i], _mm_set1_ps((float)sizes1[i]));
}
#endif
for( y = 0; y < border; y++ )
{
float* r_ptr = responses.ptr<float>(y);
float* r_ptr2 = responses.ptr<float>(rows - 1 - y);
short* s_ptr = sizes.ptr<short>(y);
short* s_ptr2 = sizes.ptr<short>(rows - 1 - y);
memset( r_ptr, 0, cols*sizeof(r_ptr[0]));
memset( r_ptr2, 0, cols*sizeof(r_ptr2[0]));
memset( s_ptr, 0, cols*sizeof(s_ptr[0]));
memset( s_ptr2, 0, cols*sizeof(s_ptr2[0]));
}
for( y = border; y < rows - border; y++ )
{
int x = border;
float* r_ptr = responses.ptr<float>(y);
short* s_ptr = sizes.ptr<short>(y);
memset( r_ptr, 0, border*sizeof(r_ptr[0]));
memset( s_ptr, 0, border*sizeof(s_ptr[0]));
memset( r_ptr + cols - border, 0, border*sizeof(r_ptr[0]));
memset( s_ptr + cols - border, 0, border*sizeof(s_ptr[0]));
#if CV_SSE2
if( useSIMD )
{
__m128 absmask4 = _mm_set1_ps(absmask.f);
for( ; x <= cols - border - 4; x += 4 )
{
int ofs = y*step + x;
__m128 vals[MAX_PATTERN];
__m128 bestResponse = _mm_setzero_ps();
__m128 bestSize = _mm_setzero_ps();
for(int i = 0; i <= maxIdx; i++ )
{
const iiMatType** p = (const iiMatType**)&f[i].p[0];
__m128i r0 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[0]+ofs)),
_mm_loadu_si128((const __m128i*)(p[1]+ofs)));
__m128i r1 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[3]+ofs)),
_mm_loadu_si128((const __m128i*)(p[2]+ofs)));
__m128i r2 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[4]+ofs)),
_mm_loadu_si128((const __m128i*)(p[5]+ofs)));
__m128i r3 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[7]+ofs)),
_mm_loadu_si128((const __m128i*)(p[6]+ofs)));
r0 = _mm_add_epi32(_mm_add_epi32(r0,r1), _mm_add_epi32(r2,r3));
_mm_store_ps((float*)&vals[i], _mm_cvtepi32_ps(r0));
}
for(int i = 0; i < npatterns; i++ )
{
__m128 inner_sum = vals[pairs[i][1]];
__m128 outer_sum = _mm_sub_ps(vals[pairs[i][0]], inner_sum);
__m128 response = _mm_sub_ps(_mm_mul_ps(inner_sum, invSizes4[i][1]),
_mm_mul_ps(outer_sum, invSizes4[i][0]));
__m128 swapmask = _mm_cmpgt_ps(_mm_and_ps(response,absmask4),
_mm_and_ps(bestResponse,absmask4));
bestResponse = _mm_xor_ps(bestResponse,
_mm_and_ps(_mm_xor_ps(response,bestResponse), swapmask));
bestSize = _mm_xor_ps(bestSize,
_mm_and_ps(_mm_xor_ps(sizes1_4[pairs[i][0]], bestSize), swapmask));
}
_mm_storeu_ps(r_ptr + x, bestResponse);
_mm_storel_epi64((__m128i*)(s_ptr + x),
_mm_packs_epi32(_mm_cvtps_epi32(bestSize),_mm_setzero_si128()));
}
}
#endif
for( ; x < cols - border; x++ )
{
int ofs = y*step + x;
int vals[MAX_PATTERN];
float bestResponse = 0;
int bestSize = 0;
for(int i = 0; i <= maxIdx; i++ )
{
const iiMatType** p = (const iiMatType**)&f[i].p[0];
vals[i] = (int)(p[0][ofs] - p[1][ofs] - p[2][ofs] + p[3][ofs] +
p[4][ofs] - p[5][ofs] - p[6][ofs] + p[7][ofs]);
}
for(int i = 0; i < npatterns; i++ )
{
int inner_sum = vals[pairs[i][1]];
int outer_sum = vals[pairs[i][0]] - inner_sum;
float response = inner_sum*invSizes[i][1] - outer_sum*invSizes[i][0];
if( fabs(response) > fabs(bestResponse) )
{
bestResponse = response;
bestSize = sizes1[pairs[i][0]];
}
}
r_ptr[x] = bestResponse;
s_ptr[x] = (short)bestSize;
}
}
return border;
}
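// Line (edge) suppression: a candidate is rejected when the covariance of the response
// gradient is too elongated, i.e. (Lxx + Lyy)^2 >= lineThresholdProjected * (Lxx*Lyy - Lxy^2),
// and likewise for the binarized test computed on the size map.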
static bool StarDetectorSuppressLines( const Mat& responses, const Mat& sizes, Point pt,
int lineThresholdProjected, int lineThresholdBinarized )
{
const float* r_ptr = responses.ptr<float>();
int rstep = (int)(responses.step/sizeof(r_ptr[0]));
const short* s_ptr = sizes.ptr<short>();
int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
int sz = s_ptr[pt.y*sstep + pt.x];
int x, y, delta = sz/4, radius = delta*4;
float Lxx = 0, Lyy = 0, Lxy = 0;
int Lxxb = 0, Lyyb = 0, Lxyb = 0;
for( y = pt.y - radius; y <= pt.y + radius; y += delta )
for( x = pt.x - radius; x <= pt.x + radius; x += delta )
{
float Lx = r_ptr[y*rstep + x + 1] - r_ptr[y*rstep + x - 1];
float Ly = r_ptr[(y+1)*rstep + x] - r_ptr[(y-1)*rstep + x];
Lxx += Lx*Lx; Lyy += Ly*Ly; Lxy += Lx*Ly;
}
if( (Lxx + Lyy)*(Lxx + Lyy) >= lineThresholdProjected*(Lxx*Lyy - Lxy*Lxy) )
return true;
for( y = pt.y - radius; y <= pt.y + radius; y += delta )
for( x = pt.x - radius; x <= pt.x + radius; x += delta )
{
int Lxb = (s_ptr[y*sstep + x + 1] == sz) - (s_ptr[y*sstep + x - 1] == sz);
int Lyb = (s_ptr[(y+1)*sstep + x] == sz) - (s_ptr[(y-1)*sstep + x] == sz);
Lxxb += Lxb * Lxb; Lyyb += Lyb * Lyb; Lxyb += Lxb * Lyb;
}
if( (Lxxb + Lyyb)*(Lxxb + Lyyb) >= lineThresholdBinarized*(Lxxb*Lyyb - Lxyb*Lxyb) )
return true;
return false;
}
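// Tiled non-maximum suppression: each (delta+1)x(delta+1) tile contributes at most one
// bright and one dark extremum; each is kept only if it dominates its full
// (2*delta+1)^2 neighborhood and passes the line-suppression test above.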
static void
StarDetectorSuppressNonmax( const Mat& responses, const Mat& sizes,
std::vector<KeyPoint>& keypoints, int border,
int responseThreshold,
int lineThresholdProjected,
int lineThresholdBinarized,
int suppressNonmaxSize )
{
int x, y, x1, y1, delta = suppressNonmaxSize/2;
int rows = responses.rows, cols = responses.cols;
const float* r_ptr = responses.ptr<float>();
int rstep = (int)(responses.step/sizeof(r_ptr[0]));
const short* s_ptr = sizes.ptr<short>();
int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
short featureSize = 0;
for( y = border; y < rows - border; y += delta+1 )
for( x = border; x < cols - border; x += delta+1 )
{
float maxResponse = (float)responseThreshold;
float minResponse = (float)-responseThreshold;
Point maxPt(-1, -1), minPt(-1, -1);
int tileEndY = MIN(y + delta, rows - border - 1);
int tileEndX = MIN(x + delta, cols - border - 1);
for( y1 = y; y1 <= tileEndY; y1++ )
for( x1 = x; x1 <= tileEndX; x1++ )
{
float val = r_ptr[y1*rstep + x1];
if( maxResponse < val )
{
maxResponse = val;
maxPt = Point(x1, y1);
}
else if( minResponse > val )
{
minResponse = val;
minPt = Point(x1, y1);
}
}
if( maxPt.x >= 0 )
{
for( y1 = maxPt.y - delta; y1 <= maxPt.y + delta; y1++ )
for( x1 = maxPt.x - delta; x1 <= maxPt.x + delta; x1++ )
{
float val = r_ptr[y1*rstep + x1];
if( val >= maxResponse && (y1 != maxPt.y || x1 != maxPt.x))
goto skip_max;
}
if( (featureSize = s_ptr[maxPt.y*sstep + maxPt.x]) >= 4 &&
!StarDetectorSuppressLines( responses, sizes, maxPt, lineThresholdProjected,
lineThresholdBinarized ))
{
KeyPoint kpt((float)maxPt.x, (float)maxPt.y, featureSize, -1, maxResponse);
keypoints.push_back(kpt);
}
}
skip_max:
if( minPt.x >= 0 )
{
for( y1 = minPt.y - delta; y1 <= minPt.y + delta; y1++ )
for( x1 = minPt.x - delta; x1 <= minPt.x + delta; x1++ )
{
float val = r_ptr[y1*rstep + x1];
if( val <= minResponse && (y1 != minPt.y || x1 != minPt.x))
goto skip_min;
}
if( (featureSize = s_ptr[minPt.y*sstep + minPt.x]) >= 4 &&
!StarDetectorSuppressLines( responses, sizes, minPt,
lineThresholdProjected, lineThresholdBinarized))
{
KeyPoint kpt((float)minPt.x, (float)minPt.y, featureSize, -1, minResponse); // the dark extremum keeps its own (negative) response
keypoints.push_back(kpt);
}
}
skip_min:
;
}
}
StarDetector::StarDetector(int _maxSize, int _responseThreshold,
int _lineThresholdProjected,
int _lineThresholdBinarized,
int _suppressNonmaxSize)
: maxSize(_maxSize), responseThreshold(_responseThreshold),
lineThresholdProjected(_lineThresholdProjected),
lineThresholdBinarized(_lineThresholdBinarized),
suppressNonmaxSize(_suppressNonmaxSize)
{}
void StarDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
if( image.channels() > 1 ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
(*this)(grayImage, keypoints);
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
void StarDetector::operator()(const Mat& img, std::vector<KeyPoint>& keypoints) const
{
Mat responses, sizes;
int border;
// Use 32-bit integers if we won't overflow in the integral image
if ((img.depth() == CV_8U || img.depth() == CV_8S) &&
(img.rows * img.cols) < 8388608 ) // 8388608 = 2 ^ (32 - 8(bit depth) - 1(sign bit))
border = StarDetectorComputeResponses<int>( img, responses, sizes, maxSize, CV_32S );
else
border = StarDetectorComputeResponses<double>( img, responses, sizes, maxSize, CV_64F );
keypoints.clear();
if( border >= 0 )
StarDetectorSuppressNonmax( responses, sizes, keypoints, border,
responseThreshold, lineThresholdProjected,
lineThresholdBinarized, suppressNonmaxSize );
}
}

View File

@ -327,28 +327,6 @@ TEST( Features2d_DescriptorExtractor_ORB, regression )
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_FREAK, regression )
{
// TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-freak", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("FREAK") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_BRIEF, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brief", 1,
DescriptorExtractor::create("BRIEF") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_OpponentBRIEF, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-opponent-brief", 1,
DescriptorExtractor::create("OpponentBRIEF") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,

View File

@ -277,12 +277,6 @@ TEST( Features2d_Detector_MSER, DISABLED_regression )
test.safe_run();
}
TEST( Features2d_Detector_STAR, regression )
{
CV_FeatureDetectorTest test( "detector-star", FeatureDetector::create("STAR") );
test.safe_run();
}
TEST( Features2d_Detector_ORB, regression )
{
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
@ -300,15 +294,3 @@ TEST( Features2d_Detector_AKAZE, regression )
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
test.safe_run();
}
TEST( Features2d_Detector_GridFAST, regression )
{
CV_FeatureDetectorTest test( "detector-grid-fast", FeatureDetector::create("GridFAST") );
test.safe_run();
}
TEST( Features2d_Detector_PyramidFAST, regression )
{
CV_FeatureDetectorTest test( "detector-pyramid-fast", FeatureDetector::create("PyramidFAST") );
test.safe_run();
}

View File

@ -155,18 +155,6 @@ TEST(Features2d_Detector_Keypoints_ORB, validation)
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_Star, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.STAR"));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_Dense, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.Dense"));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_KAZE, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"));

View File

@ -2,7 +2,7 @@
from __future__ import print_function
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "videostab", "softcascade", "superres"]
verbose = False
show_warnings = True
show_errors = True
@ -12,7 +12,6 @@ params_blacklist = {
"fromarray" : ("object", "allowND"), # python only function
"reprojectImageTo3D" : ("ddepth"), # python only argument
"composeRT" : ("d*d*"), # wildchards in parameter names are not supported by this parser
"CvSVM::train_auto" : ("\\*Grid"), # wildchards in parameter names are not supported by this parser
"error" : "args", # parameter of supporting macro
"getConvertElem" : ("from", "cn", "to", "beta", "alpha"), # arguments of returned functions
"gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser

View File

@ -390,124 +390,6 @@ private:
Ptr<DescriptorExtractor> wrapped;
};
class CV_EXPORTS_AS(GenericDescriptorMatcher) javaGenericDescriptorMatcher
{
public:
CV_WRAP void add( const std::vector<Mat>& images,
std::vector<std::vector<KeyPoint> >& keypoints )
{ return wrapped->add(images, keypoints); }
CV_WRAP const std::vector<Mat>& getTrainImages() const
{ return wrapped->getTrainImages(); }
CV_WRAP const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const
{ return wrapped->getTrainKeypoints(); }
CV_WRAP void clear()
{ return wrapped->clear(); }
CV_WRAP bool isMaskSupported()
{ return wrapped->isMaskSupported(); }
CV_WRAP void train()
{ return wrapped->train(); }
CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const
{ return wrapped->classify(queryImage, queryKeypoints, trainImage, trainKeypoints); }
CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints )
{ return wrapped->classify(queryImage, queryKeypoints); }
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const
{ return wrapped->match(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, mask); }
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const
{ return wrapped->knnMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
matches, k, mask, compactResult); }
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const
{ return wrapped->radiusMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
matches, maxDistance, mask, compactResult); }
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<DMatch>& matches, const std::vector<Mat>& masks=std::vector<Mat>() )
{ return wrapped->match(queryImage, queryKeypoints, matches, masks); }
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
{ return wrapped->knnMatch(queryImage, queryKeypoints, matches, k, masks, compactResult); }
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
{ return wrapped->radiusMatch(queryImage, queryKeypoints, matches, maxDistance, masks, compactResult); }
CV_WRAP bool empty() const
{ return wrapped->empty(); }
enum
{
ONEWAY = 1,
FERN = 2
};
CV_WRAP_AS(clone) javaGenericDescriptorMatcher* jclone( bool emptyTrainData=false ) const
{
return new javaGenericDescriptorMatcher(wrapped->clone(emptyTrainData));
}
//supported: OneWay, Fern
//unsupported: Vector
CV_WRAP static javaGenericDescriptorMatcher* create( int matcherType )
{
String name;
switch(matcherType)
{
case ONEWAY:
name = "ONEWAY";
break;
case FERN:
name = "FERN";
break;
default:
CV_Error( Error::StsBadArg, "Specified generic descriptor matcher type is not supported." );
break;
}
return new javaGenericDescriptorMatcher(GenericDescriptorMatcher::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
wrapped->read(fs.root());
}
private:
javaGenericDescriptorMatcher(Ptr<GenericDescriptorMatcher> _wrapped) : wrapped(_wrapped)
{}
Ptr<GenericDescriptorMatcher> wrapped;
};
#if 0
//DO NOT REMOVE! The block is required for sources parser
enum

View File

@ -2,10 +2,6 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
# include "opencv2/nonfree.hpp"
#endif
#ifdef HAVE_OPENCV_FEATURES2D
# include "opencv2/features2d.hpp"
#endif
@ -28,9 +24,6 @@ JNI_OnLoad(JavaVM* vm, void* )
return -1;
bool init = true;
#ifdef HAVE_OPENCV_NONFREE
init &= cv::initModule_nonfree();
#endif
#ifdef HAVE_OPENCV_FEATURES2D
init &= cv::initModule_features2d();
#endif

View File

@ -1,7 +0,0 @@
if(BUILD_ANDROID_PACKAGE)
ocv_module_disable(nonfree)
endif()
set(the_description "Functionality with possible limitations on the use")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_cudaarithm)

View File

@ -1,250 +0,0 @@
Feature Detection and Description
=================================
SIFT
----
.. ocv:class:: SIFT : public Feature2D
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
.. [Lowe04] Lowe, D. G., “Distinctive Image Features from Scale-Invariant Keypoints”, International Journal of Computer Vision, 60, 2, pp. 91-110, 2004.
SIFT::SIFT
----------
The SIFT constructors.
.. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
.. ocv:pyfunction:: cv2.SIFT([, nfeatures[, nOctaveLayers[, contrastThreshold[, edgeThreshold[, sigma]]]]]) -> <SIFT object>
:param nfeatures: The number of best features to retain. The features are ranked by their scores (measured as the local contrast in the SIFT algorithm).
:param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe's paper. The number of octaves is computed automatically from the image resolution.
:param contrastThreshold: The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the fewer features are produced by the detector.
:param edgeThreshold: The threshold used to filter out edge-like features. Note that its meaning differs from that of ``contrastThreshold``: the larger the ``edgeThreshold``, the fewer features are filtered out (more features are retained).
:param sigma: The sigma of the Gaussian applied to the input image at octave #0. If your image was captured with a weak camera with soft lenses, you might want to reduce this value.
SIFT::operator ()
-----------------
Extracts features and computes their descriptors using the SIFT algorithm
.. ocv:function:: void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SIFT.detect(image[, mask]) -> keypoints
.. ocv:pyfunction:: cv2.SIFT.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SIFT.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
:param img: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.
:param keypoints: The input/output vector of keypoints
:param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
.. note:: The Python API provides three functions. The first finds keypoints only; the second computes descriptors for the keypoints you provide; the third detects keypoints and computes their descriptors. If you want both keypoints and descriptors, call the third function directly, as in ``kp, des = cv2.SIFT.detectAndCompute(image, None)``.
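For reference, a minimal C++ sketch of the interface above (``img`` is assumed to be an 8-bit grayscale ``Mat`` loaded elsewhere)::

    SIFT sift;
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    sift( img, noArray(), keypoints, descriptors );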
SURF
----
.. ocv:class:: SURF : public Feature2D
Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from the ``CvSURFParams`` structure, which specifies the algorithm parameters:
.. ocv:member:: int extended
* 0 means that the basic descriptors (64 elements each) shall be computed
* 1 means that the extended descriptors (128 elements each) shall be computed
.. ocv:member:: int upright
* 0 means that detector computes orientation of each feature.
* 1 means that the orientation is not computed (which is much, much faster). For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting ``upright=1``.
.. ocv:member:: double hessianThreshold
Threshold for the keypoint detector. Only features whose hessian is larger than ``hessianThreshold`` are retained by the detector. Therefore, the larger the value, the fewer keypoints you will get. A good default value could be from 300 to 500, depending on the image contrast.
.. ocv:member:: int nOctaves
The number of Gaussian pyramid octaves that the detector uses. It is set to 4 by default. If you want to detect very large features, use a larger value; if you want only small features, decrease it.
.. ocv:member:: int nOctaveLayers
The number of images within each octave of a Gaussian pyramid. It is set to 2 by default.
.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
.. note::
* An example using the SURF feature detector can be found at opencv_source_code/samples/cpp/generic_descriptor_match.cpp
* Another example using the SURF feature detector, extractor and matcher can be found at opencv_source_code/samples/cpp/matcher_simple.cpp
SURF::SURF
----------
The SURF extractor constructors.
.. ocv:function:: SURF::SURF()
.. ocv:function:: SURF::SURF( double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=true, bool upright=false )
.. ocv:pyfunction:: cv2.SURF([hessianThreshold[, nOctaves[, nOctaveLayers[, extended[, upright]]]]]) -> <SURF object>
:param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
:param nOctaves: Number of pyramid octaves the keypoint detector will use.
:param nOctaveLayers: Number of octave layers within each octave.
:param extended: Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).
:param upright: Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation).
SURF::operator()
----------------
Detects keypoints and computes SURF descriptors for them.
.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints) const
.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SURF.detect(image[, mask]) -> keypoints
.. ocv:pyfunction:: cv2.SURF.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image[, mask]) -> keypoints, descriptors
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
:param image: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.
:param keypoints: The input/output vector of keypoints
:param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
:param storage: Memory storage for the output keypoints and descriptors in OpenCV 1.x API.
:param params: SURF algorithm parameters in OpenCV 1.x API.
The function is parallelized with the TBB library.
If you are using the C version, make sure you call ``cv::initModule_nonfree()`` from ``nonfree/nonfree.hpp``.
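For reference, a minimal sketch of the detect-and-describe call above (``img`` is assumed to be an 8-bit grayscale ``Mat``)::

    SURF surf( 400 );   // hessianThreshold = 400
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    surf( img, noArray(), keypoints, descriptors );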
cuda::SURF_CUDA
---------------
.. ocv:class:: cuda::SURF_CUDA
Class used for extracting Speeded Up Robust Features (SURF) from an image. ::
class SURF_CUDA
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_CUDA();
//! the full constructor taking all the necessary parameters
explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
void uploadKeypoints(const vector<KeyPoint>& keypoints,
GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU,
vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU,
vector<float>& descriptors);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = keypointsRatio * img.size().area()
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
The class ``SURF_CUDA`` implements the Speeded Up Robust Features descriptor. There is a fast multi-scale Hessian keypoint detector that can be used to find the keypoints (which is the default option). But the descriptors can also be computed for user-specified keypoints. Only 8-bit grayscale images are supported.
The class ``SURF_CUDA`` can store results in GPU and CPU memory. It provides functions to convert results between the CPU and GPU versions ( ``uploadKeypoints``, ``downloadKeypoints``, ``downloadDescriptors`` ). The format of CPU results is the same as that of ``SURF`` results. GPU results are stored in ``GpuMat``. The ``keypoints`` matrix is an :math:`\texttt{nFeatures} \times 7` matrix with the ``CV_32FC1`` type.
* ``keypoints.ptr<float>(X_ROW)[i]`` contains x coordinate of the i-th feature.
* ``keypoints.ptr<float>(Y_ROW)[i]`` contains y coordinate of the i-th feature.
* ``keypoints.ptr<float>(LAPLACIAN_ROW)[i]`` contains the laplacian sign of the i-th feature.
* ``keypoints.ptr<float>(OCTAVE_ROW)[i]`` contains the octave of the i-th feature.
* ``keypoints.ptr<float>(SIZE_ROW)[i]`` contains the size of the i-th feature.
* ``keypoints.ptr<float>(ANGLE_ROW)[i]`` contains the orientation of the i-th feature.
* ``keypoints.ptr<float>(HESSIAN_ROW)[i]`` contains the response of the i-th feature.
The ``descriptors`` matrix is an :math:`\texttt{nFeatures} \times \texttt{descriptorSize}` matrix with the ``CV_32FC1`` type.
The class ``SURF_CUDA`` uses some buffers and provides access to them. All buffers can be safely released between function calls.
.. seealso:: :ocv:class:`SURF`
.. note::
* An example for using the SURF keypoint matcher on GPU can be found at opencv_source_code/samples/gpu/surf_keypoint_matcher.cpp
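For reference, a minimal sketch of the GPU round trip described above (``img`` is assumed to be an 8-bit grayscale host ``Mat``; a CUDA-capable device is required)::

    cv::cuda::SURF_CUDA surf;
    cv::cuda::GpuMat d_img( img ), d_keypoints, d_descriptors;
    surf( d_img, cv::cuda::GpuMat(), d_keypoints, d_descriptors );
    std::vector<cv::KeyPoint> keypoints;
    surf.downloadKeypoints( d_keypoints, keypoints );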

View File

@ -1,10 +0,0 @@
********************************
nonfree. Non-free functionality
********************************
The module contains algorithms that may be patented in some countries or have some other limitations on their use.
.. toctree::
:maxdepth: 2
feature_detection

View File

@ -1,57 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_HPP__
#define __OPENCV_NONFREE_HPP__
#include "opencv2/nonfree/features2d.hpp"
namespace cv
{
CV_EXPORTS bool initModule_nonfree();
}
#endif
/* End of file. */

View File

@ -1,128 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_CUDA_HPP__
#define __OPENCV_NONFREE_CUDA_HPP__
#include "opencv2/core/cuda.hpp"
namespace cv { namespace cuda {
class CV_EXPORTS SURF_CUDA
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_CUDA();
//! the full constructor taking all the necessary parameters
explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! returns the default norm type
int defaultNorm() const;
//! upload host keypoints to device memory
void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);
//! finds the keypoints using fast hessian detector used in SURF
//! supports CV_8UC1 images
//! keypoints will have nFeatures cols and 7 rows
//! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
//! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
//! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
//! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
//! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
//! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
//! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
//! finds the keypoints and computes their descriptors.
//! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
bool useProvidedKeypoints = false);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
}} // namespace cv { namespace cuda {
#endif // __OPENCV_NONFREE_CUDA_HPP__

View File

@ -1,154 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__
#define __OPENCV_NONFREE_FEATURES_2D_HPP__
#include "opencv2/features2d.hpp"
namespace cv
{
/*!
SIFT implementation.
The class implements SIFT algorithm by D. Lowe.
*/
class CV_EXPORTS_W SIFT : public Feature2D
{
public:
CV_WRAP explicit SIFT( int nfeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10,
double sigma = 1.6);
//! returns the descriptor size in floats (128)
CV_WRAP int descriptorSize() const;
//! returns the descriptor type
CV_WRAP int descriptorType() const;
//! returns the default norm type
CV_WRAP int defaultNorm() const;
//! finds the keypoints using SIFT algorithm
void operator()(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) const;
AlgorithmInfo* info() const;
void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const;
protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW double contrastThreshold;
CV_PROP_RW double edgeThreshold;
CV_PROP_RW double sigma;
};
typedef SIFT SiftFeatureDetector;
typedef SIFT SiftDescriptorExtractor;
/*!
SURF implementation.
The class implements SURF algorithm by H. Bay et al.
*/
class CV_EXPORTS_W SURF : public Feature2D
{
public:
//! the default constructor
CV_WRAP SURF();
//! the full constructor taking all the necessary parameters
explicit CV_WRAP SURF(double hessianThreshold,
int nOctaves = 4, int nOctaveLayers = 2,
bool extended = true, bool upright = false);
//! returns the descriptor size in float's (64 or 128)
CV_WRAP int descriptorSize() const;
//! returns the descriptor type
CV_WRAP int descriptorType() const;
//! returns the descriptor type
CV_WRAP int defaultNorm() const;
//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) const;
AlgorithmInfo* info() const;
CV_PROP_RW double hessianThreshold;
CV_PROP_RW int nOctaves;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW bool extended;
CV_PROP_RW bool upright;
protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
};
typedef SURF SurfFeatureDetector;
typedef SURF SurfDescriptorExtractor;
} /* namespace cv */
#endif

View File

@ -1,48 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/nonfree.hpp"

View File

@ -1,11 +0,0 @@
#include "perf_precomp.hpp"
#include "opencv2/ts/cuda_perf.hpp"
static const char * impls[] = {
#ifdef HAVE_CUDA
"cuda",
#endif
"plain"
};
CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())

View File

@ -1,32 +0,0 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "cvconfig.h"
#include "opencv2/ts.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_OCL
# include "opencv2/nonfree/ocl.hpp"
#endif
#ifdef HAVE_CUDA
# include "opencv2/nonfree/cuda.hpp"
#endif
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#endif

View File

@ -1,66 +0,0 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
typedef perf::TestBaseWithParam<std::string> surf;
#define SURF_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(surf, detect, testing::Values(SURF_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
SURF detector;
vector<KeyPoint> points;
TEST_CYCLE() detector(frame, mask, points);
SANITY_CHECK_KEYPOINTS(points, 1e-3);
}
PERF_TEST_P(surf, extract, testing::Values(SURF_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
SURF detector;
vector<KeyPoint> points;
vector<float> descriptors;
detector(frame, mask, points);
TEST_CYCLE() detector(frame, mask, points, descriptors, true);
SANITY_CHECK(descriptors, 1e-4);
}
PERF_TEST_P(surf, full, testing::Values(SURF_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
SURF detector;
vector<KeyPoint> points;
vector<float> descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, false);
SANITY_CHECK_KEYPOINTS(points, 1e-3);
SANITY_CHECK(descriptors, 1e-4);
}

View File

@ -1,103 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
#ifdef HAVE_CUDA
#include "opencv2/ts/cuda_perf.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// SURF
#ifdef HAVE_OPENCV_CUDAARITHM
DEF_PARAM_TEST_1(Image, string);
PERF_TEST_P(Image, CUDA_SURF,
Values<std::string>("gpu/perf/aloe.png"))
{
declare.time(50.0);
const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
if (PERF_RUN_CUDA())
{
cv::cuda::SURF_CUDA d_surf;
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat d_keypoints, d_descriptors;
TEST_CYCLE() d_surf(d_img, cv::cuda::GpuMat(), d_keypoints, d_descriptors);
std::vector<cv::KeyPoint> gpu_keypoints;
d_surf.downloadKeypoints(d_keypoints, gpu_keypoints);
cv::Mat gpu_descriptors(d_descriptors);
sortKeyPoints(gpu_keypoints, gpu_descriptors);
SANITY_CHECK_KEYPOINTS(gpu_keypoints);
SANITY_CHECK(gpu_descriptors, 1e-3);
}
else
{
cv::SURF surf;
std::vector<cv::KeyPoint> cpu_keypoints;
cv::Mat cpu_descriptors;
TEST_CYCLE() surf(img, cv::noArray(), cpu_keypoints, cpu_descriptors);
SANITY_CHECK_KEYPOINTS(cpu_keypoints);
SANITY_CHECK(cpu_descriptors);
}
}
#endif // HAVE_OPENCV_CUDAARITHM
#endif // HAVE_CUDA

View File

@ -1,111 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_OCL
using namespace cv;
using namespace cv::ocl;
using namespace std;
typedef perf::TestBaseWithParam<std::string> OCL_SURF;
#define SURF_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(OCL_SURF, DISABLED_with_data_transfer, testing::Values(SURF_IMAGES))
{
string filename = getDataPath(GetParam());
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
SURF_OCL d_surf;
oclMat d_keypoints;
oclMat d_descriptors;
Mat cpu_kp;
Mat cpu_dp;
declare.time(60);
TEST_CYCLE()
{
oclMat d_src(img);
d_surf(d_src, oclMat(), d_keypoints, d_descriptors);
d_keypoints.download(cpu_kp);
d_descriptors.download(cpu_dp);
}
SANITY_CHECK(cpu_kp, 1);
SANITY_CHECK(cpu_dp, 1);
}
PERF_TEST_P(OCL_SURF, DISABLED_without_data_transfer, testing::Values(SURF_IMAGES))
{
string filename = getDataPath(GetParam());
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
SURF_OCL d_surf;
oclMat d_keypoints;
oclMat d_descriptors;
oclMat d_src(img);
declare.time(60);
TEST_CYCLE() d_surf(d_src, oclMat(), d_keypoints, d_descriptors);
Mat cpu_kp;
Mat cpu_dp;
d_keypoints.download(cpu_kp);
d_descriptors.download(cpu_dp);
SANITY_CHECK(cpu_kp, 1);
SANITY_CHECK(cpu_dp, 1);
}
#endif // HAVE_OPENCV_OCL

View File

@ -1,960 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace cuda { namespace device
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
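// Added commentary (not part of the original file): each Haar box evaluated
// below uses the standard integral-image identity -- for an integral image S,
// the sum over the rectangle (x1,y1)-(x2,y2) is
// S(x1,y1) - S(x1,y2) - S(x2,y1) + S(x2,y2) -- which corresponds exactly to
// the four tex2D(sumTex, ...) reads per box, followed by the per-box weight
// and the division by the box area.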
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
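// Added commentary (not part of the original file): c_DX, c_DY and c_DXY
// encode box-filter approximations of the second-order Gaussian derivatives
// Lxx, Lyy and Lxy as (x1, y1, x2, y2, weight) rectangles on the 9x9 base
// kernel. The response computed in icvCalcLayerDetAndTrace,
// det = dx * dy - 0.81f * dxy * dxy, is the approximated Hessian determinant
// from the SURF paper, where 0.81 = 0.9^2 compensates for the box-filter
// approximation of Lxy.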
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
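// Illustrative values (not in the original source): with HAAR_SIZE0 = 9 and
// HAAR_SIZE_INC = 6, octave 0 uses filter sizes 9, 15, 21, 27 for layers
// 0..3, and every following octave doubles them (octave 1: 18, 30, 42, 54).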
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
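// Added commentary (not part of the original file): the kernel below loads
// three consecutive determinant layers into shared memory (N9) with a
// one-pixel apron -- hence the blockDim - 2 stride used in the launch grid --
// so each interior thread can compare its value against all 26 neighbours of
// the 3x3x3 scale-space cube without extra global-memory reads.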
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
extern __shared__ float N9[];
// The localLin and zoff variables below index into the shared hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to algebraically
// simplify these expressions without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
else
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
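// Added commentary (not part of the original file): the kernel below refines
// each detected maximum to subpixel/subscale accuracy by fitting a quadratic
// to the 3x3x3 determinant neighbourhood N9 and solving H * x = dD, where dD
// already carries the minus sign of the gradient; this is the standard
// offset x = -H^{-1} * grad(D), accepted only when |x_i| <= 1 in all three
// dimensions.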
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// proceed only if grad_wav_size fits inside the image
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
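// Added commentary (not part of the original file): c_aptX/c_aptY/c_aptW
// tabulate the ORI_SAMPLES = 113 sample offsets (a disc of radius 6, in
// units of the keypoint scale s) together with their Gaussian weights. The
// orientation kernel slides a window of ORI_WIN = 60 degrees over the sample
// angles in ORI_SEARCH_INC = 5 degree steps and keeps the direction whose
// summed (X, Y) response has the largest magnitude.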
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// bail out if grad_wav_size does not fit inside the image
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
plus<float> op;
device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
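// Added commentary (not part of the original file): c_DW is a precomputed
// PATCH_SZ x PATCH_SZ (20x20) Gaussian weighting table applied to the
// per-pixel dx/dy responses of the descriptor patch; note that the values
// are symmetric about the patch center.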
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;
float centerY;
float win_offset;
float cos_dir;
float sin_dir;
int width;
int height;
};
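// Added commentary (not part of the original file): WinReader maps the
// (i, j) coordinates of the upright descriptor window into the rotated,
// keypoint-centered frame via cos_dir/sin_dir and samples the image texture
// there, so the patch is extracted already aligned with the keypoint
// orientation.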
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{
__shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
dx = dy = 0.0f;
WinReader win;
win.centerX = featureX[blockIdx.x];
win.centerY = featureY[blockIdx.x];
// The sampling intervals and wavelet sizes for selecting an orientation
// and building the keypoint descriptor are defined relative to 's'
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
// Extract a window of pixels around the keypoint of size 20s
const int win_size = (int)((PATCH_SZ + 1) * s);
win.width = win.height = win_size;
// Nearest neighbour version (faster)
win.win_offset = -(win_size - 1.0f) / 2.0f;
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
descriptor_dir = 0.f;
descriptor_dir *= CV_PI_F / 180.0f;
sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int xLoadInd = tid % (PATCH_SZ + 1);
const int yLoadInd = tid / (PATCH_SZ + 1);
if (yLoadInd < (PATCH_SZ + 1))
{
if (s > 1)
{
AreaFilter<WinReader> filter(win, s, s);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
}
else
{
LinearFilter<WinReader> filter(win);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
}
}
__syncthreads();
const int xPatchInd = threadIdx.x % 5;
const int yPatchInd = threadIdx.x / 5;
if (yPatchInd < 5)
{
const int xBlockInd = threadIdx.y % 4;
const int yBlockInd = threadIdx.y / 4;
const int xInd = xBlockInd * 5 + xPatchInd;
const int yInd = yBlockInd * 5 + yPatchInd;
const float dw = c_DW[yInd * PATCH_SZ + xInd];
dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
}
}
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
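// Added commentary (not part of the original file): for the standard
// 64-element descriptor, each of the 16 thread rows (threadIdx.y) owns one
// 5x5 subregion of the 20x20 patch and reduces it to the four sums
// (sum dx, sum dy, sum |dx|, sum |dy|), giving 16 subregions x 4 values =
// 64 floats per keypoint.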
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
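// Added commentary (not part of the original file): the extended 128-element
// descriptor refines the same 16 subregions by splitting each sum by the
// sign of the other derivative: (sum dx, sum |dx|) are accumulated
// separately for dy >= 0 and dy < 0, and (sum dy, sum |dy|) separately for
// dx >= 0 and dx < 0, i.e. 8 values per subregion and 16 x 8 = 128 floats.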
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
__shared__ float smem[BLOCK_DIM_X];
__shared__ float s_len;
// no need for thread ID
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
const float val = descriptor_base[threadIdx.x];
float len = val * val;
reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
if (threadIdx.x == 0)
s_len = ::sqrtf(len);
__syncthreads();
// normalize and store in output
descriptor_base[threadIdx.x] = val / s_len;
}
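// Added commentary (not part of the original file): normalize_descriptors
// performs the final L2 normalization -- each block reduces the squared
// elements of one descriptor, takes the square root, and divides every
// element by the resulting length -- so descriptors can be compared with
// plain Euclidean (NORM_L2) distance.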
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
// compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
if (descriptors.cols == 64)
{
compute_descriptors_64<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
compute_descriptors_128<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} // namespace surf
}}} // namespace cv { namespace cuda { namespace device
#endif // HAVE_OPENCV_CUDAARITHM

View File

@ -1,74 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(SURF, "Feature2D.SURF",
obj.info()->addParam(obj, "hessianThreshold", obj.hessianThreshold);
obj.info()->addParam(obj, "nOctaves", obj.nOctaves);
obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
obj.info()->addParam(obj, "extended", obj.extended);
obj.info()->addParam(obj, "upright", obj.upright))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
obj.info()->addParam(obj, "nFeatures", obj.nfeatures);
obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
obj.info()->addParam(obj, "contrastThreshold", obj.contrastThreshold);
obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
obj.info()->addParam(obj, "sigma", obj.sigma))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_nonfree(void)
{
Ptr<Algorithm> sift = createSIFT_ptr_hidden(), surf = createSURF_ptr_hidden();
return sift->info() != 0 && surf->info() != 0;
}
}
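// Added commentary (not part of the original file): CV_INIT_ALGORITHM
// registers SURF and SIFT with the 2.4-era cv::Algorithm factory under the
// names "Feature2D.SURF" and "Feature2D.SIFT", exposing the listed
// parameters through the generic get/set interface; initModule_nonfree()
// forces that registration (and the module's linkage) and reports whether
// both classes registered successfully.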

File diff suppressed because it is too large

View File

@ -1,65 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/nonfree.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/nonfree/cuda.hpp"
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#include "opencv2/core/private.hpp"
#endif

View File

@ -1,816 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**********************************************************************************************\
Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
Below is the original copyright.
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// All rights reserved.
// The following patent has been issued for methods embodied in this
// software: "Method and apparatus for identifying scale invariant features
// in an image and use of same for locating an object in an image," David
// G. Lowe, US Patent 6,711,293 (March 23, 2004). Provisional application
// filed March 8, 1999. Asignee: The University of British Columbia. For
// further details, contact David Lowe (lowe@cs.ubc.ca) or the
// University-Industry Liaison Office of the University of British
// Columbia.
// Note that restrictions imposed by this patent (and possibly others)
// exist independently of and may be in conflict with the freedoms granted
// in this license, which refers to copyright of the program, not patents
// for any methods that it implements. Both copyright and patent law must
// be obeyed to legally use and redistribute this program and it is not the
// purpose of this license to induce you to infringe any patents or other
// property right claims or to contest validity of any such claims. If you
// redistribute or use the program, then this license merely protects you
// from committing copyright infringement. It does not protect you from
// committing patent infringement. So, before you do anything with this
// program, make sure that you have permission to do so not merely in terms
// of copyright, but also in terms of patent law.
// Please note that this license is not to be understood as a guarantee
// either. If you use the program according to this license, but in
// conflict with patent law, it does not mean that the licensor will refund
// you for any losses that you incur if you are sued for your patent
// infringement.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright and
// patent notices, this list of conditions and the following
// disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Oregon State University nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**********************************************************************************************/
#include "precomp.hpp"
#include <iostream>
#include <stdarg.h>
namespace cv
{
/******************************* Defs and macros *****************************/
// default width of descriptor histogram array
static const int SIFT_DESCR_WIDTH = 4;
// default number of bins per histogram in descriptor array
static const int SIFT_DESCR_HIST_BINS = 8;
// assumed gaussian blur for input image
static const float SIFT_INIT_SIGMA = 0.5f;
// width of border in which to ignore keypoints
static const int SIFT_IMG_BORDER = 5;
// maximum steps of keypoint interpolation before failure
static const int SIFT_MAX_INTERP_STEPS = 5;
// default number of bins in histogram for orientation assignment
static const int SIFT_ORI_HIST_BINS = 36;
// determines gaussian sigma for orientation assignment
static const float SIFT_ORI_SIG_FCTR = 1.5f;
// determines the radius of the region used in orientation assignment
static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
// orientation magnitude relative to max that results in new feature
static const float SIFT_ORI_PEAK_RATIO = 0.8f;
// determines the size of a single descriptor orientation histogram
static const float SIFT_DESCR_SCL_FCTR = 3.f;
// threshold on magnitude of elements of descriptor vector
static const float SIFT_DESCR_MAG_THR = 0.2f;
// factor used to convert floating-point descriptor to unsigned char
static const float SIFT_INT_DESCR_FCTR = 512.f;
#if 0
// intermediate type used for DoG pyramids
typedef short sift_wt;
static const int SIFT_FIXPT_SCALE = 48;
#else
// intermediate type used for DoG pyramids
typedef float sift_wt;
static const int SIFT_FIXPT_SCALE = 1;
#endif
static inline void
unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
{
octave = kpt.octave & 255;
layer = (kpt.octave >> 8) & 255;
octave = octave < 128 ? octave : (-128 | octave);
scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
}
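// Worked example of the octave packing (illustrative; values follow the
// packing written in adjustLocalExtrema below): a keypoint with
// octave = -1 (the doubled input image) and layer = 2 stores 0x2FF in the
// low 16 bits of kpt.octave; unpackOctave() then recovers octave = -1,
// layer = 2 and scale = 2.0f.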
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
{
Mat gray, gray_fpt;
if( img.channels() == 3 || img.channels() == 4 )
cvtColor(img, gray, COLOR_BGR2GRAY);
else
img.copyTo(gray);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
float sig_diff;
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
Mat dbl;
resize(gray_fpt, dbl, Size(gray.cols*2, gray.rows*2), 0, 0, INTER_LINEAR);
GaussianBlur(dbl, dbl, Size(), sig_diff, sig_diff);
return dbl;
}
else
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
return gray_fpt;
}
}
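// Worked example (illustrative): with the default sigma = 1.6 and
// doubleImageSize = true, the doubled input is assumed to already carry
// 2*SIFT_INIT_SIGMA = 1.0 of blur, so the extra blur actually applied is
// sig_diff = sqrt(1.6^2 - 1.0^2) ~= 1.25.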
void SIFT::buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const
{
std::vector<double> sig(nOctaveLayers + 3);
pyr.resize(nOctaves*(nOctaveLayers + 3));
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = std::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = std::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
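// Worked example (illustrative): for nOctaveLayers = 3 and sigma = 1.6,
// k = 2^(1/3) ~= 1.26 and sig ~= { 1.60, 1.23, 1.55, 1.95, 2.45, 3.09 };
// blurring layer i-1 with sig[i] yields the total blur sigma*k^i.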
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 )
dst = base;
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
resize(src, dst, Size(src.cols/2, src.rows/2),
0, 0, INTER_NEAREST);
}
else
{
const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
GaussianBlur(src, dst, Size(), sig[i], sig[i]);
}
}
}
}
void SIFT::buildDoGPyramid( const std::vector<Mat>& gpyr, std::vector<Mat>& dogpyr ) const
{
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 2; i++ )
{
const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
}
}
}
// Computes a gradient orientation histogram at a specified pixel
static float calcOrientationHist( const Mat& img, Point pt, int radius,
float sigma, float* hist, int n )
{
int i, j, k, len = (radius*2+1)*(radius*2+1);
float expf_scale = -1.f/(2.f * sigma * sigma);
AutoBuffer<float> buf(len*4 + n+4);
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
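// note: Mag intentionally aliases X; fastAtan2() below consumes X first,
// so magnitude() can safely overwrite it in place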
float* temphist = W + len + 2;
for( i = 0; i < n; i++ )
temphist[i] = 0.f;
for( i = -radius, k = 0; i <= radius; i++ )
{
int y = pt.y + i;
if( y <= 0 || y >= img.rows - 1 )
continue;
for( j = -radius; j <= radius; j++ )
{
int x = pt.x + j;
if( x <= 0 || x >= img.cols - 1 )
continue;
float dx = (float)(img.at<sift_wt>(y, x+1) - img.at<sift_wt>(y, x-1));
float dy = (float)(img.at<sift_wt>(y-1, x) - img.at<sift_wt>(y+1, x));
X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale;
k++;
}
}
len = k;
// compute gradient values, orientations and the weights over the pixel neighborhood
exp(W, W, len);
fastAtan2(Y, X, Ori, len, true);
magnitude(X, Y, Mag, len);
for( k = 0; k < len; k++ )
{
int bin = cvRound((n/360.f)*Ori[k]);
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
}
// smooth the histogram
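// (a (1,4,6,4,1)/16 binomial kernel; temphist was allocated with two
// padding slots on each side, so the circular wrap-around writes below
// stay inside the buffer)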
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for( i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
float maxval = hist[0];
for( i = 1; i < n; i++ )
maxval = std::max(maxval, hist[i]);
return maxval;
}
//
// Interpolates a scale-space extremum's location and scale to subpixel
// accuracy to form an image feature. Rejects features with low contrast.
// Based on Section 4 of Lowe's paper.
static bool adjustLocalExtrema( const std::vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
int& layer, int& r, int& c, int nOctaveLayers,
float contrastThreshold, float edgeThreshold, float sigma )
{
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float xi=0, xr=0, xc=0, contr=0;
int i = 0;
for( ; i < SIFT_MAX_INTERP_STEPS; i++ )
{
int idx = octv*(nOctaveLayers+2) + layer;
const Mat& img = dog_pyr[idx];
const Mat& prev = dog_pyr[idx-1];
const Mat& next = dog_pyr[idx+1];
Vec3f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
(img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
(next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);
float v2 = (float)img.at<sift_wt>(r, c)*2;
float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
float dss = (next.at<sift_wt>(r, c) + prev.at<sift_wt>(r, c) - v2)*second_deriv_scale;
float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1))*cross_deriv_scale;
float dxs = (next.at<sift_wt>(r, c+1) - next.at<sift_wt>(r, c-1) -
prev.at<sift_wt>(r, c+1) + prev.at<sift_wt>(r, c-1))*cross_deriv_scale;
float dys = (next.at<sift_wt>(r+1, c) - next.at<sift_wt>(r-1, c) -
prev.at<sift_wt>(r+1, c) + prev.at<sift_wt>(r-1, c))*cross_deriv_scale;
Matx33f H(dxx, dxy, dxs,
dxy, dyy, dys,
dxs, dys, dss);
Vec3f X = H.solve(dD, DECOMP_LU);
xi = -X[2];
xr = -X[1];
xc = -X[0];
if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f )
break;
if( std::abs(xi) > (float)(INT_MAX/3) ||
std::abs(xr) > (float)(INT_MAX/3) ||
std::abs(xc) > (float)(INT_MAX/3) )
return false;
c += cvRound(xc);
r += cvRound(xr);
layer += cvRound(xi);
if( layer < 1 || layer > nOctaveLayers ||
c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER ||
r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER )
return false;
}
// ensure convergence of interpolation
if( i >= SIFT_MAX_INTERP_STEPS )
return false;
{
int idx = octv*(nOctaveLayers+2) + layer;
const Mat& img = dog_pyr[idx];
const Mat& prev = dog_pyr[idx-1];
const Mat& next = dog_pyr[idx+1];
Matx31f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
(img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
(next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);
float t = dD.dot(Matx31f(xc, xr, xi));
contr = img.at<sift_wt>(r, c)*img_scale + t * 0.5f;
if( std::abs( contr ) * nOctaveLayers < contrastThreshold )
return false;
// principal curvatures are computed using the trace and det of Hessian
float v2 = img.at<sift_wt>(r, c)*2.f;
float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1)) * cross_deriv_scale;
float tr = dxx + dyy;
float det = dxx * dyy - dxy * dxy;
if( det <= 0 || tr*tr*edgeThreshold >= (edgeThreshold + 1)*(edgeThreshold + 1)*det )
return false;
}
kpt.pt.x = (c + xc) * (1 << octv);
kpt.pt.y = (r + xr) * (1 << octv);
kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
kpt.response = std::abs(contr);
return true;
}
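// Note on the edge test above (illustrative): with the default
// edgeThreshold r = 10, the condition tr*tr/det < (r+1)^2/r rejects
// keypoints whose ratio of principal curvatures exceeds 10:1, i.e.
// edge-like responses, as in Section 4.1 of Lowe's paper.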
//
// Detects features at extrema in DoG scale space. Bad features are discarded
// based on contrast and ratio of principal curvatures.
void SIFT::findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const
{
int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
const int n = SIFT_ORI_HIST_BINS;
float hist[n];
KeyPoint kpt;
keypoints.clear();
for( int o = 0; o < nOctaves; o++ )
for( int i = 1; i <= nOctaveLayers; i++ )
{
int idx = o*(nOctaveLayers+2)+i;
const Mat& img = dog_pyr[idx];
const Mat& prev = dog_pyr[idx-1];
const Mat& next = dog_pyr[idx+1];
int step = (int)img.step1();
int rows = img.rows, cols = img.cols;
for( int r = SIFT_IMG_BORDER; r < rows-SIFT_IMG_BORDER; r++)
{
const sift_wt* currptr = img.ptr<sift_wt>(r);
const sift_wt* prevptr = prev.ptr<sift_wt>(r);
const sift_wt* nextptr = next.ptr<sift_wt>(r);
for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++)
{
sift_wt val = currptr[c];
// find local extrema with pixel accuracy
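// (val must exceed the contrast threshold and be no smaller, or no
// larger, than all 26 neighbors in its 3x3x3 scale-space neighborhood)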
if( std::abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
int r1 = r, c1 = c, layer = i;
if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1,
nOctaveLayers, (float)contrastThreshold,
(float)edgeThreshold, (float)sigma) )
continue;
float scl_octv = kpt.size*0.5f/(1 << o);
float omax = calcOrientationHist(gauss_pyr[o*(nOctaveLayers+3) + layer],
Point(c1, r1),
cvRound(SIFT_ORI_RADIUS * scl_octv),
SIFT_ORI_SIG_FCTR * scl_octv,
hist, n);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
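// parabolic interpolation of the peak position over bins l, j, r2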
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
kpt.angle = 360.f - (float)((360.f/n) * bin);
if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
kpt.angle = 0.f;
keypoints.push_back(kpt);
}
}
}
}
}
}
}
static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst )
{
Point pt(cvRound(ptf.x), cvRound(ptf.y));
float cos_t = cosf(ori*(float)(CV_PI/180));
float sin_t = sinf(ori*(float)(CV_PI/180));
float bins_per_rad = n / 360.f;
float exp_scale = -1.f/(d * d * 0.5f);
float hist_width = SIFT_DESCR_SCL_FCTR * scl;
int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = std::min(radius, (int) sqrt((double) img.cols*img.cols + img.rows*img.rows));
cos_t /= hist_width;
sin_t /= hist_width;
int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
int rows = img.rows, cols = img.cols;
AutoBuffer<float> buf(len*6 + histlen);
float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
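// note: Mag intentionally aliases Y; fastAtan2() below reads Y before
// magnitude() overwrites it in place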
float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
// Calculate sample's histogram array coords rotated relative to ori.
// Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
// r_rot = 1.5) have full weight placed in row 1 after interpolation.
float c_rot = j * cos_t - i * sin_t;
float r_rot = j * sin_t + i * cos_t;
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = pt.y + i, c = pt.x + j;
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
k++;
}
}
len = k;
fastAtan2(Y, X, Ori, len, true);
magnitude(X, Y, Mag, len);
exp(W, W, len);
for( k = 0; k < len; k++ )
{
float rbin = RBin[k], cbin = CBin[k];
float obin = (Ori[k] - ori)*bins_per_rad;
float mag = Mag[k]*W[k];
int r0 = cvFloor( rbin );
int c0 = cvFloor( cbin );
int o0 = cvFloor( obin );
rbin -= r0;
cbin -= c0;
obin -= o0;
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(n+2)] += v_rco010;
hist[idx+(n+3)] += v_rco011;
hist[idx+(d+2)*(n+2)] += v_rco100;
hist[idx+(d+2)*(n+2)+1] += v_rco101;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
}
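// (the eight v_rco* weights are the trilinear shares of one sample's
// weighted magnitude over its 2x2x2 neighboring (row, col, orientation)
// bins; they sum to mag by construction)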
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
{
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
hist[idx] += hist[idx+n];
hist[idx+1] += hist[idx+n+1];
for( k = 0; k < n; k++ )
dst[(i*d + j)*n + k] = hist[idx+k];
}
// copy histogram to the descriptor,
// apply hysteresis thresholding
// and scale the result, so that it can be easily converted
// to byte array
float nrm2 = 0;
len = d*d*n;
for( k = 0; k < len; k++ )
nrm2 += dst[k]*dst[k];
float thr = std::sqrt(nrm2)*SIFT_DESCR_MAG_THR;
for( i = 0, nrm2 = 0; i < k; i++ )
{
float val = std::min(dst[i], thr);
dst[i] = val;
nrm2 += val*val;
}
nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON);
#if 1
for( k = 0; k < len; k++ )
{
dst[k] = saturate_cast<uchar>(dst[k]*nrm2);
}
#else
float nrm1 = 0;
for( k = 0; k < len; k++ )
{
dst[k] *= nrm2;
nrm1 += dst[k];
}
nrm1 = 1.f/std::max(nrm1, FLT_EPSILON);
for( k = 0; k < len; k++ )
{
dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast<uchar>(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR);
}
#endif
}
static void calcDescriptors(const std::vector<Mat>& gpyr, const std::vector<KeyPoint>& keypoints,
Mat& descriptors, int nOctaveLayers, int firstOctave )
{
int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint kpt = keypoints[i];
int octave, layer;
float scale;
unpackOctave(kpt, octave, layer, scale);
CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
float size=kpt.size*scale;
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];
float angle = 360.f - kpt.angle;
if(std::abs(angle - 360.f) < FLT_EPSILON)
angle = 0.f;
calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
}
}
//////////////////////////////////////////////////////////////////////////////////////////
SIFT::SIFT( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
: nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma)
{
}
int SIFT::descriptorSize() const
{
return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
}
int SIFT::descriptorType() const
{
return CV_32F;
}
int SIFT::defaultNorm() const
{
return NORM_L2;
}
void SIFT::operator()(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints) const
{
(*this)(_image, _mask, keypoints, noArray());
}
void SIFT::operator()(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints) const
{
int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.empty() || image.depth() != CV_8U )
CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
if( useProvidedKeypoints )
{
firstOctave = 0;
int maxOctave = INT_MIN;
for( size_t i = 0; i < keypoints.size(); i++ )
{
int octave, layer;
float scale;
unpackOctave(keypoints[i], octave, layer, scale);
firstOctave = std::min(firstOctave, octave);
maxOctave = std::max(maxOctave, octave);
actualNLayers = std::max(actualNLayers, layer-2);
}
firstOctave = std::min(firstOctave, 0);
CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
actualNOctaves = maxOctave - firstOctave + 1;
}
Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
std::vector<Mat> gpyr, dogpyr;
int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
buildGaussianPyramid(base, gpyr, nOctaves);
buildDoGPyramid(gpyr, dogpyr);
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
if( !useProvidedKeypoints )
{
//t = (double)getTickCount();
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicated( keypoints );
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
//printf("keypoint detection time: %g\n", t*1000./tf);
if( firstOctave < 0 )
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint& kpt = keypoints[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
else
{
// filter keypoints by mask
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
Mat descriptors = _descriptors.getMat();
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
}
}
void SIFT::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
(*this)(image.getMat(), mask.getMat(), keypoints, noArray());
}
void SIFT::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}
}
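// --- Illustrative usage sketch (not part of the original file) ---
// A minimal example of the functor-style SIFT API implemented above,
// assuming the pre-refactor nonfree header layout that this PR removes.
#if 0
#include "opencv2/nonfree/features2d.hpp"

static void siftExample(const cv::Mat& gray8u)
{
    // nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma
    cv::SIFT sift(0, 3, 0.04, 10, 1.6);
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;  // CV_32F, 4*4*8 = 128 columns per keypoint
    sift(gray8u, cv::Mat(), keypoints, descriptors);
}
#endif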

File diff suppressed because it is too large

View File

@ -1,432 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDAARITHM)
cv::cuda::SURF_CUDA::SURF_CUDA() { throw_no_cuda(); }
cv::cuda::SURF_CUDA::SURF_CUDA(double, int, int, bool, float, bool) { throw_no_cuda(); }
int cv::cuda::SURF_CUDA::descriptorSize() const { throw_no_cuda(); return 0;}
void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat&, std::vector<float>&) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&, bool) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, std::vector<float>&, bool) { throw_no_cuda(); }
void cv::cuda::SURF_CUDA::releaseMemory() { throw_no_cuda(); }
#else // !defined (HAVE_CUDA)
namespace cv { namespace cuda { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
using namespace ::cv::cuda::device::surf;
namespace
{
Mutex mtx;
int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
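// Worked example (illustrative): calcSize(0, 0) = 9 (the 9x9 base
// wavelet), calcSize(0, 1) = 15, calcSize(1, 0) = 18; sizes grow by
// HAAR_SIZE_INC within an octave and double across octaves.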
class SURF_CUDA_Invoker
{
public:
SURF_CUDA_Invoker(SURF_CUDA& surf, const GpuMat& img, const GpuMat& mask) :
surf_(surf),
img_cols(img.cols), img_rows(img.rows),
use_mask(!mask.empty())
{
CV_Assert(!img.empty() && img.type() == CV_8UC1);
CV_Assert(mask.empty() || (mask.size() == img.size() && mask.type() == CV_8UC1));
CV_Assert(surf_.nOctaves > 0 && surf_.nOctaveLayers > 0);
const int min_size = calcSize(surf_.nOctaves - 1, 0);
CV_Assert(img_rows - min_size >= 0);
CV_Assert(img_cols - min_size >= 0);
const int layer_rows = img_rows >> (surf_.nOctaves - 1);
const int layer_cols = img_cols >> (surf_.nOctaves - 1);
const int min_margin = ((calcSize((surf_.nOctaves - 1), 2) >> 1) >> (surf_.nOctaves - 1)) + 1;
CV_Assert(layer_rows - 2 * min_margin > 0);
CV_Assert(layer_cols - 2 * min_margin > 0);
maxFeatures = std::min(static_cast<int>(img.size().area() * surf.keypointsRatio), 65535);
maxCandidates = std::min(static_cast<int>(1.5 * maxFeatures), 65535);
CV_Assert(maxFeatures > 0);
counters.create(1, surf_.nOctaves + 1, CV_32SC1);
counters.setTo(Scalar::all(0));
loadGlobalConstants(maxCandidates, maxFeatures, img_rows, img_cols, surf_.nOctaveLayers, static_cast<float>(surf_.hessianThreshold));
bindImgTex(img);
cuda::integral(img, surf_.sum, surf_.intBuffer);
sumOffset = bindSumTex(surf_.sum);
if (use_mask)
{
cuda::min(mask, 1.0, surf_.mask1);
cuda::integral(surf_.mask1, surf_.maskSum, surf_.intBuffer);
maskOffset = bindMaskSumTex(surf_.maskSum);
}
}
void detectKeypoints(GpuMat& keypoints)
{
ensureSizeIsEnough(img_rows * (surf_.nOctaveLayers + 2), img_cols, CV_32FC1, surf_.det);
ensureSizeIsEnough(img_rows * (surf_.nOctaveLayers + 2), img_cols, CV_32FC1, surf_.trace);
ensureSizeIsEnough(1, maxCandidates, CV_32SC4, surf_.maxPosBuffer);
ensureSizeIsEnough(SURF_CUDA::ROWS_COUNT, maxFeatures, CV_32FC1, keypoints);
keypoints.setTo(Scalar::all(0));
for (int octave = 0; octave < surf_.nOctaves; ++octave)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
loadOctaveConstants(octave, layer_rows, layer_cols);
icvCalcLayerDetAndTrace_gpu(surf_.det, surf_.trace, img_rows, img_cols, octave, surf_.nOctaveLayers);
icvFindMaximaInLayer_gpu(surf_.det, surf_.trace, surf_.maxPosBuffer.ptr<int4>(), counters.ptr<unsigned int>() + 1 + octave,
img_rows, img_cols, octave, use_mask, surf_.nOctaveLayers);
unsigned int maxCounter;
cudaSafeCall( cudaMemcpy(&maxCounter, counters.ptr<unsigned int>() + 1 + octave, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
maxCounter = std::min(maxCounter, static_cast<unsigned int>(maxCandidates));
if (maxCounter > 0)
{
icvInterpolateKeypoint_gpu(surf_.det, surf_.maxPosBuffer.ptr<int4>(), maxCounter,
keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
keypoints.ptr<int>(SURF_CUDA::LAPLACIAN_ROW), keypoints.ptr<int>(SURF_CUDA::OCTAVE_ROW),
keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::HESSIAN_ROW),
counters.ptr<unsigned int>());
}
}
unsigned int featureCounter;
cudaSafeCall( cudaMemcpy(&featureCounter, counters.ptr<unsigned int>(), sizeof(unsigned int), cudaMemcpyDeviceToHost) );
featureCounter = std::min(featureCounter, static_cast<unsigned int>(maxFeatures));
keypoints.cols = featureCounter;
if (surf_.upright)
keypoints.row(SURF_CUDA::ANGLE_ROW).setTo(Scalar::all(360.0 - 90.0));
else
findOrientation(keypoints);
}
void findOrientation(GpuMat& keypoints)
{
const int nFeatures = keypoints.cols;
if (nFeatures > 0)
{
icvCalcOrientation_gpu(keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
}
}
void computeDescriptors(const GpuMat& keypoints, GpuMat& descriptors, int descriptorSize)
{
const int nFeatures = keypoints.cols;
if (nFeatures > 0)
{
ensureSizeIsEnough(nFeatures, descriptorSize, CV_32F, descriptors);
compute_descriptors_gpu(descriptors, keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
}
}
private:
SURF_CUDA_Invoker(const SURF_CUDA_Invoker&);
SURF_CUDA_Invoker& operator =(const SURF_CUDA_Invoker&);
SURF_CUDA& surf_;
int img_cols, img_rows;
bool use_mask;
int maxCandidates;
int maxFeatures;
size_t maskOffset;
size_t sumOffset;
GpuMat counters;
};
}
cv::cuda::SURF_CUDA::SURF_CUDA()
{
hessianThreshold = 100;
extended = true;
nOctaves = 4;
nOctaveLayers = 2;
keypointsRatio = 0.01f;
upright = false;
}
cv::cuda::SURF_CUDA::SURF_CUDA(double _threshold, int _nOctaves, int _nOctaveLayers, bool _extended, float _keypointsRatio, bool _upright)
{
hessianThreshold = _threshold;
extended = _extended;
nOctaves = _nOctaves;
nOctaveLayers = _nOctaveLayers;
keypointsRatio = _keypointsRatio;
upright = _upright;
}
int cv::cuda::SURF_CUDA::descriptorSize() const
{
return extended ? 128 : 64;
}
int cv::cuda::SURF_CUDA::defaultNorm() const
{
return NORM_L2;
}
void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU)
{
if (keypoints.empty())
keypointsGPU.release();
else
{
Mat keypointsCPU(SURF_CUDA::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
for (size_t i = 0, size = keypoints.size(); i < size; ++i)
{
const KeyPoint& kp = keypoints[i];
kp_x[i] = kp.pt.x;
kp_y[i] = kp.pt.y;
kp_octave[i] = kp.octave;
kp_size[i] = kp.size;
kp_dir[i] = kp.angle;
kp_hessian[i] = kp.response;
kp_laplacian[i] = 1;
}
keypointsGPU.upload(keypointsCPU);
}
}
void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints)
{
const int nFeatures = keypointsGPU.cols;
if (nFeatures == 0)
keypoints.clear();
else
{
CV_Assert(keypointsGPU.type() == CV_32FC1 && keypointsGPU.rows == ROWS_COUNT);
Mat keypointsCPU(keypointsGPU);
keypoints.resize(nFeatures);
float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
for (int i = 0; i < nFeatures; ++i)
{
KeyPoint& kp = keypoints[i];
kp.pt.x = kp_x[i];
kp.pt.y = kp_y[i];
kp.class_id = kp_laplacian[i];
kp.octave = kp_octave[i];
kp.size = kp_size[i];
kp.angle = kp_dir[i];
kp.response = kp_hessian[i];
}
}
}
void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors)
{
if (descriptorsGPU.empty())
descriptors.clear();
else
{
CV_Assert(descriptorsGPU.type() == CV_32F);
descriptors.resize(descriptorsGPU.rows * descriptorsGPU.cols);
Mat descriptorsCPU(descriptorsGPU.size(), CV_32F, &descriptors[0]);
descriptorsGPU.download(descriptorsCPU);
}
}
void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
{
AutoLock lock(mtx);
if (!img.empty())
{
SURF_CUDA_Invoker surf(*this, img, mask);
surf.detectKeypoints(keypoints);
}
}
void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints)
{
AutoLock lock(mtx);
if (!img.empty())
{
SURF_CUDA_Invoker surf(*this, img, mask);
if (!useProvidedKeypoints)
surf.detectKeypoints(keypoints);
else if (!upright)
{
surf.findOrientation(keypoints);
}
surf.computeDescriptors(keypoints, descriptors, descriptorSize());
}
}
void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
{
AutoLock lock(mtx);
GpuMat keypointsGPU;
(*this)(img, mask, keypointsGPU);
downloadKeypoints(keypointsGPU, keypoints);
}
void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
GpuMat& descriptors, bool useProvidedKeypoints)
{
AutoLock lock(mtx);
GpuMat keypointsGPU;
if (useProvidedKeypoints)
uploadKeypoints(keypoints, keypointsGPU);
(*this)(img, mask, keypointsGPU, descriptors, useProvidedKeypoints);
downloadKeypoints(keypointsGPU, keypoints);
}
void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors, bool useProvidedKeypoints)
{
AutoLock lock(mtx);
GpuMat descriptorsGPU;
(*this)(img, mask, keypoints, descriptorsGPU, useProvidedKeypoints);
downloadDescriptors(descriptorsGPU, descriptors);
}
void cv::cuda::SURF_CUDA::releaseMemory()
{
sum.release();
mask1.release();
maskSum.release();
intBuffer.release();
det.release();
trace.release();
maxPosBuffer.release();
}
#endif // !defined (HAVE_CUDA)
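// --- Illustrative usage sketch (not part of the original file) ---
// A minimal GPU detect-and-describe round trip using only the SURF_CUDA
// operators defined above; assumes a CUDA-enabled build.
#if 0
static void surfCudaExample(const cv::Mat& gray8u)
{
    cv::cuda::GpuMat d_img;
    d_img.upload(gray8u);  // input must be 8-bit single-channel
    // hessianThreshold, nOctaves, nOctaveLayers, extended, keypointsRatio, upright
    cv::cuda::SURF_CUDA surf(400.0, 4, 2, false, 0.01f, false);
    std::vector<cv::KeyPoint> keypoints;
    std::vector<float> descriptors;  // 64 floats per keypoint (extended = false)
    surf(d_img, cv::cuda::GpuMat(), keypoints, descriptors, false);
}
#endif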

View File

@ -1,118 +0,0 @@
///////////// see LICENSE.txt in the OpenCV root directory //////////////
#ifndef __OPENCV_NONFREE_SURF_HPP__
#define __OPENCV_NONFREE_SURF_HPP__
namespace cv
{
//! Speeded up robust features, port from CUDA module.
////////////////////////////////// SURF //////////////////////////////////////////
class SURF_OCL
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the full constructor taking all the necessary parameters
SURF_OCL();
bool init(const SURF* params);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const { return params->extended ? 128 : 64; }
void uploadKeypoints(const std::vector<KeyPoint> &keypoints, UMat &keypointsGPU);
void downloadKeypoints(const UMat &keypointsGPU, std::vector<KeyPoint> &keypoints);
//! finds the keypoints using fast hessian detector used in SURF
//! supports CV_8UC1 images
//! keypoints will have nFeatures cols and ROWS_COUNT (7) rows
//! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
//! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
//! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
//! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
//! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
//! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
//! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
bool detect(InputArray img, InputArray mask, UMat& keypoints);
//! finds the keypoints and computes their descriptors.
//! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
bool detectAndCompute(InputArray img, InputArray mask, UMat& keypoints,
OutputArray descriptors, bool useProvidedKeypoints = false);
protected:
bool setImage(InputArray img, InputArray mask);
// kernel callers declarations
bool calcLayerDetAndTrace(int octave, int layer_rows);
bool findMaximaInLayer(int counterOffset, int octave, int layer_rows, int layer_cols);
bool interpolateKeypoint(int maxCounter, UMat &keypoints, int octave, int layer_rows, int maxFeatures);
bool calcOrientation(UMat &keypoints);
bool setUpRight(UMat &keypoints);
bool computeDescriptors(const UMat &keypoints, OutputArray descriptors);
bool detectKeypoints(UMat &keypoints);
const SURF* params;
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
UMat sum, intBuffer;
UMat det, trace;
UMat maxPosBuffer;
int img_cols, img_rows;
int maxCandidates;
int maxFeatures;
UMat img, counters;
// texture buffers
ocl::Image2D imgTex, sumTex;
bool haveImageSupport;
String kerOpts;
int status;
};
/*
template<typename _Tp> void copyVectorToUMat(const std::vector<_Tp>& v, UMat& um)
{
if(v.empty())
um.release();
else
Mat(1, (int)(v.size()*sizeof(v[0])), CV_8U, (void*)&v[0]).copyTo(um);
}
template<typename _Tp> void copyUMatToVector(const UMat& um, std::vector<_Tp>& v)
{
if(um.empty())
v.clear();
else
{
size_t sz = um.total()*um.elemSize();
CV_Assert(um.isContinuous() && (sz % sizeof(_Tp) == 0));
v.resize(sz/sizeof(_Tp));
Mat m(um.size(), um.type(), &v[0]);
um.copyTo(m);
}
}*/
}
#endif

View File

@ -1,459 +0,0 @@
/*M/////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "surf.hpp"
#include <cstdio>
#include <sstream>
#include "opencl_kernels_nonfree.hpp"
namespace cv
{
enum { ORI_SEARCH_INC=5, ORI_LOCAL_SIZE=(360 / ORI_SEARCH_INC) };
static inline int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbors of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
SURF_OCL::SURF_OCL()
{
img_cols = img_rows = maxCandidates = maxFeatures = 0;
haveImageSupport = false;
status = -1;
}
bool SURF_OCL::init(const SURF* p)
{
params = p;
if(status < 0)
{
status = 0;
if(ocl::haveOpenCL())
{
const ocl::Device& dev = ocl::Device::getDefault();
if( dev.type() == ocl::Device::TYPE_CPU || dev.doubleFPConfig() == 0 )
return false;
haveImageSupport = false;//dev.imageSupport();
kerOpts = haveImageSupport ? "-D HAVE_IMAGE2D -D DOUBLE_SUPPORT" : "";
// status = 1;
}
}
return status > 0;
}
bool SURF_OCL::setImage(InputArray _img, InputArray _mask)
{
if( status <= 0 )
return false;
if( !_mask.empty())
return false;
int imgtype = _img.type();
CV_Assert(!_img.empty());
CV_Assert(params && params->nOctaves > 0 && params->nOctaveLayers > 0);
int min_size = calcSize(params->nOctaves - 1, 0);
Size sz = _img.size();
img_cols = sz.width;
img_rows = sz.height;
CV_Assert(img_rows >= min_size && img_cols >= min_size);
const int layer_rows = img_rows >> (params->nOctaves - 1);
const int layer_cols = img_cols >> (params->nOctaves - 1);
const int min_margin = ((calcSize((params->nOctaves - 1), 2) >> 1) >> (params->nOctaves - 1)) + 1;
CV_Assert(layer_rows - 2 * min_margin > 0);
CV_Assert(layer_cols - 2 * min_margin > 0);
maxFeatures = std::min(static_cast<int>(img_cols*img_rows * 0.01f), 65535);
maxCandidates = std::min(static_cast<int>(1.5 * maxFeatures), 65535);
CV_Assert(maxFeatures > 0);
counters.create(1, params->nOctaves + 1, CV_32SC1);
counters.setTo(Scalar::all(0));
img.release();
if(_img.isUMat() && imgtype == CV_8UC1)
img = _img.getUMat();
else if( imgtype == CV_8UC1 )
_img.copyTo(img);
else
cvtColor(_img, img, COLOR_BGR2GRAY);
integral(img, sum);
if(haveImageSupport)
{
imgTex = ocl::Image2D(img);
sumTex = ocl::Image2D(sum);
}
return true;
}
bool SURF_OCL::detectKeypoints(UMat &keypoints)
{
// create image pyramid buffers
// different layers use same-sized buffers, but are sampled at different Gaussian scales.
det.create(img_rows * (params->nOctaveLayers + 2), img_cols, CV_32F);
trace.create(img_rows * (params->nOctaveLayers + 2), img_cols, CV_32FC1);
maxPosBuffer.create(1, maxCandidates, CV_32SC4);
keypoints.create(SURF_OCL::ROWS_COUNT, maxFeatures, CV_32F);
keypoints.setTo(Scalar::all(0));
Mat cpuCounters;
for (int octave = 0; octave < params->nOctaves; ++octave)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
if(!calcLayerDetAndTrace(octave, layer_rows))
return false;
if(!findMaximaInLayer(1 + octave, octave, layer_rows, layer_cols))
return false;
cpuCounters = counters.getMat(ACCESS_READ);
int maxCounter = cpuCounters.at<int>(1 + octave);
maxCounter = std::min(maxCounter, maxCandidates);
cpuCounters.release();
if (maxCounter > 0)
{
if(!interpolateKeypoint(maxCounter, keypoints, octave, layer_rows, maxFeatures))
return false;
}
}
cpuCounters = counters.getMat(ACCESS_READ);
int featureCounter = cpuCounters.at<int>(0);
featureCounter = std::min(featureCounter, maxFeatures);
cpuCounters.release();
keypoints = UMat(keypoints, Rect(0, 0, featureCounter, keypoints.rows));
if (params->upright)
return setUpRight(keypoints);
else
return calcOrientation(keypoints);
}
bool SURF_OCL::setUpRight(UMat &keypoints)
{
int nFeatures = keypoints.cols;
if( nFeatures == 0 )
return true;
size_t globalThreads[3] = {nFeatures, 1};
ocl::Kernel kerUpRight("SURF_setUpRight", ocl::nonfree::surf_oclsrc, kerOpts);
return kerUpRight.args(ocl::KernelArg::ReadWrite(keypoints)).run(2, globalThreads, 0, true);
}
bool SURF_OCL::computeDescriptors(const UMat &keypoints, OutputArray _descriptors)
{
int dsize = params->descriptorSize();
int nFeatures = keypoints.cols;
if (nFeatures == 0)
{
_descriptors.release();
return true;
}
_descriptors.create(nFeatures, dsize, CV_32F);
UMat descriptors;
if( _descriptors.isUMat() )
descriptors = _descriptors.getUMat();
else
descriptors.create(nFeatures, dsize, CV_32F);
ocl::Kernel kerCalcDesc, kerNormDesc;
if( dsize == 64 )
{
kerCalcDesc.create("SURF_computeDescriptors64", ocl::nonfree::surf_oclsrc, kerOpts);
kerNormDesc.create("SURF_normalizeDescriptors64", ocl::nonfree::surf_oclsrc, kerOpts);
}
else
{
CV_Assert(dsize == 128);
kerCalcDesc.create("SURF_computeDescriptors128", ocl::nonfree::surf_oclsrc, kerOpts);
kerNormDesc.create("SURF_normalizeDescriptors128", ocl::nonfree::surf_oclsrc, kerOpts);
}
size_t localThreads[] = {6, 6};
size_t globalThreads[] = {nFeatures*localThreads[0], localThreads[1]};
if(haveImageSupport)
{
kerCalcDesc.args(imgTex,
img_rows, img_cols,
ocl::KernelArg::ReadOnlyNoSize(keypoints),
ocl::KernelArg::WriteOnlyNoSize(descriptors));
}
else
{
kerCalcDesc.args(ocl::KernelArg::ReadOnlyNoSize(img),
img_rows, img_cols,
ocl::KernelArg::ReadOnlyNoSize(keypoints),
ocl::KernelArg::WriteOnlyNoSize(descriptors));
}
if(!kerCalcDesc.run(2, globalThreads, localThreads, true))
return false;
size_t localThreads_n[] = {dsize, 1};
size_t globalThreads_n[] = {nFeatures*localThreads_n[0], localThreads_n[1]};
globalThreads[0] = nFeatures * localThreads[0];
globalThreads[1] = localThreads[1];
bool ok = kerNormDesc.args(ocl::KernelArg::ReadWriteNoSize(descriptors)).
run(2, globalThreads_n, localThreads_n, true);
if(ok && !_descriptors.isUMat())
descriptors.copyTo(_descriptors);
return ok;
}
void SURF_OCL::uploadKeypoints(const std::vector<KeyPoint> &keypoints, UMat &keypointsGPU)
{
if (keypoints.empty())
keypointsGPU.release();
else
{
Mat keypointsCPU(SURF_OCL::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
float *kp_x = keypointsCPU.ptr<float>(SURF_OCL::X_ROW);
float *kp_y = keypointsCPU.ptr<float>(SURF_OCL::Y_ROW);
int *kp_laplacian = keypointsCPU.ptr<int>(SURF_OCL::LAPLACIAN_ROW);
int *kp_octave = keypointsCPU.ptr<int>(SURF_OCL::OCTAVE_ROW);
float *kp_size = keypointsCPU.ptr<float>(SURF_OCL::SIZE_ROW);
float *kp_dir = keypointsCPU.ptr<float>(SURF_OCL::ANGLE_ROW);
float *kp_hessian = keypointsCPU.ptr<float>(SURF_OCL::HESSIAN_ROW);
for (size_t i = 0, size = keypoints.size(); i < size; ++i)
{
const KeyPoint &kp = keypoints[i];
kp_x[i] = kp.pt.x;
kp_y[i] = kp.pt.y;
kp_octave[i] = kp.octave;
kp_size[i] = kp.size;
kp_dir[i] = kp.angle;
kp_hessian[i] = kp.response;
kp_laplacian[i] = 1;
}
keypointsCPU.copyTo(keypointsGPU);
}
}
void SURF_OCL::downloadKeypoints(const UMat &keypointsGPU, std::vector<KeyPoint> &keypoints)
{
const int nFeatures = keypointsGPU.cols;
if (nFeatures == 0)
keypoints.clear();
else
{
CV_Assert(keypointsGPU.type() == CV_32FC1 && keypointsGPU.rows == ROWS_COUNT);
Mat keypointsCPU = keypointsGPU.getMat(ACCESS_READ);
keypoints.resize(nFeatures);
float *kp_x = keypointsCPU.ptr<float>(SURF_OCL::X_ROW);
float *kp_y = keypointsCPU.ptr<float>(SURF_OCL::Y_ROW);
int *kp_laplacian = keypointsCPU.ptr<int>(SURF_OCL::LAPLACIAN_ROW);
int *kp_octave = keypointsCPU.ptr<int>(SURF_OCL::OCTAVE_ROW);
float *kp_size = keypointsCPU.ptr<float>(SURF_OCL::SIZE_ROW);
float *kp_dir = keypointsCPU.ptr<float>(SURF_OCL::ANGLE_ROW);
float *kp_hessian = keypointsCPU.ptr<float>(SURF_OCL::HESSIAN_ROW);
for (int i = 0; i < nFeatures; ++i)
{
KeyPoint &kp = keypoints[i];
kp.pt.x = kp_x[i];
kp.pt.y = kp_y[i];
kp.class_id = kp_laplacian[i];
kp.octave = kp_octave[i];
kp.size = kp_size[i];
kp.angle = kp_dir[i];
kp.response = kp_hessian[i];
}
}
}
bool SURF_OCL::detect(InputArray _img, InputArray _mask, UMat& keypoints)
{
if( !setImage(_img, _mask) )
return false;
return detectKeypoints(keypoints);
}
bool SURF_OCL::detectAndCompute(InputArray _img, InputArray _mask, UMat& keypoints,
OutputArray _descriptors, bool useProvidedKeypoints )
{
if( !setImage(_img, _mask) )
return false;
if( !useProvidedKeypoints && !detectKeypoints(keypoints) )
return false;
return computeDescriptors(keypoints, _descriptors);
}
inline int divUp(int a, int b) { return (a + b-1)/b; }
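// e.g. divUp(10, 3) == 4; rounds a global work size up to a whole number
// of work-groups (illustrative note)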
////////////////////////////
// kernel caller definitions
bool SURF_OCL::calcLayerDetAndTrace(int octave, int c_layer_rows)
{
int nOctaveLayers = params->nOctaveLayers;
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
size_t localThreads[] = {16, 16};
size_t globalThreads[] =
{
divUp(max_samples_j, (int)localThreads[0]) * localThreads[0],
divUp(max_samples_i, (int)localThreads[1]) * localThreads[1] * (nOctaveLayers + 2)
};
ocl::Kernel kerCalcDetTrace("SURF_calcLayerDetAndTrace", ocl::nonfree::surf_oclsrc, kerOpts);
if(haveImageSupport)
{
kerCalcDetTrace.args(sumTex,
img_rows, img_cols, nOctaveLayers,
octave, c_layer_rows,
ocl::KernelArg::WriteOnlyNoSize(det),
ocl::KernelArg::WriteOnlyNoSize(trace));
}
else
{
kerCalcDetTrace.args(ocl::KernelArg::ReadOnlyNoSize(sum),
img_rows, img_cols, nOctaveLayers,
octave, c_layer_rows,
ocl::KernelArg::WriteOnlyNoSize(det),
ocl::KernelArg::WriteOnlyNoSize(trace));
}
return kerCalcDetTrace.run(2, globalThreads, localThreads, true);
}
bool SURF_OCL::findMaximaInLayer(int counterOffset, int octave,
int layer_rows, int layer_cols)
{
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
int nOctaveLayers = params->nOctaveLayers;
size_t localThreads[3] = {16, 16};
size_t globalThreads[3] =
{
divUp(layer_cols - 2 * min_margin, (int)localThreads[0] - 2) * localThreads[0],
divUp(layer_rows - 2 * min_margin, (int)localThreads[1] - 2) * nOctaveLayers * localThreads[1]
};
ocl::Kernel kerFindMaxima("SURF_findMaximaInLayer", ocl::nonfree::surf_oclsrc, kerOpts);
return kerFindMaxima.args(ocl::KernelArg::ReadOnlyNoSize(det),
ocl::KernelArg::ReadOnlyNoSize(trace),
ocl::KernelArg::PtrReadWrite(maxPosBuffer),
ocl::KernelArg::PtrReadWrite(counters),
counterOffset, img_rows, img_cols,
octave, nOctaveLayers,
layer_rows, layer_cols,
maxCandidates,
(float)params->hessianThreshold).run(2, globalThreads, localThreads, true);
}
bool SURF_OCL::interpolateKeypoint(int maxCounter, UMat &keypoints, int octave, int layer_rows, int max_features)
{
size_t localThreads[3] = {3, 3, 3};
size_t globalThreads[3] = {maxCounter*localThreads[0], localThreads[1], 3};
ocl::Kernel kerInterp("SURF_interpolateKeypoint", ocl::nonfree::surf_oclsrc, kerOpts);
return kerInterp.args(ocl::KernelArg::ReadOnlyNoSize(det),
ocl::KernelArg::PtrReadOnly(maxPosBuffer),
ocl::KernelArg::ReadWriteNoSize(keypoints),
ocl::KernelArg::PtrReadWrite(counters),
img_rows, img_cols, octave, layer_rows, max_features).
run(3, globalThreads, localThreads, true);
}
bool SURF_OCL::calcOrientation(UMat &keypoints)
{
int nFeatures = keypoints.cols;
if( nFeatures == 0 )
return true;
ocl::Kernel kerOri("SURF_calcOrientation", ocl::nonfree::surf_oclsrc, kerOpts);
if( haveImageSupport )
kerOri.args(sumTex, img_rows, img_cols,
ocl::KernelArg::ReadWriteNoSize(keypoints));
else
kerOri.args(ocl::KernelArg::ReadOnlyNoSize(sum),
img_rows, img_cols,
ocl::KernelArg::ReadWriteNoSize(keypoints));
size_t localThreads[3] = {ORI_LOCAL_SIZE, 1};
size_t globalThreads[3] = {nFeatures * localThreads[0], 1};
return kerOri.run(2, globalThreads, localThreads, true);
}
}

View File

@ -1,314 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include <string>
#include <iostream>
#include <iterator>
#include <fstream>
#include <numeric>
#include <algorithm>
#include <iterator>
using namespace cv;
using namespace std;
class CV_DetectorsTest : public cvtest::BaseTest
{
public:
CV_DetectorsTest();
~CV_DetectorsTest();
protected:
void run(int);
template <class T> bool testDetector(const Mat& img, const T& detector, vector<KeyPoint>& expected);
void LoadExpected(const string& file, vector<KeyPoint>& out);
};
CV_DetectorsTest::CV_DetectorsTest()
{
}
CV_DetectorsTest::~CV_DetectorsTest() {}
void getRotation(const Mat& img, Mat& aff, Mat& out)
{
Point center(img.cols/2, img.rows/2);
aff = getRotationMatrix2D(center, 30, 1);
warpAffine( img, out, aff, img.size());
}
void getZoom(const Mat& img, Mat& aff, Mat& out)
{
const double mult = 1.2;
aff.create(2, 3, CV_64F);
double *data = aff.ptr<double>();
data[0] = mult; data[1] = 0; data[2] = 0;
data[3] = 0; data[4] = mult; data[5] = 0;
warpAffine( img, out, aff, img.size());
}
void getBlur(const Mat& img, Mat& aff, Mat& out)
{
aff.create(2, 3, CV_64F);
double *data = aff.ptr<double>();
data[0] = 1; data[1] = 0; data[2] = 0;
data[3] = 0; data[4] = 1; data[5] = 0;
GaussianBlur(img, out, Size(5, 5), 2);
}
void getBrightness(const Mat& img, Mat& aff, Mat& out)
{
aff.create(2, 3, CV_64F);
double *data = aff.ptr<double>();
data[0] = 1; data[1] = 0; data[2] = 0;
data[3] = 0; data[4] = 1; data[5] = 0;
add(img, Mat(img.size(), img.type(), Scalar(15)), out);
}
void showOrig(const Mat& img, const vector<KeyPoint>& orig_pts)
{
Mat img_color;
cvtColor(img, img_color, COLOR_GRAY2BGR);
for(size_t i = 0; i < orig_pts.size(); ++i)
circle(img_color, orig_pts[i].pt, (int)orig_pts[i].size/2, Scalar(0, 255, 0));
namedWindow("O"); imshow("O", img_color);
}
void show(const string& name, const Mat& new_img, const vector<KeyPoint>& new_pts, const vector<KeyPoint>& transf_pts)
{
Mat new_img_color;
cvtColor(new_img, new_img_color, COLOR_GRAY2BGR);
for(size_t i = 0; i < transf_pts.size(); ++i)
circle(new_img_color, transf_pts[i].pt, (int)transf_pts[i].size/2, Scalar(255, 0, 0));
for(size_t i = 0; i < new_pts.size(); ++i)
circle(new_img_color, new_pts[i].pt, (int)new_pts[i].size/2, Scalar(0, 0, 255));
namedWindow(name + "_T"); imshow(name + "_T", new_img_color);
}
struct WrapPoint
{
const double* R;
WrapPoint(const Mat& rmat) : R(rmat.ptr<double>()) { };
KeyPoint operator()(const KeyPoint& kp) const
{
KeyPoint res = kp;
res.pt.x = static_cast<float>(kp.pt.x * R[0] + kp.pt.y * R[1] + R[2]);
res.pt.y = static_cast<float>(kp.pt.x * R[3] + kp.pt.y * R[4] + R[5]);
return res;
}
};
struct sortByR { bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) { return norm(kp1.pt) < norm(kp2.pt); } };
template <class T> bool CV_DetectorsTest::testDetector(const Mat& img, const T& detector, vector<KeyPoint>& exp)
{
vector<KeyPoint> orig_kpts;
detector(img, orig_kpts);
typedef void (*TransfFunc)(const Mat& src, Mat& aff, Mat& dst);
const TransfFunc transfFunc[] = { getRotation, getZoom, getBlur, getBrightness };
//const string names[] = { "Rotation", "Zoom", "Blur", "Brightness" };
const size_t case_num = sizeof(transfFunc)/sizeof(transfFunc[0]);
vector<Mat> affs(case_num);
vector<Mat> new_imgs(case_num);
vector< vector<KeyPoint> > new_kpts(case_num);
vector< vector<KeyPoint> > transf_kpts(case_num);
//showOrig(img, orig_kpts);
for(size_t i = 0; i < case_num; ++i)
{
transfFunc[i](img, affs[i], new_imgs[i]);
detector(new_imgs[i], new_kpts[i]);
transform(orig_kpts.begin(), orig_kpts.end(), back_inserter(transf_kpts[i]), WrapPoint(affs[i]));
//show(names[i], new_imgs[i], new_kpts[i], transf_kpts[i]);
}
const float thres = 3;
const float nthres = 3;
vector<KeyPoint> result;
for(size_t i = 0; i < orig_kpts.size(); ++i)
{
const KeyPoint& okp = orig_kpts[i];
int foundCounter = 0;
for(size_t j = 0; j < case_num; ++j)
{
const KeyPoint& tkp = transf_kpts[j][i];
size_t k = 0;
for(; k < new_kpts[j].size(); ++k)
if (norm(new_kpts[j][k].pt - tkp.pt) < nthres && fabs(new_kpts[j][k].size - tkp.size) < thres)
break;
if (k != new_kpts[j].size())
++foundCounter;
}
if (foundCounter == (int)case_num)
result.push_back(okp);
}
sort(result.begin(), result.end(), sortByR());
sort(exp.begin(), exp.end(), sortByR());
if (result.size() != exp.size())
{
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
int foundCounter1 = 0;
for(size_t i = 0; i < exp.size(); ++i)
{
const KeyPoint& e = exp[i];
size_t j = 0;
for(; j < result.size(); ++j)
{
const KeyPoint& r = result[j];
if (norm(r.pt-e.pt) < nthres && fabs(r.size - e.size) < thres)
break;
}
if (j != result.size())
++foundCounter1;
}
int foundCounter2 = 0;
for(size_t i = 0; i < result.size(); ++i)
{
const KeyPoint& r = result[i];
size_t j = 0;
for(; j < exp.size(); ++j)
{
const KeyPoint& e = exp[j];
if (norm(r.pt-e.pt) < nthres && fabs(r.size - e.size) < thres)
break;
}
if (j != exp.size())
++foundCounter2;
}
//showOrig(img, result); waitKey();
const float minAcceptRate = 0.9f;
if (float(foundCounter1)/exp.size() < minAcceptRate || float(foundCounter2)/result.size() < minAcceptRate)
{
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH);
return false;
}
return true;
}
struct SurfNoMaskWrap
{
const SURF& detector;
SurfNoMaskWrap(const SURF& surf) : detector(surf) {}
SurfNoMaskWrap& operator=(const SurfNoMaskWrap&);
void operator()(const Mat& img, vector<KeyPoint>& kpts) const { detector(img, Mat(), kpts); }
};
void CV_DetectorsTest::LoadExpected(const string& file, vector<KeyPoint>& out)
{
Mat mat_exp;
FileStorage fs(file, FileStorage::READ);
if (fs.isOpened())
{
read( fs["ResultVectorData"], mat_exp, Mat() );
out.resize(mat_exp.cols / sizeof(KeyPoint));
copy(mat_exp.ptr<KeyPoint>(), mat_exp.ptr<KeyPoint>() + out.size(), out.begin());
}
else
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA);
out.clear();
}
}
void CV_DetectorsTest::run( int /*start_from*/ )
{
Mat img = imread(string(ts->get_data_path()) + "shared/graffiti.png", 0);
if (img.empty())
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
Mat to_test(img.size() * 2, img.type(), Scalar(0));
Mat roi = to_test(Rect(img.cols/2, img.rows/2, img.cols, img.rows));
img.copyTo(roi);
GaussianBlur(to_test, to_test, Size(3, 3), 1.5);
vector<KeyPoint> exp;
LoadExpected(string(ts->get_data_path()) + "detectors/surf.xml", exp);
if (exp.empty())
return;
if (!testDetector(to_test, SurfNoMaskWrap(SURF(1536+512+512, 2)), exp))
return;
LoadExpected(string(ts->get_data_path()) + "detectors/star.xml", exp);
if (exp.empty())
return;
if (!testDetector(to_test, StarDetector(45, 30, 10, 8, 5), exp))
return;
ts->set_failed_test_info( cvtest::TS::OK);
}
TEST(Features2d_Detectors, regression) { CV_DetectorsTest test; test.safe_run(); }
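The repeatability idea this regression test encodes by hand is also available through the features2d helper evaluateFeatureDetector(), which stays in the main repository after this refactoring (it is used further below in descriptor_extractor_matcher.cpp). A hedged sketch, with a placeholder image path and ORB standing in for any detector that needs no nonfree code:

    #include <vector>
    #include "opencv2/core.hpp"
    #include "opencv2/features2d.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include "opencv2/imgproc.hpp"

    using namespace cv;

    int main()
    {
        Mat img1 = imread("img1.png", IMREAD_GRAYSCALE); // placeholder path
        if (img1.empty()) return -1;

        // Rotate by 30 degrees about the centre, as getRotation() does above.
        Mat aff = getRotationMatrix2D(Point2f(img1.cols/2.f, img1.rows/2.f), 30, 1);
        Mat img2;
        warpAffine(img1, img2, aff, img1.size());

        // evaluateFeatureDetector() expects a 3x3 homography, so lift the 2x3 affine.
        Mat H = Mat::eye(3, 3, CV_64F);
        aff.copyTo(H(Rect(0, 0, 3, 2)));

        std::vector<KeyPoint> kp1, kp2;
        Ptr<FeatureDetector> det = ORB::create();
        det->detect(img1, kp1);
        det->detect(img2, kp2);

        float repeatability; int correspCount;
        evaluateFeatureDetector(img1, img2, H, &kp1, &kp2, repeatability, correspCount);
        return repeatability > 0.5f ? 0 : 1; // arbitrary acceptance threshold
    }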

File diff suppressed because it is too large

View File

@ -1,132 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
const string FEATURES2D_DIR = "features2d";
const string IMAGE_FILENAME = "tsukuba.png";
/****************************************************************************************\
* Test for KeyPoint *
\****************************************************************************************/
class CV_FeatureDetectorKeypointsTest : public cvtest::BaseTest
{
public:
CV_FeatureDetectorKeypointsTest(const Ptr<FeatureDetector>& _detector) :
detector(_detector) {}
protected:
virtual void run(int)
{
cv::initModule_features2d();
CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
// Read the test image.
Mat image = imread(imgFilename);
if(image.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints;
detector->detect(image, keypoints);
if(keypoints.empty())
{
ts->printf(cvtest::TS::LOG, "Detector can't find keypoints in image.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
Rect r(0, 0, image.cols, image.rows);
for(size_t i = 0; i < keypoints.size(); i++)
{
const KeyPoint& kp = keypoints[i];
if(!r.contains(kp.pt))
{
ts->printf(cvtest::TS::LOG, "KeyPoint::pt is out of image (x=%f, y=%f).\n", kp.pt.x, kp.pt.y);
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
if(kp.size <= 0.f)
{
ts->printf(cvtest::TS::LOG, "KeyPoint::size is not positive (%f).\n", kp.size);
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
if((kp.angle < 0.f && kp.angle != -1.f) || kp.angle >= 360.f)
{
ts->printf(cvtest::TS::LOG, "KeyPoint::angle is out of range [0, 360). It's %f.\n", kp.angle);
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
}
ts->set_failed_test_info(cvtest::TS::OK);
}
Ptr<FeatureDetector> detector;
};
// Registration of tests
TEST(Features2d_Detector_Keypoints_SURF, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_SIFT, validation)
{
CV_FeatureDetectorKeypointsTest test(FeatureDetector::create("SIFT"));
test.safe_run();
}

View File

@ -1,3 +0,0 @@
#include "test_precomp.hpp"
CV_TEST_MAIN("cv")

View File

@ -1,32 +0,0 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/ts/cuda_test.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/ts/cuda_test.hpp"
#include "opencv2/opencv_modules.hpp"
#include "cvconfig.h"
#ifdef HAVE_OPENCV_OCL
# include "opencv2/nonfree/ocl.hpp"
#endif
#ifdef HAVE_CUDA
# include "opencv2/nonfree/cuda.hpp"
#endif
#endif

View File

@ -1,710 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
const string IMAGE_TSUKUBA = "/features2d/tsukuba.png";
const string IMAGE_BIKES = "/detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
#define SHOW_DEBUG_LOG 0
static
Mat generateHomography(float angle)
{
// angle - rotation around Oz in degrees
float angleRadian = static_cast<float>(angle * CV_PI / 180);
Mat H = Mat::eye(3, 3, CV_32FC1);
H.at<float>(0,0) = H.at<float>(1,1) = std::cos(angleRadian);
H.at<float>(0,1) = -std::sin(angleRadian);
H.at<float>(1,0) = std::sin(angleRadian);
return H;
}
static
Mat rotateImage(const Mat& srcImage, float angle, Mat& dstImage, Mat& dstMask)
{
// angle - rotation around Oz in degrees
float diag = std::sqrt(static_cast<float>(srcImage.cols * srcImage.cols + srcImage.rows * srcImage.rows));
Mat LUShift = Mat::eye(3, 3, CV_32FC1); // left up
LUShift.at<float>(0,2) = static_cast<float>(-srcImage.cols/2);
LUShift.at<float>(1,2) = static_cast<float>(-srcImage.rows/2);
Mat RDShift = Mat::eye(3, 3, CV_32FC1); // right down
RDShift.at<float>(0,2) = diag/2;
RDShift.at<float>(1,2) = diag/2;
Size sz(cvRound(diag), cvRound(diag));
Mat srcMask(srcImage.size(), CV_8UC1, Scalar(255));
Mat H = RDShift * generateHomography(angle) * LUShift;
warpPerspective(srcImage, dstImage, H, sz);
warpPerspective(srcMask, dstMask, H, sz);
return H;
}
void rotateKeyPoints(const vector<KeyPoint>& src, const Mat& H, float angle, vector<KeyPoint>& dst)
{
// assumes H is the rotation returned by rotateImage() and angle is the value passed to it
vector<Point2f> srcCenters, dstCenters;
KeyPoint::convert(src, srcCenters);
perspectiveTransform(srcCenters, dstCenters, H);
dst = src;
for(size_t i = 0; i < dst.size(); i++)
{
dst[i].pt = dstCenters[i];
float dstAngle = src[i].angle + angle;
if(dstAngle >= 360.f)
dstAngle -= 360.f;
dst[i].angle = dstAngle;
}
}
void scaleKeyPoints(const vector<KeyPoint>& src, vector<KeyPoint>& dst, float scale)
{
dst.resize(src.size());
for(size_t i = 0; i < src.size(); i++)
dst[i] = KeyPoint(src[i].pt.x * scale, src[i].pt.y * scale, src[i].size * scale, src[i].angle);
}
static
float calcCirclesIntersectArea(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
float c = static_cast<float>(norm(p0 - p1)), sqr_c = c * c;
float sqr_r0 = r0 * r0;
float sqr_r1 = r1 * r1;
if(r0 + r1 <= c)
return 0;
float minR = std::min(r0, r1);
float maxR = std::max(r0, r1);
if(c + minR <= maxR)
return static_cast<float>(CV_PI * minR * minR);
float cos_halfA0 = (sqr_r0 + sqr_c - sqr_r1) / (2 * r0 * c);
float cos_halfA1 = (sqr_r1 + sqr_c - sqr_r0) / (2 * r1 * c);
float A0 = 2 * acos(cos_halfA0);
float A1 = 2 * acos(cos_halfA1);
return 0.5f * sqr_r0 * (A0 - sin(A0)) +
0.5f * sqr_r1 * (A1 - sin(A1));
}
static
float calcIntersectRatio(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
float intersectArea = calcCirclesIntersectArea(p0, r0, p1, r1);
float unionArea = static_cast<float>(CV_PI) * (r0 * r0 + r1 * r1) - intersectArea;
return intersectArea / unionArea;
}
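As a quick sanity check on the two helpers above: identical circles give a ratio of 1, tangent circles give 0, and the value decays as the centres separate. A self-contained sketch that re-derives the same lens-area formula with plain floats (no OpenCV types), so it can be verified in isolation:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    static const float PI_F = 3.14159265358979f;

    // Same lens-area formula as calcCirclesIntersectArea() above, but with the
    // centre distance passed directly instead of cv::Point2f coordinates.
    static float intersectArea(float c, float r0, float r1)
    {
        if (r0 + r1 <= c) return 0.f;                         // disjoint circles
        float minR = std::min(r0, r1), maxR = std::max(r0, r1);
        if (c + minR <= maxR) return PI_F * minR * minR;      // full containment
        float a0 = 2.f * std::acos((r0*r0 + c*c - r1*r1) / (2.f * r0 * c));
        float a1 = 2.f * std::acos((r1*r1 + c*c - r0*r0) / (2.f * r1 * c));
        return 0.5f * r0*r0 * (a0 - std::sin(a0)) + 0.5f * r1*r1 * (a1 - std::sin(a1));
    }

    static float intersectRatio(float c, float r0, float r1) // intersection over union
    {
        float inter = intersectArea(c, r0, r1);
        return inter / (PI_F * (r0*r0 + r1*r1) - inter);
    }

    int main()
    {
        printf("%f\n", intersectRatio(0.f, 1.f, 1.f)); // identical circles -> 1
        printf("%f\n", intersectRatio(1.f, 1.f, 1.f)); // offset by one radius -> ~0.24
        printf("%f\n", intersectRatio(2.f, 1.f, 1.f)); // tangent circles -> 0
        return 0;
    }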
static
void matchKeyPoints(const vector<KeyPoint>& keypoints0, const Mat& H,
const vector<KeyPoint>& keypoints1,
vector<DMatch>& matches)
{
vector<Point2f> points0;
KeyPoint::convert(keypoints0, points0);
Mat points0t;
if(H.empty())
points0t = Mat(points0);
else
perspectiveTransform(Mat(points0), points0t, H);
matches.clear();
vector<uchar> usedMask(keypoints1.size(), 0);
for(int i0 = 0; i0 < static_cast<int>(keypoints0.size()); i0++)
{
int nearestPointIndex = -1;
float maxIntersectRatio = 0.f;
const float r0 = 0.5f * keypoints0[i0].size;
for(size_t i1 = 0; i1 < keypoints1.size(); i1++)
{
if(nearestPointIndex >= 0 && usedMask[i1])
continue;
float r1 = 0.5f * keypoints1[i1].size;
float intersectRatio = calcIntersectRatio(points0t.at<Point2f>(i0), r0,
keypoints1[i1].pt, r1);
if(intersectRatio > maxIntersectRatio)
{
maxIntersectRatio = intersectRatio;
nearestPointIndex = static_cast<int>(i1);
}
}
matches.push_back(DMatch(i0, nearestPointIndex, maxIntersectRatio));
if(nearestPointIndex >= 0)
usedMask[nearestPointIndex] = 1;
}
}
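Note that matchKeyPoints() stores the circle-overlap ratio in DMatch::distance, so larger values mean better matches here, the reverse of the usual descriptor-distance convention (DMatch's own operator< would rank the best matches last). A short sketch of ranking such matches correctly:

    #include <algorithm>
    #include <vector>
    #include "opencv2/core.hpp"

    // DMatch::distance holds an overlap score in this file, so sort in
    // descending order to put the strongest geometric matches first.
    struct ByOverlapDesc
    {
        bool operator()(const cv::DMatch& a, const cv::DMatch& b) const
        { return a.distance > b.distance; }
    };

    static void sortByOverlap(std::vector<cv::DMatch>& matches)
    {
        std::sort(matches.begin(), matches.end(), ByOverlapDesc());
    }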
static void removeVerySmallKeypoints(vector<KeyPoint>& keypoints)
{
size_t i, j = 0, n = keypoints.size();
for( i = 0; i < n; i++ )
{
// bit 7 of the packed octave field presumably marks keypoints from the
// upsampled (negative) octave; drop those and keep everything else
if( (keypoints[i].octave & 128) == 0 )
keypoints[j++] = keypoints[i];
}
keypoints.resize(j);
}
class DetectorRotationInvarianceTest : public cvtest::BaseTest
{
public:
DetectorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
float _minKeyPointMatchesRatio,
float _minAngleInliersRatio) :
featureDetector(_featureDetector),
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minAngleInliersRatio(_minAngleInliersRatio)
{
CV_Assert(featureDetector);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
// Read test data
Mat image0 = imread(imageFilename), image1, mask1;
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
removeVerySmallKeypoints(keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
featureDetector->detect(image1, keypoints1, mask1);
removeVerySmallKeypoints(keypoints1);
vector<DMatch> matches;
matchKeyPoints(keypoints0, H, keypoints1, matches);
int angleInliersCount = 0;
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check whether this inlier has a consistent angle
const float maxAngleDiff = 15.f; // degrees
float angle0 = keypoints0[matches[m].queryIdx].angle;
float angle1 = keypoints1[matches[m].trainIdx].angle;
if(angle0 == -1 || angle1 == -1)
CV_Error(Error::StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
CV_Assert(angle0 >= 0.f && angle0 < 360.f);
CV_Assert(angle1 >= 0.f && angle1 < 360.f);
float rotAngle0 = angle0 + angle;
if(rotAngle0 >= 360.f)
rotAngle0 -= 360.f;
float angleDiff = std::max(rotAngle0, angle1) - std::min(rotAngle0, angle1);
angleDiff = std::min(angleDiff, static_cast<float>(360.f - angleDiff));
CV_Assert(angleDiff >= 0.f);
bool isAngleCorrect = angleDiff < maxAngleDiff;
if(isAngleCorrect)
angleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints0.size();
if(keyPointMatchesRatio < minKeyPointMatchesRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect keyPointMatchesRatio: curr = %f, min = %f.\n",
keyPointMatchesRatio, minKeyPointMatchesRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
if(keyPointMatchesCount)
{
float angleInliersRatio = static_cast<float>(angleInliersCount) / keyPointMatchesCount;
if(angleInliersRatio < minAngleInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect angleInliersRatio: curr = %f, min = %f.\n",
angleInliersRatio, minAngleInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
}
#if SHOW_DEBUG_LOG
std::cout << "keyPointMatchesRatio - " << keyPointMatchesRatio
<< " - angleInliersRatio " << static_cast<float>(angleInliersCount) / keyPointMatchesCount << std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
float minKeyPointMatchesRatio;
float minAngleInliersRatio;
};
class DescriptorRotationInvarianceTest : public cvtest::BaseTest
{
public:
DescriptorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
const Ptr<DescriptorExtractor>& _descriptorExtractor,
int _normType,
float _minDescInliersRatio) :
featureDetector(_featureDetector),
descriptorExtractor(_descriptorExtractor),
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
CV_Assert(featureDetector);
CV_Assert(descriptorExtractor);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
// Read test data
Mat image0 = imread(imageFilename), image1, mask1;
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
Mat descriptors0;
featureDetector->detect(image0, keypoints0);
removeVerySmallKeypoints(keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(normType);
const float minIntersectRatio = 0.5f;
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
rotateKeyPoints(keypoints0, H, static_cast<float>(angle), keypoints1);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints1[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints1[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
if(descInliersRatio < minDescInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
descInliersRatio, minDescInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
#if SHOW_DEBUG_LOG
std::cout << "descInliersRatio " << static_cast<float>(descInliersCount) / keypoints0.size() << std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
int normType;
float minDescInliersRatio;
};
class DetectorScaleInvarianceTest : public cvtest::BaseTest
{
public:
DetectorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
float _minKeyPointMatchesRatio,
float _minScaleInliersRatio) :
featureDetector(_featureDetector),
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minScaleInliersRatio(_minScaleInliersRatio)
{
CV_Assert(featureDetector);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
// Read test data
Mat image0 = imread(imageFilename);
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
removeVerySmallKeypoints(keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
featureDetector->detect(image1, keypoints1);
removeVerySmallKeypoints(keypoints1);
if(keypoints1.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
if(keypoints1.size() > keypoints0.size())
{
ts->printf(cvtest::TS::LOG, "Strange behavior of the detector. "
"It gives more points count in an image of the smaller size.\n"
"original size (%d, %d), keypoints count = %d\n"
"reduced size (%d, %d), keypoints count = %d\n",
image0.cols, image0.rows, keypoints0.size(),
image1.cols, image1.rows, keypoints1.size());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
scaleKeyPoints(keypoints1, osiKeypoints1, scale);
vector<DMatch> matches;
// image1 is query image (it's reduced image0)
// image0 is train image
matchKeyPoints(osiKeypoints1, Mat(), keypoints0, matches);
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
int scaleInliersCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check whether this inlier has a consistent size
const float maxSizeDiff = 0.8f; // minimum allowed ratio of the smaller to the larger size
float size0 = keypoints0[matches[m].trainIdx].size;
float size1 = osiKeypoints1[matches[m].queryIdx].size;
CV_Assert(size0 > 0 && size1 > 0);
if(std::min(size0, size1) > maxSizeDiff * std::max(size0, size1))
scaleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints1.size();
if(keyPointMatchesRatio < minKeyPointMatchesRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect keyPointMatchesRatio: curr = %f, min = %f.\n",
keyPointMatchesRatio, minKeyPointMatchesRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
if(keyPointMatchesCount)
{
float scaleInliersRatio = static_cast<float>(scaleInliersCount) / keyPointMatchesCount;
if(scaleInliersRatio < minScaleInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect scaleInliersRatio: curr = %f, min = %f.\n",
scaleInliersRatio, minScaleInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
}
#if SHOW_DEBUG_LOG
std::cout << "keyPointMatchesRatio - " << keyPointMatchesRatio
<< " - scaleInliersRatio " << static_cast<float>(scaleInliersCount) / keyPointMatchesCount << std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
float minKeyPointMatchesRatio;
float minScaleInliersRatio;
};
class DescriptorScaleInvarianceTest : public cvtest::BaseTest
{
public:
DescriptorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
const Ptr<DescriptorExtractor>& _descriptorExtractor,
int _normType,
float _minDescInliersRatio) :
featureDetector(_featureDetector),
descriptorExtractor(_descriptorExtractor),
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
CV_Assert(featureDetector);
CV_Assert(descriptorExtractor);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
// Read test data
Mat image0 = imread(imageFilename);
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
removeVerySmallKeypoints(keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
Mat descriptors0;
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(normType);
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1;
scaleKeyPoints(keypoints0, keypoints1, 1.0f/scale);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
const float minIntersectRatio = 0.5f;
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints0[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints0[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
if(descInliersRatio < minDescInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
descInliersRatio, minDescInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
#if SHOW_DEBUG_LOG
std::cout << "descInliersRatio " << static_cast<float>(descInliersCount) / keypoints0.size() << std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
int normType;
float minDescInliersRatio;
};
// Tests registration
/*
* Detector's rotation invariance check
*/
TEST(Features2d_RotationInvariance_Detector_SURF, regression)
{
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
0.44f,
0.76f);
test.safe_run();
}
TEST(Features2d_RotationInvariance_Detector_SIFT, DISABLED_regression)
{
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
0.45f,
0.70f);
test.safe_run();
}
/*
 * Descriptor's rotation invariance check
*/
TEST(Features2d_RotationInvariance_Descriptor_SURF, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
NORM_L1,
0.83f);
test.safe_run();
}
TEST(Features2d_RotationInvariance_Descriptor_SIFT, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
Algorithm::create<DescriptorExtractor>("Feature2D.SIFT"),
NORM_L1,
0.98f);
test.safe_run();
}
/*
* Detector's scale invariance check
*/
TEST(Features2d_ScaleInvariance_Detector_SURF, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
0.64f,
0.84f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_SIFT, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
0.69f,
0.99f);
test.safe_run();
}
/*
* Descriptor's scale invariance check
*/
TEST(Features2d_ScaleInvariance_Descriptor_SURF, regression)
{
DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
NORM_L1,
0.61f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Descriptor_SIFT, regression)
{
DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
Algorithm::create<DescriptorExtractor>("Feature2D.SIFT"),
NORM_L1,
0.78f);
test.safe_run();
}
TEST(Features2d_RotationInvariance2_Detector_SURF, regression)
{
Mat cross(100, 100, CV_8UC1, Scalar(255));
line(cross, Point(30, 50), Point(69, 50), Scalar(100), 3);
line(cross, Point(50, 30), Point(50, 69), Scalar(100), 3);
SURF surf(8000., 3, 4, true, false);
vector<KeyPoint> keypoints;
surf(cross, noArray(), keypoints);
ASSERT_EQ(keypoints.size(), (vector<KeyPoint>::size_type) 5);
ASSERT_LT( fabs(keypoints[1].response - keypoints[2].response), 1e-6);
ASSERT_LT( fabs(keypoints[1].response - keypoints[3].response), 1e-6);
ASSERT_LT( fabs(keypoints[1].response - keypoints[4].response), 1e-6);
}

View File

@ -1,198 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
/////////////////////////////////////////////////////////////////////////////////////////////////
// SURF
#ifdef HAVE_OPENCV_CUDAARITHM
namespace
{
IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
}
PARAM_TEST_CASE(SURF, SURF_HessianThreshold, SURF_Octaves, SURF_OctaveLayers, SURF_Extended, SURF_Upright)
{
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
virtual void SetUp()
{
hessianThreshold = GET_PARAM(0);
nOctaves = GET_PARAM(1);
nOctaveLayers = GET_PARAM(2);
extended = GET_PARAM(3);
upright = GET_PARAM(4);
}
};
CUDA_TEST_P(SURF, Detector)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), cv::cuda::GpuMat(), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, cv::noArray(), keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
CUDA_TEST_P(SURF, Detector_Masked)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), loadMat(mask), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, mask, keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
CUDA_TEST_P(SURF, Descriptor)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints;
surf_gold(image, cv::noArray(), keypoints);
cv::cuda::GpuMat descriptors;
surf(loadMat(image), cv::cuda::GpuMat(), keypoints, descriptors, true);
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
cv::BFMatcher matcher(surf.defaultNorm());
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
EXPECT_GT(matchedRatio, 0.6);
}
INSTANTIATE_TEST_CASE_P(CUDA_Features2D, SURF, testing::Combine(
testing::Values(SURF_HessianThreshold(100.0), SURF_HessianThreshold(500.0), SURF_HessianThreshold(1000.0)),
testing::Values(SURF_Octaves(3), SURF_Octaves(4)),
testing::Values(SURF_OctaveLayers(2), SURF_OctaveLayers(3)),
testing::Values(SURF_Extended(false), SURF_Extended(true)),
testing::Values(SURF_Upright(false), SURF_Upright(true))));
#endif // HAVE_OPENCV_CUDAARITHM
#endif // HAVE_CUDA
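For reference, the GPU path exercised by these tests reduces to a few calls; a minimal hedged sketch using only the SURF_CUDA members that appear above (pre-move nonfree header as in test_precomp.hpp; the image path is a placeholder):

    #include <vector>
    #include "opencv2/core.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include "opencv2/nonfree/cuda.hpp" // pre-move header, as in test_precomp.hpp

    int main()
    {
        cv::Mat image = cv::imread("aloe.png", cv::IMREAD_GRAYSCALE); // placeholder
        if (image.empty()) return -1;

        cv::cuda::SURF_CUDA surf;
        surf.hessianThreshold = 500.0;
        surf.keypointsRatio = 0.05f; // cap on keypoints as a fraction of pixels

        cv::cuda::GpuMat gpuImage(image); // uploads the image to the device
        std::vector<cv::KeyPoint> keypoints;
        cv::cuda::GpuMat descriptors;
        surf(gpuImage, cv::cuda::GpuMat(), keypoints, descriptors); // empty mask

        return keypoints.empty() ? 1 : 0;
    }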

View File

@ -1,215 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_OPENCV_OCL
using namespace std;
using std::tr1::get;
static bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
{
const double maxPtDif = 0.1;
const double maxSizeDif = 0.1;
const double maxAngleDif = 0.1;
const double maxResponseDif = 0.01;
double dist = cv::norm(p1.pt - p2.pt);
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
fabs(p1.angle - p2.angle) < maxAngleDif &&
fabs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
{
return true;
}
return false;
}
static int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), perf::comparators::KeypointGreater());
std::sort(gold.begin(), gold.end(), perf::comparators::KeypointGreater());
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
static int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
IMPLEMENT_PARAM_CLASS(HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(Octaves, int)
IMPLEMENT_PARAM_CLASS(OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(Extended, bool)
IMPLEMENT_PARAM_CLASS(Upright, bool)
PARAM_TEST_CASE(SURF, HessianThreshold, Octaves, OctaveLayers, Extended, Upright)
{
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
virtual void SetUp()
{
hessianThreshold = get<0>(GetParam());
nOctaves = get<1>(GetParam());
nOctaveLayers = get<2>(GetParam());
extended = get<3>(GetParam());
upright = get<4>(GetParam());
}
};
TEST_P(SURF, DISABLED_Detector)
{
cv::Mat image = cv::imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/fruits.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::ocl::SURF_OCL surf;
surf.hessianThreshold = static_cast<float>(hessianThreshold);
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
std::vector<cv::KeyPoint> keypoints;
surf(cv::ocl::oclMat(image), cv::ocl::oclMat(), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, cv::noArray(), keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.99);
}
TEST_P(SURF, DISABLED_Descriptor)
{
cv::Mat image = cv::imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/fruits.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::ocl::SURF_OCL surf;
surf.hessianThreshold = static_cast<float>(hessianThreshold);
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints;
surf_gold(image, cv::noArray(), keypoints);
cv::ocl::oclMat descriptors;
surf(cv::ocl::oclMat(image), cv::ocl::oclMat(), keypoints, descriptors, true);
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
cv::BFMatcher matcher(surf.defaultNorm());
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
EXPECT_GT(matchedRatio, 0.35);
}
INSTANTIATE_TEST_CASE_P(OCL_Features2D, SURF, testing::Combine(
testing::Values(HessianThreshold(500.0), HessianThreshold(1000.0)),
testing::Values(Octaves(3), Octaves(4)),
testing::Values(OctaveLayers(2), OctaveLayers(3)),
testing::Values(Extended(false), Extended(true)),
testing::Values(Upright(false), Upright(true))));
#endif // HAVE_OPENCV_OCL

View File

@ -20,6 +20,7 @@ ocv_list_filterout(candidate_deps "^opencv_matlab$")
ocv_list_filterout(candidate_deps "^opencv_tracking$")
ocv_list_filterout(candidate_deps "^opencv_optflow$")
ocv_list_filterout(candidate_deps "^opencv_bgsegm$")
ocv_list_filterout(candidate_deps "^opencv_xfeatures2d$")
ocv_add_module(${MODULE_NAME} BINDINGS OPTIONAL ${candidate_deps})

View File

@ -1,3 +1,3 @@
set(the_description "Images stitching")
ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect
OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_nonfree)
OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d)

View File

@ -88,7 +88,7 @@ SURF features finder. ::
/* hidden */
};
.. seealso:: :ocv:class:`detail::FeaturesFinder`, :ocv:class:`SURF`
.. seealso:: :ocv:class:`detail::FeaturesFinder`, ``SURF``
detail::OrbFeaturesFinder
-------------------------

View File

@ -5,7 +5,7 @@
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
opencv_stitching opencv_videostab opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})

File diff suppressed because it is too large

View File

@ -1,305 +0,0 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void help(char** argv)
{
cout << "\nThis program demonstrats keypoint finding and matching between 2 images using features2d framework.\n"
<< " In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
<< "\n"
<< "Case1: second image is obtained from the first (given) image using random generated homography matrix\n"
<< argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image] [evaluate(0 or 1)]\n"
<< "Example of case1:\n"
<< "./descriptor_extractor_matcher SURF SURF FlannBased NoneFilter cola.jpg 0\n"
<< "\n"
<< "Case2: both images are given. If ransacReprojThreshold>=0 then homography matrix are calculated\n"
<< argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image1] [image2] [ransacReprojThreshold]\n"
<< "\n"
<< "Matches are filtered using homography matrix in case1 and case2 (if ransacReprojThreshold>=0)\n"
<< "Example of case2:\n"
<< "./descriptor_extractor_matcher SURF SURF BruteForce CrossCheckFilter cola1.jpg cola2.jpg 3\n"
<< "\n"
<< "Possible detectorType values: see in documentation on createFeatureDetector().\n"
<< "Possible descriptorType values: see in documentation on createDescriptorExtractor().\n"
<< "Possible matcherType values: see in documentation on createDescriptorMatcher().\n"
<< "Possible matcherFilterType values: NoneFilter, CrossCheckFilter." << endl;
}
#define DRAW_RICH_KEYPOINTS_MODE 0
#define DRAW_OUTLIERS_MODE 0
const string winName = "correspondences";
enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };
static int getMatcherFilterType( const string& str )
{
if( str == "NoneFilter" )
return NONE_FILTER;
if( str == "CrossCheckFilter" )
return CROSS_CHECK_FILTER;
CV_Error(Error::StsBadArg, "Invalid filter name");
return -1;
}
static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& matches12 )
{
descriptorMatcher->match( descriptors1, descriptors2, matches12 );
}
static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& filteredMatches12, int knn=1 )
{
filteredMatches12.clear();
vector<vector<DMatch> > matches12, matches21;
descriptorMatcher->knnMatch( descriptors1, descriptors2, matches12, knn );
descriptorMatcher->knnMatch( descriptors2, descriptors1, matches21, knn );
for( size_t m = 0; m < matches12.size(); m++ )
{
bool findCrossCheck = false;
for( size_t fk = 0; fk < matches12[m].size(); fk++ )
{
DMatch forward = matches12[m][fk];
for( size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++ )
{
DMatch backward = matches21[forward.trainIdx][bk];
if( backward.trainIdx == forward.queryIdx )
{
filteredMatches12.push_back(forward);
findCrossCheck = true;
break;
}
}
if( findCrossCheck ) break;
}
}
}
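For the knn=1 case, BFMatcher can perform this mutual-nearest-neighbour filtering itself through its crossCheck constructor flag; a brief sketch of that simpler alternative:

    #include <vector>
    #include "opencv2/features2d.hpp"

    // Roughly equivalent to crossCheckMatching(..., knn=1) above: with
    // crossCheck=true, BFMatcher keeps a match (i, j) only when descriptor j
    // is the nearest neighbour of i and vice versa. NORM_L2 assumes float
    // descriptors such as SURF's.
    static void builtinCrossCheck( const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                                   std::vector<cv::DMatch>& filteredMatches12 )
    {
        cv::BFMatcher matcher( cv::NORM_L2, /*crossCheck=*/true );
        matcher.match( descriptors1, descriptors2, filteredMatches12 );
    }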
static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
H.create(3, 3, CV_32FC1);
H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
warpPerspective( src, dst, H, src.size() );
}
static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
vector<KeyPoint>& keypoints1, const Mat& descriptors1,
Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
double ransacReprojThreshold, RNG& rng )
{
CV_Assert( !img1.empty() );
Mat H12;
if( isWarpPerspective )
warpPerspectiveRand(img1, img2, H12, rng );
else
CV_Assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );
cout << endl << "< Extracting keypoints from second image..." << endl;
vector<KeyPoint> keypoints2;
detector->detect( img2, keypoints2 );
cout << keypoints2.size() << " points" << endl << ">" << endl;
if( !H12.empty() && eval )
{
cout << "< Evaluate feature detector..." << endl;
float repeatability;
int correspCount;
evaluateFeatureDetector( img1, img2, H12, &keypoints1, &keypoints2, repeatability, correspCount );
cout << "repeatability = " << repeatability << endl;
cout << "correspCount = " << correspCount << endl;
cout << ">" << endl;
}
cout << "< Computing descriptors for keypoints from second image..." << endl;
Mat descriptors2;
descriptorExtractor->compute( img2, keypoints2, descriptors2 );
cout << ">" << endl;
cout << "< Matching descriptors..." << endl;
vector<DMatch> filteredMatches;
switch( matcherFilter )
{
case CROSS_CHECK_FILTER :
crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
break;
default :
simpleMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches );
}
cout << ">" << endl;
if( !H12.empty() && eval )
{
cout << "< Evaluate descriptor matcher..." << endl;
vector<Point2f> curve;
Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
Point2f firstPoint = *curve.begin();
Point2f lastPoint = *curve.rbegin();
int prevPointIndex = -1;
cout << "1-precision = " << firstPoint.x << "; recall = " << firstPoint.y << endl;
for( float l_p = 0; l_p <= 1 + FLT_EPSILON; l_p+=0.05f )
{
int nearest = getNearestPoint( curve, l_p );
if( nearest >= 0 )
{
Point2f curPoint = curve[nearest];
if( curPoint.x > firstPoint.x && curPoint.x < lastPoint.x && nearest != prevPointIndex )
{
cout << "1-precision = " << curPoint.x << "; recall = " << curPoint.y << endl;
prevPointIndex = nearest;
}
}
}
cout << "1-precision = " << lastPoint.x << "; recall = " << lastPoint.y << endl;
cout << ">" << endl;
}
vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
for( size_t i = 0; i < filteredMatches.size(); i++ )
{
queryIdxs[i] = filteredMatches[i].queryIdx;
trainIdxs[i] = filteredMatches[i].trainIdx;
}
if( !isWarpPerspective && ransacReprojThreshold >= 0 )
{
cout << "< Computing homography (RANSAC)..." << endl;
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
H12 = findHomography( Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold );
cout << ">" << endl;
}
Mat drawImg;
if( !H12.empty() ) // filter outliers
{
vector<char> matchesMask( filteredMatches.size(), 0 );
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
double maxInlierDist = ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;
for( size_t i1 = 0; i1 < points1.size(); i1++ )
{
if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
}
// draw inliers
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
);
#if DRAW_OUTLIERS_MODE
// draw outliers
for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
matchesMask[i1] = !matchesMask[i1];
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif
cout << "Number of inliers: " << countNonZero(matchesMask) << endl;
}
else
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );
imshow( winName, drawImg );
}
int main(int argc, char** argv)
{
if( argc != 7 && argc != 8 )
{
help(argv);
return -1;
}
cv::initModule_nonfree();
bool isWarpPerspective = argc == 7;
double ransacReprojThreshold = -1;
if( !isWarpPerspective )
ransacReprojThreshold = atof(argv[7]);
cout << "< Creating detector, descriptor extractor and descriptor matcher ..." << endl;
Ptr<FeatureDetector> detector = FeatureDetector::create( argv[1] );
Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( argv[2] );
Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( argv[3] );
int matcherFilterType = getMatcherFilterType( argv[4] );
bool eval = isWarpPerspective && atoi(argv[6]) != 0;
cout << ">" << endl;
if( !detector || !descriptorExtractor || !descriptorMatcher )
{
cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
return -1;
}
cout << "< Reading the images..." << endl;
Mat img1 = imread( argv[5] ), img2;
if( !isWarpPerspective )
img2 = imread( argv[6] );
cout << ">" << endl;
if( img1.empty() || (!isWarpPerspective && img2.empty()) )
{
cout << "Can not read images" << endl;
return -1;
}
cout << endl << "< Extracting keypoints from first image..." << endl;
vector<KeyPoint> keypoints1;
detector->detect( img1, keypoints1 );
cout << keypoints1.size() << " points" << endl << ">" << endl;
cout << "< Computing descriptors for keypoints from first image..." << endl;
Mat descriptors1;
descriptorExtractor->compute( img1, keypoints1, descriptors1 );
cout << ">" << endl;
namedWindow(winName, 1);
RNG rng = theRNG();
doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
ransacReprojThreshold, rng );
for(;;)
{
char c = (char)waitKey(0);
if( c == '\x1b' ) // esc
{
cout << "Exiting ..." << endl;
break;
}
else if( isWarpPerspective )
{
doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
ransacReprojThreshold, rng );
}
}
return 0;
}
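The string-based FeatureDetector::create() / DescriptorExtractor::create() factories and cv::initModule_nonfree() used above were removed by this refactoring. Below is a minimal sketch of the equivalent setup under the new API, assuming the opencv_xfeatures2d contrib module provides SURF; the makeFeature2D helper is illustrative, not a library function.
#include <string>
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;

static Ptr<Feature2D> makeFeature2D(const std::string& name)
{
    if( name == "ORB" )   return ORB::create();
    if( name == "BRISK" ) return BRISK::create();
    if( name == "SURF" )  return xfeatures2d::SURF::create(); // moved out of nonfree
    return Ptr<Feature2D>(); // unknown name -> null, caller must check
}

// One Feature2D object now covers both detection and description:
//   Ptr<Feature2D> f2d = makeFeature2D(argv[1]);
//   f2d->detectAndCompute(img1, noArray(), keypoints1, descriptors1);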

View File

@ -1,75 +0,0 @@
/*
* shape_transformation.cpp -- demo for the common shape transformer interface
*/
#include "opencv2/shape.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
static void help()
{
printf("\nThis program demonstrates how to use common interface for shape transformers\n"
"Call\n"
"shape_transformation [image1] [image2]\n");
}
int main(int argc, char** argv)
{
help();
if(argc < 3)
{
printf("Not enough parameters\n");
return -1;
}
Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
// detecting keypoints
SurfFeatureDetector detector(5000);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
BFMatcher matcher(extractor.defaultNorm());
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
// extract points
vector<Point2f> pts1, pts2;
for (size_t ii=0; ii<keypoints1.size(); ii++)
pts1.push_back( keypoints1[ii].pt );
for (size_t ii=0; ii<keypoints2.size(); ii++)
pts2.push_back( keypoints2[ii].pt );
// Apply TPS
Ptr<ThinPlateSplineShapeTransformer> mytps = createThinPlateSplineShapeTransformer(25000); //TPS with a relaxed constraint
mytps->estimateTransformation(pts1, pts2, matches);
mytps->warpImage(img2, img2);
imshow("Tranformed", img2);
waitKey(0);
return 0;
}
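With SurfFeatureDetector and SurfDescriptorExtractor gone from features2d, the keypoint stage of this sample maps onto the new interface roughly as in the following sketch, assuming opencv_xfeatures2d and its header opencv2/xfeatures2d.hpp:
Ptr<xfeatures2d::SURF> surf = xfeatures2d::SURF::create(5000); // hessian threshold
vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
surf->detectAndCompute(img1, noArray(), keypoints1, descriptors1);
surf->detectAndCompute(img2, noArray(), keypoints2, descriptors2);
BFMatcher matcher(surf->defaultNorm()); // matching is unchanged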

View File

@ -1,102 +0,0 @@
/**
* @file SURF_FlannMatcher
* @brief SURF detector + descriptor + FLANN Matcher
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace std;
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }

View File

@ -1,126 +0,0 @@
/**
* @file SURF_Homography
* @brief SURF detector + descriptor + FLANN Matcher + FindHomography
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace std;
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_object.cols, 0);
line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_Homography <img1> <img2>" << std::endl; }

View File

@ -1,73 +0,0 @@
/**
* @file SURF_descriptor
* @brief SURF detector + descriptor + BruteForce Matcher + drawing matches with OpenCV functions
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(extractor.defaultNorm());
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }

View File

@ -1,63 +0,0 @@
/**
* @file SURF_detector
* @brief SURF keypoint detection + keypoint drawing with OpenCV functions
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
imshow("Keypoints 2", img_keypoints_2 );
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }

View File

@ -1,234 +0,0 @@
/*
* video_homography.cpp
*
* Created on: Oct 18, 2010
* Author: erublee
*/
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <iostream>
#include <list>
#include <vector>
using namespace std;
using namespace cv;
static void help(char **av)
{
cout << "\nThis program demonstrated the use of features2d with the Fast corner detector and brief descriptors\n"
<< "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl;
cout << "usage: " << av[0] << " <video device number>\n" << endl;
cout << "The following keys do stuff:" << endl;
cout << " t : grabs a reference frame to match against" << endl;
cout << " l : makes the reference frame new every frame" << endl;
cout << " q or escape: quit" << endl;
}
namespace
{
void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
std::vector<cv::DMatch>& matches, Mat& img, const vector<unsigned char>& mask = vector<
unsigned char> ())
{
for (int i = 0; i < (int)matches.size(); i++)
{
if (mask.empty() || mask[i])
{
Point2f pt_new = query[matches[i].queryIdx].pt;
Point2f pt_old = train[matches[i].trainIdx].pt;
cv::line(img, pt_new, pt_old, Scalar(125, 255, 125), 1);
cv::circle(img, pt_new, 2, Scalar(255, 0, 125), 1);
}
}
}
//Takes keypoints and turns them into xy points
void keypoints2points(const vector<KeyPoint>& in, vector<Point2f>& out)
{
out.clear();
out.reserve(in.size());
for (size_t i = 0; i < in.size(); ++i)
{
out.push_back(in[i].pt);
}
}
//Takes xy points and converts them to keypoints
void points2keypoints(const vector<Point2f>& in, vector<KeyPoint>& out)
{
out.clear();
out.reserve(in.size());
for (size_t i = 0; i < in.size(); ++i)
{
out.push_back(KeyPoint(in[i], 1));
}
}
//Uses computed homography H to warp original input points to new planar position
void warpKeypoints(const Mat& H, const vector<KeyPoint>& in, vector<KeyPoint>& out)
{
vector<Point2f> pts;
keypoints2points(in, pts);
vector<Point2f> pts_w(pts.size());
Mat m_pts_w(pts_w);
perspectiveTransform(Mat(pts), m_pts_w, H);
points2keypoints(pts_w, out);
}
//Converts matching indices to xy points
void matches2points(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
const std::vector<cv::DMatch>& matches, std::vector<cv::Point2f>& pts_train,
std::vector<Point2f>& pts_query)
{
pts_train.clear();
pts_query.clear();
pts_train.reserve(matches.size());
pts_query.reserve(matches.size());
size_t i = 0;
for (; i < matches.size(); i++)
{
const DMatch & dmatch = matches[i];
pts_query.push_back(query[dmatch.queryIdx].pt);
pts_train.push_back(train[dmatch.trainIdx].pt);
}
}
void resetH(Mat&H)
{
H = Mat::eye(3, 3, CV_32FC1);
}
}
int main(int ac, char ** av)
{
if (ac != 2)
{
help(av);
return 1;
}
BriefDescriptorExtractor brief(32);
VideoCapture capture;
capture.open(atoi(av[1]));
if (!capture.isOpened())
{
help(av);
cout << "capture device " << atoi(av[1]) << " failed to open!" << endl;
return 1;
}
cout << "following keys do stuff:" << endl;
cout << "t : grabs a reference frame to match against" << endl;
cout << "l : makes the reference frame new every frame" << endl;
cout << "q or escape: quit" << endl;
Mat frame;
vector<DMatch> matches;
BFMatcher desc_matcher(brief.defaultNorm());
vector<Point2f> train_pts, query_pts;
vector<KeyPoint> train_kpts, query_kpts;
vector<unsigned char> match_mask;
Mat gray;
bool ref_live = true;
Mat train_desc, query_desc;
const int DESIRED_FTRS = 500;
GridAdaptedFeatureDetector detector(makePtr<FastFeatureDetector>(10, true), DESIRED_FTRS, 4, 4);
Mat H_prev = Mat::eye(3, 3, CV_32FC1);
for (;;)
{
capture >> frame;
if (frame.empty())
break;
cvtColor(frame, gray, COLOR_BGR2GRAY); // camera frames are BGR in OpenCV
detector.detect(gray, query_kpts); //Find interest points
brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location
if (!train_kpts.empty())
{
vector<KeyPoint> test_kpts;
warpKeypoints(H_prev.inv(), query_kpts, test_kpts);
Mat mask = windowedMatchingMask(test_kpts, train_kpts, 25, 25);
desc_matcher.match(query_desc, train_desc, matches, mask);
drawKeypoints(frame, test_kpts, frame, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG);
matches2points(train_kpts, query_kpts, matches, train_pts, query_pts);
if (matches.size() > 5)
{
Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
if (countNonZero(Mat(match_mask)) > 15)
{
H_prev = H;
}
else
resetH(H_prev);
drawMatchesRelative(train_kpts, query_kpts, matches, frame, match_mask);
}
else
resetH(H_prev);
}
else
{
H_prev = Mat::eye(3, 3, CV_32FC1);
Mat out;
drawKeypoints(gray, query_kpts, out);
frame = out;
}
imshow("frame", frame);
if (ref_live)
{
train_kpts = query_kpts;
query_desc.copyTo(train_desc);
}
char key = (char)waitKey(2);
switch (key)
{
case 'l':
ref_live = true;
resetH(H_prev);
break;
case 't':
ref_live = false;
train_kpts = query_kpts;
query_desc.copyTo(train_desc);
resetH(H_prev);
break;
case 27:
case 'q':
return 0;
break;
}
}
return 0;
}
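GridAdaptedFeatureDetector and windowedMatchingMask, which this sample depends on, were removed by the refactoring (and BriefDescriptorExtractor now lives in opencv_xfeatures2d). A minimal sketch of grid-bucketed FAST detection that keeps keypoints spread over the frame; the detectGridded helper and its parameters are illustrative, not library API:
static void detectGridded( const Mat& gray, std::vector<KeyPoint>& kpts,
                           int maxTotal = 500, int gridRows = 4, int gridCols = 4 )
{
    kpts.clear();
    Ptr<FastFeatureDetector> fast = FastFeatureDetector::create(10, true);
    const int perCell = maxTotal / (gridRows * gridCols);
    for( int r = 0; r < gridRows; r++ )
        for( int c = 0; c < gridCols; c++ )
        {
            Range rows( r * gray.rows / gridRows, (r + 1) * gray.rows / gridRows );
            Range cols( c * gray.cols / gridCols, (c + 1) * gray.cols / gridCols );
            std::vector<KeyPoint> cell;
            fast->detect( gray(rows, cols), cell );
            KeyPointsFilter::retainBest( cell, perCell ); // keep best perCell by response
            for( size_t i = 0; i < cell.size(); i++ )
            {
                cell[i].pt.x += (float)cols.start; // back to full-image coordinates
                cell[i].pt.y += (float)rows.start;
                kpts.push_back( cell[i] );
            }
        }
}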

View File

@ -1,7 +1,6 @@
SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui
opencv_ml opencv_video opencv_objdetect opencv_features2d
opencv_calib3d opencv_legacy opencv_contrib opencv_cuda
opencv_nonfree opencv_softcascade opencv_superres
opencv_calib3d opencv_cuda opencv_superres
opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc
opencv_cudafeatures2d opencv_cudaoptflow opencv_cudabgsegm
opencv_cudastereo opencv_cudalegacy)

View File

@ -1,4 +1,4 @@
SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_nonfree opencv_flann)
SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_flann)
ocv_check_dependencies(${OPENCV_TAPI_SAMPLES_REQUIRED_DEPS})

View File

@ -1,225 +0,0 @@
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
const int LOOP_NUM = 10;
const int GOOD_PTS_MAX = 50;
const float GOOD_PORTION = 0.15f;
int64 work_begin = 0;
int64 work_end = 0;
static void workBegin()
{
work_begin = getTickCount();
}
static void workEnd()
{
work_end = getTickCount() - work_begin;
}
static double getTime()
{
return work_end /((double)getTickFrequency() )* 1000.;
}
template<class KPDetector>
struct SURFDetector
{
KPDetector surf;
SURFDetector(double hessian = 800.0)
:surf(hessian)
{
}
template<class T>
void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
{
surf(in, mask, pts, descriptors, useProvided);
}
};
template<class KPMatcher>
struct SURFMatcher
{
KPMatcher matcher;
template<class T>
void match(const T& in1, const T& in2, std::vector<cv::DMatch>& matches)
{
matcher.match(in1, in2, matches);
}
};
static Mat drawGoodMatches(
const Mat& img1,
const Mat& img2,
const std::vector<KeyPoint>& keypoints1,
const std::vector<KeyPoint>& keypoints2,
std::vector<DMatch>& matches,
std::vector<Point2f>& scene_corners_
)
{
//-- Sort matches and keep the best GOOD_PORTION (15%) of them, capped at GOOD_PTS_MAX
std::sort(matches.begin(), matches.end());
std::vector< DMatch > good_matches;
double minDist = matches.front().distance;
double maxDist = matches.back().distance;
const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION));
for( int i = 0; i < ptsPairs; i++ )
{
good_matches.push_back( matches[i] );
}
std::cout << "\nMax distance: " << maxDist << std::endl;
std::cout << "Min distance: " << minDist << std::endl;
std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl;
// drawing the results
Mat img_matches;
drawMatches( img1, keypoints1, img2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
}
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0);
obj_corners[1] = Point( img1.cols, 0 );
obj_corners[2] = Point( img1.cols, img1.rows );
obj_corners[3] = Point( 0, img1.rows );
std::vector<Point2f> scene_corners(4);
Mat H = findHomography( obj, scene, RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
scene_corners_ = scene_corners;
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches,
scene_corners[0] + Point2f( (float)img1.cols, 0), scene_corners[1] + Point2f( (float)img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[1] + Point2f( (float)img1.cols, 0), scene_corners[2] + Point2f( (float)img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[2] + Point2f( (float)img1.cols, 0), scene_corners[3] + Point2f( (float)img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[3] + Point2f( (float)img1.cols, 0), scene_corners[0] + Point2f( (float)img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
return img_matches;
}
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// It uses the CPU findHomography interface to compute the transformation matrix.
int main(int argc, char* argv[])
{
const char* keys =
"{ h help | false | print help message }"
"{ l left | box.png | specify left image }"
"{ r right | box_in_scene.png | specify right image }"
"{ o output | SURF_output.jpg | specify output save path }"
"{ m cpu_mode | false | run without OpenCL }";
CommandLineParser cmd(argc, argv, keys);
if (cmd.has("help"))
{
std::cout << "Usage: surf_matcher [options]" << std::endl;
std::cout << "Available options:" << std::endl;
cmd.printMessage();
return EXIT_SUCCESS;
}
if (cmd.has("cpu_mode"))
{
ocl::setUseOpenCL(false);
std::cout << "OpenCL was disabled" << std::endl;
}
UMat img1, img2;
std::string outpath = cmd.get<std::string>("o");
std::string leftName = cmd.get<std::string>("l");
imread(leftName, IMREAD_GRAYSCALE).copyTo(img1);
if(img1.empty())
{
std::cout << "Couldn't load " << leftName << std::endl;
cmd.printMessage();
return EXIT_FAILURE;
}
std::string rightName = cmd.get<std::string>("r");
imread(rightName, IMREAD_GRAYSCALE).copyTo(img2);
if(img2.empty())
{
std::cout << "Couldn't load " << rightName << std::endl;
cmd.printMessage();
return EXIT_FAILURE;
}
double surf_time = 0.;
//declare input/output
std::vector<KeyPoint> keypoints1, keypoints2;
std::vector<DMatch> matches;
UMat _descriptors1, _descriptors2;
Mat descriptors1 = _descriptors1.getMat(ACCESS_RW),
descriptors2 = _descriptors2.getMat(ACCESS_RW);
//instantiate detectors/matchers
SURFDetector<SURF> surf;
SURFMatcher<BFMatcher> matcher;
//-- start of timing section
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
surf(img1.getMat(ACCESS_READ), Mat(), keypoints1, descriptors1);
surf(img2.getMat(ACCESS_READ), Mat(), keypoints2, descriptors2);
matcher.match(descriptors1, descriptors2, matches);
}
workEnd();
std::cout << "FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
std::vector<Point2f> corner;
Mat img_matches = drawGoodMatches(img1.getMat(ACCESS_READ), img2.getMat(ACCESS_READ), keypoints1, keypoints2, matches, corner);
//-- Show detected matches
namedWindow("surf matches", 0);
imshow("surf matches", img_matches);
imwrite(outpath, img_matches);
waitKey(0);
return EXIT_SUCCESS;
}
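After the refactoring SURF is an abstract interface obtained from SURF::create(), so the by-value KPDetector member in the SURFDetector template above no longer compiles. A sketch of the same wrapper over Ptr<Feature2D>, assuming opencv_xfeatures2d is linked:
struct SURFDetector
{
    Ptr<Feature2D> surf;
    SURFDetector(double hessian = 800.0)
    {
        surf = xfeatures2d::SURF::create(hessian);
    }
    template<class T>
    void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts,
                    T& descriptors, bool useProvided = false)
    {
        surf->detectAndCompute(in, mask, pts, descriptors, useProvided);
    }
};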