\section{Feature detection and description}
\ifCPy
\ifPy
\cvclass{CvSURFPoint}
A SURF keypoint, represented as a tuple \texttt{((x, y), laplacian, size, dir, hessian)}.
\begin{description}
\cvarg{x}{x-coordinate of the feature within the image}
\cvarg{y}{y-coordinate of the feature within the image}
\cvarg{laplacian}{-1, 0 or +1. The sign of the laplacian at the point. Can be used to speed up feature comparison, since features with laplacians of different signs cannot match}
\cvarg{size}{size of the feature}
\cvarg{dir}{orientation of the feature: 0..360 degrees}
\cvarg{hessian}{value of the hessian (can be used to approximately estimate the feature strength; see also params.hessianThreshold)}
\end{description}
\fi
\cvCPyFunc{ExtractSURF}
Extracts Speeded Up Robust Features from an image.
\cvdefC{
void cvExtractSURF( \par const CvArr* image,\par const CvArr* mask,\par CvSeq** keypoints,\par CvSeq** descriptors,\par CvMemStorage* storage,\par CvSURFParams params );
}
\cvdefPy{ExtractSURF(image,mask,storage,params)-> (keypoints,descriptors)}
\begin{description}
\cvarg{image}{The input 8-bit grayscale image}
\cvarg{mask}{The optional input 8-bit mask. The features are only found in the areas that contain more than 50\% of non-zero mask pixels}
\ifC
\cvarg{keypoints}{The output parameter; double pointer to the sequence of keypoints. The sequence of CvSURFPoint structures is as follows:}
\begin{lstlisting}
typedef struct CvSURFPoint
{
    CvPoint2D32f pt; // position of the feature within the image
    int laplacian;   // -1, 0 or +1. sign of the laplacian at the point.
                     // can be used to speed up feature comparison
                     // (normally features with laplacians of different
                     // signs can not match)
    int size;        // size of the feature
    float dir;       // orientation of the feature: 0..360 degrees
    float hessian;   // value of the hessian (can be used to
                     // approximately estimate the feature strength;
                     // see also params.hessianThreshold)
}
CvSURFPoint;
\end{lstlisting}
\cvarg{descriptors}{The optional output parameter; double pointer to the sequence of descriptors. Depending on the params.extended value, each element of the sequence will be either a 64-element or a 128-element floating-point (\texttt{CV\_32F}) vector. If the parameter is NULL, the descriptors are not computed}
\else
\cvarg{keypoints}{sequence of keypoints.}
\cvarg{descriptors}{sequence of descriptors. Each SURF descriptor is a list of floats, of length 64 or 128.}
\fi
\cvarg{storage}{Memory storage where keypoints and descriptors will be stored}
\ifC
\cvarg{params}{Various algorithm parameters put into the structure CvSURFParams:}
\begin{lstlisting}
typedef struct CvSURFParams
{
    int extended;     // 0 means basic descriptors (64 elements each),
                      // 1 means extended descriptors (128 elements each)
    double hessianThreshold; // only features with keypoint.hessian
                      // larger than that are extracted.
                      // good default value is ~300-500 (can depend on the
                      // average local contrast and sharpness of the image).
                      // user can further filter out some features based on
                      // their hessian values and other characteristics.
    int nOctaves;     // the number of octaves to be used for extraction.
                      // With each next octave the feature size is doubled
                      // (3 by default)
    int nOctaveLayers; // The number of layers within each octave
                      // (4 by default)
}
CvSURFParams;

CvSURFParams cvSURFParams(double hessianThreshold, int extended=0);
// returns default parameters
\end{lstlisting}
\else
\cvarg{params}{Various algorithm parameters in a tuple \texttt{(extended, hessianThreshold, nOctaves, nOctaveLayers)}:
\begin{description}
\cvarg{extended}{0 means basic descriptors (64 elements each), 1 means extended descriptors (128 elements each)}
\cvarg{hessianThreshold}{Only features with a hessian larger than this value are extracted. A good default value is approximately 300-500 (it can depend on the average local contrast and sharpness of the image). The user can further filter out some features based on their hessian values and other characteristics.}
\cvarg{nOctaves}{The number of octaves to be used for extraction. With each next octave the feature size is doubled (3 by default)}
\cvarg{nOctaveLayers}{The number of layers within each octave (4 by default)}
\end{description}}
\fi
\end{description}
The function cvExtractSURF finds robust features in the image, as
described in \cite{Bay06}. For each feature it returns its location, size,
orientation and optionally the descriptor, basic or extended. The function
can be used for object tracking and localization, image stitching etc.
\ifC
See the
\texttt{find\_obj.cpp} demo in OpenCV samples directory.
\else
To extract strong SURF features from an image:
\begin{lstlisting}
>>> import cv
>>> im = cv.LoadImageM("building.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
>>> (keypoints, descriptors) = cv.ExtractSURF(im, None, cv.CreateMemStorage(), (0, 30000, 3, 1))
>>> print len(keypoints), len(descriptors)
6 6
>>> for ((x, y), laplacian, size, dir, hessian) in keypoints:
...     print "x=%d y=%d laplacian=%d size=%d dir=%f hessian=%f" % (x, y, laplacian, size, dir, hessian)
x=30 y=27 laplacian=-1 size=31 dir=69.778503 hessian=36979.789062
x=296 y=197 laplacian=1 size=33 dir=111.081039 hessian=31514.349609
x=296 y=266 laplacian=1 size=32 dir=107.092300 hessian=31477.908203
x=254 y=284 laplacian=1 size=31 dir=279.137360 hessian=34169.800781
x=498 y=525 laplacian=-1 size=33 dir=278.006592 hessian=31002.759766
x=777 y=281 laplacian=1 size=70 dir=167.940964 hessian=35538.363281
\end{lstlisting}
\fi
\cvCPyFunc{GetStarKeypoints}
Retrieves keypoints using the StarDetector algorithm.
\cvdefC{
CvSeq* cvGetStarKeypoints( \par const CvArr* image,\par CvMemStorage* storage,\par CvStarDetectorParams params=cvStarDetectorParams() );
}
\cvdefPy{GetStarKeypoints(image,storage,params)-> keypoints}
\begin{description}
\cvarg{image}{The input 8-bit grayscale image}
\cvarg{storage}{Memory storage where the keypoints will be stored}
\ifC
\cvarg{params}{Various algorithm parameters given to the structure CvStarDetectorParams:}
\begin{lstlisting}
typedef struct CvStarDetectorParams
{
int maxSize; // maximal size of the features detected. The following
// values of the parameter are supported:
// 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128
int responseThreshold; // threshold for the approximatd laplacian,
// used to eliminate weak features
int lineThresholdProjected; // another threshold for laplacian to
// eliminate edges
int lineThresholdBinarized; // another threshold for the feature
// scale to eliminate edges
int suppressNonmaxSize; // linear size of a pixel neighborhood
// for non-maxima suppression
}
CvStarDetectorParams;
\end{lstlisting}
\else
\cvarg{params}{Various algorithm parameters in a tuple \texttt{(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize)}:
\begin{description}
\cvarg{maxSize}{maximal size of the features detected. The following values of the parameter are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128}
\cvarg{responseThreshold}{threshold for the approximated laplacian, used to eliminate weak features}
\cvarg{lineThresholdProjected}{another threshold for laplacian to eliminate edges}
\cvarg{lineThresholdBinarized}{another threshold for the feature scale to eliminate edges}
\cvarg{suppressNonmaxSize}{linear size of a pixel neighborhood for non-maxima suppression}
\end{description}
}
\fi
\end{description}
The function GetStarKeypoints extracts keypoints that are local
scale-space extrema. The scale-space is constructed by computing
approximate values of laplacians with different sigmas at each
pixel. Instead of using pyramids, a popular approach to save computing
time, all of the laplacians are computed at each pixel of the original
high-resolution image. But each approximate laplacian value is computed
in O(1) time regardless of the sigma, thanks to the use of integral
images. The algorithm is based on the paper \cite{Agrawal08}, but instead
of a square, hexagon or octagon it uses an 8-end star shape, hence the name,
consisting of overlapping upright and tilted squares.
\ifC
Each computed feature is represented by the following structure:
\begin{lstlisting}
typedef struct CvStarKeypoint
{
    CvPoint pt;     // coordinates of the feature
    int size;       // feature size, see CvStarDetectorParams::maxSize
    float response; // the approximated laplacian value at that point.
}
CvStarKeypoint;

inline CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response);
\end{lstlisting}
\else
Each keypoint is represented by a tuple \texttt{((x, y), size, response)}:
\begin{description}
\cvarg{x, y}{Screen coordinates of the keypoint}
\cvarg{size}{feature size, up to \texttt{maxSize}}
\cvarg{response}{approximated laplacian value for the keypoint}
\end{description}
\fi
\ifC
Below is a small usage sample:
\begin{lstlisting}
#include "cv.h"
#include "highgui.h"
int main(int argc, char** argv)
{
const char* filename = argc > 1 ? argv[1] : "lena.jpg";
IplImage* img = cvLoadImage( filename, 0 ), *cimg;
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* keypoints = 0;
int i;
if( !img )
return 0;
cvNamedWindow( "image", 1 );
cvShowImage( "image", img );
cvNamedWindow( "features", 1 );
cimg = cvCreateImage( cvGetSize(img), 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
keypoints = cvGetStarKeypoints( img, storage, cvStarDetectorParams(45) );
for( i = 0; i < (keypoints ? keypoints->total : 0); i++ )
{
CvStarKeypoint kpt = *(CvStarKeypoint*)cvGetSeqElem(keypoints, i);
int r = kpt.size/2;
cvCircle( cimg, kpt.pt, r, CV_RGB(0,255,0));
cvLine( cimg, cvPoint(kpt.pt.x + r, kpt.pt.y + r),
cvPoint(kpt.pt.x - r, kpt.pt.y - r), CV_RGB(0,255,0));
cvLine( cimg, cvPoint(kpt.pt.x - r, kpt.pt.y + r),
cvPoint(kpt.pt.x + r, kpt.pt.y - r), CV_RGB(0,255,0));
}
cvShowImage( "features", cimg );
cvWaitKey();
}
\end{lstlisting}
\fi
\fi
\ifCpp
\cvclass{KeyPoint}
Data structure for salient point detectors.
\begin{lstlisting}
class KeyPoint
{
public:
    // the default constructor
    KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0),
                 class_id(-1) {}
    // the full constructor
    KeyPoint(Point2f _pt, float _size, float _angle=-1,
             float _response=0, int _octave=0, int _class_id=-1)
            : pt(_pt), size(_size), angle(_angle), response(_response),
              octave(_octave), class_id(_class_id) {}
    // another form of the full constructor
    KeyPoint(float x, float y, float _size, float _angle=-1,
             float _response=0, int _octave=0, int _class_id=-1)
            : pt(x, y), size(_size), angle(_angle), response(_response),
              octave(_octave), class_id(_class_id) {}
    // converts vector of keypoints to vector of points
    static void convert(const std::vector<KeyPoint>& keypoints,
                        std::vector<Point2f>& points2f,
                        const std::vector<int>& keypointIndexes=std::vector<int>());
    // converts vector of points to the vector of keypoints, where each
    // keypoint is assigned the same size and the same orientation
    static void convert(const std::vector<Point2f>& points2f,
                        std::vector<KeyPoint>& keypoints,
                        float size=1, float response=1, int octave=0,
                        int class_id=-1);
    // computes overlap for pair of keypoints;
    // overlap is a ratio between area of keypoint regions intersection and
    // area of keypoint regions union (now keypoint region is circle)
    static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);

    Point2f pt;     // coordinates of the keypoint
    float size;     // diameter of the meaningful keypoint neighborhood
    float angle;    // computed orientation of the keypoint (-1 if not applicable)
    float response; // the response by which the strongest keypoints
                    // have been selected. Can be used for further sorting
                    // or subsampling
    int octave;     // octave (pyramid layer) from which the keypoint has been extracted
    int class_id;   // object class (if the keypoints need to be clustered by
                    // an object they belong to)
};

// writes vector of keypoints to the file storage
void write(FileStorage& fs, const string& name, const vector<KeyPoint>& keypoints);
// reads vector of keypoints from the specified file storage node
void read(const FileNode& node, CV_OUT vector<KeyPoint>& keypoints);
\end{lstlisting}
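Below is a minimal usage sketch of \texttt{KeyPoint::convert} and \texttt{KeyPoint::overlap}. The input file name and the FAST threshold are illustrative assumptions, not part of this reference:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("image.png", 0); // hypothetical grayscale input
    vector<KeyPoint> keypoints;
    FastFeatureDetector detector(30); // assumed threshold
    detector.detect(img, keypoints);

    // keep only the point coordinates, e.g. to feed an optical flow tracker
    vector<Point2f> points;
    KeyPoint::convert(keypoints, points);

    // intersection-over-union of the first two keypoint regions
    float ov = keypoints.size() >= 2 ?
        KeyPoint::overlap(keypoints[0], keypoints[1]) : 0.f;
    return 0;
}
\end{lstlisting}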
\cvclass{MSER}
Maximally-Stable Extremal Region Extractor
\begin{lstlisting}
class MSER : public CvMSERParams
{
public:
    // default constructor
    MSER();
    // constructor that initializes all the algorithm parameters
    MSER( int _delta, int _min_area, int _max_area,
          float _max_variation, float _min_diversity,
          int _max_evolution, double _area_threshold,
          double _min_margin, int _edge_blur_size );
    // runs the extractor on the specified image; returns the MSERs,
    // each encoded as a contour (vector<Point>, see findContours)
    // the optional mask marks the area where MSERs are searched for
    void operator()( const Mat& image, vector<vector<Point> >& msers,
                     const Mat& mask ) const;
};
\end{lstlisting}
The class encapsulates all the parameters of the MSER extraction algorithm (see \url{http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions}).
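A minimal usage sketch, assuming default algorithm parameters; the file name is a placeholder:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("image.png", 0); // hypothetical grayscale input
    MSER mser; // default parameters
    vector<vector<Point> > regions;
    mser(img, regions, Mat()); // empty Mat = no mask

    // visualize the extracted regions as green contours
    Mat vis;
    cvtColor(img, vis, CV_GRAY2BGR);
    drawContours(vis, regions, -1, Scalar(0,255,0));
    imshow("MSER", vis);
    waitKey();
    return 0;
}
\end{lstlisting}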
\cvclass{StarDetector}
Implements the Star keypoint detector.
\begin{lstlisting}
class StarDetector : CvStarDetectorParams
{
public:
    // default constructor
    StarDetector();
    // the full constructor that initializes all the algorithm parameters:
    // maxSize - maximum size of the features. The following
    //     values of the parameter are supported:
    //     4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128
    // responseThreshold - threshold for the approximated laplacian,
    //     used to eliminate weak features. The larger it is,
    //     the fewer features will be retrieved
    // lineThresholdProjected - another threshold for the laplacian to
    //     eliminate edges
    // lineThresholdBinarized - another threshold for the feature
    //     size to eliminate edges.
    // The larger the 2 thresholds, the more points you get.
    StarDetector(int maxSize, int responseThreshold,
                 int lineThresholdProjected,
                 int lineThresholdBinarized,
                 int suppressNonmaxSize);
    // finds keypoints in an image
    void operator()(const Mat& image, vector<KeyPoint>& keypoints) const;
};
\end{lstlisting}
The class implements a modified version of the CenSurE keypoint detector described in
\cite{Agrawal08}.
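A minimal usage sketch; the constructor arguments mirror the defaults of \texttt{StarFeatureDetector} below and are illustrative:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("image.png", 0); // hypothetical grayscale input
    // maxSize, responseThreshold, lineThresholdProjected,
    // lineThresholdBinarized, suppressNonmaxSize
    StarDetector star(16, 30, 10, 8, 5);
    vector<KeyPoint> keypoints;
    star(img, keypoints);
    return 0;
}
\end{lstlisting}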
\cvclass{SIFT}
Class for extracting keypoints and computing descriptors using the approach named Scale Invariant Feature Transform (SIFT).
\begin{lstlisting}
class CV_EXPORTS SIFT
{
public:
    struct CommonParams
    {
        static const int DEFAULT_NOCTAVES = 4;
        static const int DEFAULT_NOCTAVE_LAYERS = 3;
        static const int DEFAULT_FIRST_OCTAVE = -1;
        enum{ FIRST_ANGLE = 0, AVERAGE_ANGLE = 1 };

        CommonParams();
        CommonParams( int _nOctaves, int _nOctaveLayers, int _firstOctave,
                      int _angleMode );
        int nOctaves, nOctaveLayers, firstOctave;
        int angleMode;
    };

    struct DetectorParams
    {
        static double GET_DEFAULT_THRESHOLD()
        { return 0.04 / SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS / 2.0; }
        static double GET_DEFAULT_EDGE_THRESHOLD() { return 10.0; }

        DetectorParams();
        DetectorParams( double _threshold, double _edgeThreshold );
        double threshold, edgeThreshold;
    };

    struct DescriptorParams
    {
        static double GET_DEFAULT_MAGNIFICATION() { return 3.0; }
        static const bool DEFAULT_IS_NORMALIZE = true;
        static const int DESCRIPTOR_SIZE = 128;

        DescriptorParams();
        DescriptorParams( double _magnification, bool _isNormalize,
                          bool _recalculateAngles );
        double magnification;
        bool isNormalize;
        bool recalculateAngles;
    };

    SIFT();
    //! sift-detector constructor
    SIFT( double _threshold, double _edgeThreshold,
          int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
          int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
          int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
          int _angleMode=CommonParams::FIRST_ANGLE );
    //! sift-descriptor constructor
    SIFT( double _magnification, bool _isNormalize=true,
          bool _recalculateAngles = true,
          int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
          int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
          int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
          int _angleMode=CommonParams::FIRST_ANGLE );
    SIFT( const CommonParams& _commParams,
          const DetectorParams& _detectorParams = DetectorParams(),
          const DescriptorParams& _descriptorParams = DescriptorParams() );

    //! returns the descriptor size in floats (128)
    int descriptorSize() const { return DescriptorParams::DESCRIPTOR_SIZE; }
    //! finds the keypoints using SIFT algorithm
    void operator()(const Mat& img, const Mat& mask,
                    vector<KeyPoint>& keypoints) const;
    //! finds the keypoints and computes descriptors for them using SIFT algorithm.
    //! Optionally it can compute descriptors for the user-provided keypoints
    void operator()(const Mat& img, const Mat& mask,
                    vector<KeyPoint>& keypoints,
                    Mat& descriptors,
                    bool useProvidedKeypoints=false) const;

    CommonParams getCommonParams () const { return commParams; }
    DetectorParams getDetectorParams () const { return detectorParams; }
    DescriptorParams getDescriptorParams () const { return descriptorParams; }
protected:
    ...
};
\end{lstlisting}
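A minimal usage sketch with the default parameters; the input file name is an illustrative assumption:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("image.png", 0); // hypothetical grayscale input
    SIFT sift; // default detector and descriptor parameters
    vector<KeyPoint> keypoints;
    Mat descriptors;
    // detect keypoints and compute a 128-float descriptor for each
    sift(img, Mat(), keypoints, descriptors);
    // descriptors is a keypoints.size() x 128 CV_32F matrix
    return 0;
}
\end{lstlisting}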
\cvclass{SURF}
Class for extracting Speeded Up Robust Features from an image.
\begin{lstlisting}
class SURF : public CvSURFParams
{
public:
    // default constructor
    SURF();
    // constructor that initializes all the algorithm parameters
    SURF(double _hessianThreshold, int _nOctaves=4,
         int _nOctaveLayers=2, bool _extended=false);
    // returns the number of elements in each descriptor (64 or 128)
    int descriptorSize() const;
    // detects keypoints using fast multi-scale Hessian detector
    void operator()(const Mat& img, const Mat& mask,
                    vector<KeyPoint>& keypoints) const;
    // detects keypoints and computes the SURF descriptors for them
    void operator()(const Mat& img, const Mat& mask,
                    vector<KeyPoint>& keypoints,
                    vector<float>& descriptors,
                    bool useProvidedKeypoints=false) const;
};
\end{lstlisting}
The class \texttt{SURF} implements the Speeded Up Robust Features descriptor \cite{Bay06}.
There is a fast multi-scale Hessian keypoint detector that can be used to find the keypoints
(which is the default option), but the descriptors can also be computed for user-specified keypoints.
The class can be used for object tracking and localization, image stitching etc. See the
\texttt{find\_obj.cpp} demo in the OpenCV samples directory.
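A minimal usage sketch; the hessian threshold and the input file name are illustrative assumptions:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("image.png", 0); // hypothetical grayscale input
    SURF surf(500.); // hessianThreshold; default octave settings
    vector<KeyPoint> keypoints;
    vector<float> descriptors;
    surf(img, Mat(), keypoints, descriptors);
    // the flat vector stores surf.descriptorSize() floats per keypoint
    int dims = surf.descriptorSize(); // 64, since extended=false
    return 0;
}
\end{lstlisting}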
\section{Common Interfaces for Feature Detection and Descriptor Extraction}
Both detectors and descriptors in OpenCV have wrappers with a common interface that makes it easy to switch
between different algorithms solving the same problem. All objects that implement keypoint detectors inherit the
\texttt{FeatureDetector} interface. Descriptors that are represented as vectors in a multidimensional space can be
computed with the \texttt{DescriptorExtractor} interface. The \texttt{DescriptorMatcher} interface can be used to find matches between
two sets of descriptors. \texttt{GenericDescriptorMatcher} is a more generic interface for descriptors; it does not make any
assumptions about the descriptor representation. Every descriptor with the \texttt{DescriptorExtractor} interface has a wrapper with
the \texttt{GenericDescriptorMatcher} interface (see \texttt{VectorDescriptorMatcher}). There are descriptors, such as the one-way descriptor and
ferns, that have the \texttt{GenericDescriptorMatcher} interface implemented but do not support \texttt{DescriptorExtractor}.
A typical detect--describe--match pipeline built from these interfaces is sketched below.
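The following sketch is illustrative rather than normative; the input file names and the SURF hessian threshold are placeholder assumptions:
\begin{lstlisting}
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img1 = imread("object.png", 0); // hypothetical inputs
    Mat img2 = imread("scene.png", 0);

    Ptr<FeatureDetector> detector = new SurfFeatureDetector(400.);
    Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor;
    Ptr<DescriptorMatcher> matcher = new BruteForceMatcher<L2<float> >;

    vector<KeyPoint> kpts1, kpts2;
    detector->detect(img1, kpts1);
    detector->detect(img2, kpts2);

    Mat desc1, desc2;
    extractor->compute(img1, kpts1, desc1);
    extractor->compute(img2, kpts2, desc2);

    vector<DMatch> matches;
    matcher->match(desc1, desc2, matches); // best train match per query
    return 0;
}
\end{lstlisting}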
\cvclass{FeatureDetector}
Abstract base class for 2D image feature detectors.
\begin{lstlisting}
class CV_EXPORTS FeatureDetector
{
public:
    virtual ~FeatureDetector() {}
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const = 0;
    void detect( const vector<Mat>& imageCollection,
                 vector<vector<KeyPoint> >& pointCollection,
                 const vector<Mat>& masks=vector<Mat>() ) const;
    virtual void read(const FileNode&) {}
    virtual void write(FileStorage&) const {}
protected:
    ...
};
\end{lstlisting}
\cvCppFunc{FeatureDetector::detect}
Detect keypoints in an image (first variant) or image set (second variant).
\cvdefCpp{
void FeatureDetector::detect( const Mat\& image,
\par vector<KeyPoint>\& keypoints,
\par const Mat\& mask=Mat() ) const;\\
void FeatureDetector::detect( const vector<Mat>\& imageCollection,
\par vector<vector<KeyPoint> >\& pointCollection,
\par const vector<Mat>\& masks=vector<Mat>() ) const;
}
\begin{description}
\cvarg{image}{The image.}
\cvarg{keypoints}{The detected keypoints.}
\cvarg{mask}{Mask specifying where to look for keypoints (optional). Must be a char matrix
with non-zero values in the region of interest.}
\end{description}
\begin{description}
\cvarg{imageCollection}{Image collection.}
\cvarg{pointCollection}{Collection of keypoints detected in the input images.}
\cvarg{masks}{Masks for each input image specifying where to look for keypoints (optional).
Each element of \texttt{masks} vector must be a char matrix with non-zero values in the region of interest.}
\end{description}
\cvCppFunc{FeatureDetector::read}
Read feature detector from file node.
\cvdefCpp{
void FeatureDetector::read( const FileNode\& fn );
}
\begin{description}
\cvarg{fn}{File node from which detector will be read.}
\end{description}
\cvCppFunc{FeatureDetector::write}
Write feature detector to file storage.
\cvdefCpp{
void FeatureDetector::write( FileStorage\& fs ) const;
}
\begin{description}
\cvarg{fs}{File storage in which detector will be written.}
\end{description}
\cvclass{FastFeatureDetector}
Wrapping class for feature detection using \cvCppCross{FAST} method.
\begin{lstlisting}
class FastFeatureDetector : public FeatureDetector
{
public:
    FastFeatureDetector( int _threshold=1, bool _nonmaxSuppression=true );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{GoodFeaturesToTrackDetector}
Wrapping class for feature detection using \cvCppCross{goodFeaturesToTrack} method.
\begin{lstlisting}
class GoodFeaturesToTrackDetector : public FeatureDetector
{
public:
    GoodFeaturesToTrackDetector( int _maxCorners, double _qualityLevel,
                                 double _minDistance, int _blockSize=3,
                                 bool _useHarrisDetector=false, double _k=0.04 );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{MserFeatureDetector}
Wrapping class for feature detection using \cvCppCross{MSER} class.
\begin{lstlisting}
class MserFeatureDetector : public FeatureDetector
{
public:
    MserFeatureDetector( CvMSERParams params=cvMSERParams() );
    MserFeatureDetector( int delta, int minArea, int maxArea,
                         double maxVariation, double minDiversity,
                         int maxEvolution, double areaThreshold,
                         double minMargin, int edgeBlurSize );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{StarFeatureDetector}
Wrapping class for feature detection using \cvCppCross{StarDetector} class.
\begin{lstlisting}
class StarFeatureDetector : public FeatureDetector
{
public:
    StarFeatureDetector( int maxSize=16, int responseThreshold=30,
                         int lineThresholdProjected = 10,
                         int lineThresholdBinarized=8, int suppressNonmaxSize=5 );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{SiftFeatureDetector}
Wrapping class for feature detection using \cvCppCross{SIFT} class.
\begin{lstlisting}
class SiftFeatureDetector : public FeatureDetector
{
public:
    SiftFeatureDetector(
        double threshold=SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
        double edgeThreshold=SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD(),
        int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
        int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
        int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
        int angleMode=SIFT::CommonParams::FIRST_ANGLE );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{SurfFeatureDetector}
Wrapping class for feature detection using \cvCppCross{SURF} class.
\begin{lstlisting}
class SurfFeatureDetector : public FeatureDetector
{
public:
    SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
                         int octaveLayers = 4 );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{GridAdaptedFeatureDetector}
Adapts a detector to partition the source image into a grid and detect
points in each cell.
\begin{lstlisting}
class GridAdaptedFeatureDetector : public FeatureDetector
{
public:
    /*
     * detector           Detector that will be adapted.
     * maxTotalKeypoints  Maximum count of keypoints detected on the image.
     *                    Only the strongest keypoints will be kept.
     * gridRows           Grid row count.
     * gridCols           Grid column count.
     */
    GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
                                int maxTotalKeypoints, int gridRows=4,
                                int gridCols=4 );
    virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
                         const Mat& mask=Mat() ) const;
    // todo read/write
    virtual void read( const FileNode& fn ) {}
    virtual void write( FileStorage& fs ) const {}
protected:
    ...
};
\end{lstlisting}
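For example, to spread at most 500 FAST keypoints over a $4\times4$ grid (a sketch; the FAST threshold is an assumption, and \texttt{img} is a previously loaded image):
\begin{lstlisting}
// assumes: Mat img loaded elsewhere
GridAdaptedFeatureDetector detector( new FastFeatureDetector(20),
                                     500, 4, 4 );
vector<KeyPoint> keypoints;
detector.detect( img, keypoints );
\end{lstlisting}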
\cvclass{PyramidAdaptedFeatureDetector}
Adapts a detector to detect points over multiple levels of a Gaussian
pyramid. Useful for detectors that are not inherently scaled.
\begin{lstlisting}
class PyramidAdaptedFeatureDetector : public FeatureDetector
{
public:
PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int levels=2 );
virtual void detect( const Mat& image, vector<KeyPoint>& keypoints,
const Mat& mask=Mat() ) const;
// todo read/write
virtual void read( const FileNode& fn ) {}
virtual void write( FileStorage& fs ) const {}
protected:
...
};
\end{lstlisting}
\cvCppFunc{createFeatureDetector}
Feature detector factory that creates a \cvCppCross{FeatureDetector} of the given type with
default parameters (rather than using the default constructor); see the sketch below.
\begin{lstlisting}
Ptr<FeatureDetector> createFeatureDetector( const string& detectorType );
\end{lstlisting}
\begin{description}
\cvarg{detectorType}{Feature detector type, e.g. ``SURF'', ``FAST'', ...}
\end{description}
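A usage sketch; the empty-pointer check for an unrecognized type name is a defensive assumption, and \texttt{img} is a previously loaded image:
\begin{lstlisting}
// assumes: Mat img loaded elsewhere
Ptr<FeatureDetector> detector = createFeatureDetector( "FAST" );
vector<KeyPoint> keypoints;
if( !detector.empty() ) // assumed empty for an unknown type name
    detector->detect( img, keypoints );
\end{lstlisting}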
\cvclass{DescriptorExtractor}
Abstract base class for computing descriptors for image keypoints.
\begin{lstlisting}
class CV_EXPORTS DescriptorExtractor
{
public:
    virtual ~DescriptorExtractor() {}
    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints,
                          Mat& descriptors ) const = 0;
    void compute( const vector<Mat>& imageCollection,
                  vector<vector<KeyPoint> >& pointCollection,
                  vector<Mat>& descCollection ) const;
    virtual void read( const FileNode& ) {}
    virtual void write( FileStorage& ) const {}
    virtual int descriptorSize() const = 0;
    virtual int descriptorType() const = 0;
protected:
    ...
};
\end{lstlisting}
In this interface we assume a keypoint descriptor can be represented as a
dense, fixed-dimensional vector of some basic type. Most descriptors used
in practice follow this pattern, as it makes it very easy to compute
distances between descriptors. Therefore we represent a collection of
descriptors as a \cvCppCross{Mat}, where each row is one keypoint descriptor.
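For instance, computing SURF descriptors for previously detected keypoints (a sketch; \texttt{img} and \texttt{keypoints} are assumed to come from a detector as above):
\begin{lstlisting}
// assumes: Mat img and vector<KeyPoint> keypoints from a detector
SurfDescriptorExtractor extractor;
Mat descriptors;
extractor.compute( img, keypoints, descriptors );
// one row per surviving keypoint; keypoints for which a descriptor
// cannot be computed have been removed from the vector
\end{lstlisting}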
\cvCppFunc{DescriptorExtractor::compute}
Compute the descriptors for a set of keypoints detected in an image or image collection.
\cvdefCpp{
void DescriptorExtractor::compute( const Mat\& image,
\par vector<KeyPoint>\& keypoints,
\par Mat\& descriptors ) const;\\
void DescriptorExtractor::compute( const vector<Mat>\& imageCollection,
\par vector<vector<KeyPoint> >\& pointCollection,
\par vector<Mat>\& descCollection ) const;
}
\begin{description}
\cvarg{image}{The image.}
\cvarg{keypoints}{The keypoints. Keypoints for which a descriptor cannot be computed are removed.}
\cvarg{descriptors}{The descriptors. Row i is the descriptor for keypoint i.}
\end{description}
\begin{description}
\cvarg{imageCollection}{Image collection.}
\cvarg{pointCollection}{Keypoints collection. \texttt{pointCollection[i]} contains keypoints
detected in \texttt{imageCollection[i]}. Keypoints for which a descriptor
cannot be computed are removed.}
\cvarg{descCollection}{Descriptor collection. \texttt{descCollection[i]} contains descriptors
computed for \texttt{pointCollection[i]}.}
\end{description}
\cvCppFunc{DescriptorExtractor::read}
Read descriptor extractor from file node.
\cvdefCpp{
void DescriptorExtractor::read( const FileNode\& fn );
}
\begin{description}
\cvarg{fn}{File node from which the descriptor extractor will be read.}
\end{description}
\cvCppFunc{DescriptorExtractor::write}
Write descriptor extractor to file storage.
\cvdefCpp{
void DescriptorExtractor::write( FileStorage\& fs ) const;
}
\begin{description}
\cvarg{fs}{File storage in which the descriptor extractor will be written.}
\end{description}
\cvclass{SiftDescriptorExtractor}
Wrapping class for descriptors computing using \cvCppCross{SIFT} class.
\begin{lstlisting}
class SiftDescriptorExtractor : public DescriptorExtractor
{
public:
    SiftDescriptorExtractor(
        double magnification=SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION(),
        bool isNormalize=true, bool recalculateAngles=true,
        int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
        int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
        int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
        int angleMode=SIFT::CommonParams::FIRST_ANGLE );
    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints,
                          Mat& descriptors ) const;
    virtual void read( const FileNode &fn );
    virtual void write( FileStorage &fs ) const;
    virtual int descriptorSize() const;
    virtual int descriptorType() const;
protected:
    ...
};
\end{lstlisting}
\cvclass{SurfDescriptorExtractor}
Wrapping class for descriptors computing using \cvCppCross{SURF} class.
\begin{lstlisting}
class SurfDescriptorExtractor : public DescriptorExtractor
{
public:
    SurfDescriptorExtractor( int nOctaves=4,
                             int nOctaveLayers=2, bool extended=false );
    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints,
                          Mat& descriptors ) const;
    virtual void read( const FileNode &fn );
    virtual void write( FileStorage &fs ) const;
    virtual int descriptorSize() const;
    virtual int descriptorType() const;
protected:
    ...
};
\end{lstlisting}
\cvclass{CalonderDescriptorExtractor}
Wrapping class for descriptors computing using \cvCppCross{RTreeClassifier} class.
\begin{lstlisting}
template<typename T>
class CalonderDescriptorExtractor : public DescriptorExtractor
{
public:
    CalonderDescriptorExtractor( const string& classifierFile );
    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints,
                          Mat& descriptors ) const;
    virtual void read( const FileNode &fn );
    virtual void write( FileStorage &fs ) const;
    virtual int descriptorSize() const;
    virtual int descriptorType() const;
protected:
    ...
};
\end{lstlisting}
\cvclass{DMatch}
Match between two keypoint descriptors: the query descriptor index,
the train descriptor index, the train image index, and the distance between the descriptors.
\begin{lstlisting}
struct DMatch
{
    DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1),
               distance(std::numeric_limits<float>::max()) {}
    DMatch( int _queryIdx, int _trainIdx, float _distance ) :
            queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1),
            distance(_distance) {}
    DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) :
            queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx),
            distance(_distance) {}

    int queryIdx; // query descriptor index
    int trainIdx; // train descriptor index
    int imgIdx;   // train image index
    float distance;

    // less is better
    bool operator<( const DMatch &m ) const;
};
\end{lstlisting}
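Since \texttt{DMatch::operator<} compares distances, a match vector can be sorted so that the best matches come first (a sketch; \texttt{matches} is assumed to be filled by a matcher, and keeping 50 matches is an arbitrary illustrative choice):
\begin{lstlisting}
#include <algorithm>
// assumes: vector<DMatch> matches filled by a matcher
std::sort( matches.begin(), matches.end() ); // smallest distance first
// e.g. keep only the 50 best matches
if( matches.size() > 50 )
    matches.resize( 50 );
\end{lstlisting}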
\cvclass{DescriptorMatcher}
Abstract base class for matching keypoint descriptors. It has two groups
of match methods: for matching descriptors of one image against another image,
or against an image set.
\begin{lstlisting}
class DescriptorMatcher
{
public:
    virtual ~DescriptorMatcher() {}

    virtual void add( const vector<Mat>& descCollection );
    const vector<Mat>& getTrainDescCollection() const;
    virtual void clear();
    virtual bool supportMask() = 0;
    virtual void train() = 0;
    /*
     * Group of methods to match descriptors from an image pair.
     */
    void match( const Mat& queryDescs, const Mat& trainDescs,
                vector<DMatch>& matches, const Mat& mask=Mat() ) const;
    void knnMatch( const Mat& queryDescs, const Mat& trainDescs,
                   vector<vector<DMatch> >& matches, int knn,
                   const Mat& mask=Mat(), bool compactResult=false ) const;
    void radiusMatch( const Mat& queryDescs, const Mat& trainDescs,
                      vector<vector<DMatch> >& matches, float maxDistance,
                      const Mat& mask=Mat(), bool compactResult=false ) const;
    /*
     * Group of methods to match descriptors from one image to an image set.
     */
    void match( const Mat& queryDescs, vector<DMatch>& matches,
                const vector<Mat>& masks=vector<Mat>() );
    void knnMatch( const Mat& queryDescs, vector<vector<DMatch> >& matches,
                   int knn, const vector<Mat>& masks=vector<Mat>(),
                   bool compactResult=false );
    void radiusMatch( const Mat& queryDescs, vector<vector<DMatch> >& matches,
                      float maxDistance, const vector<Mat>& masks=vector<Mat>(),
                      bool compactResult=false );

    virtual void read( const FileNode& ) {}
    virtual void write( FileStorage& ) const {}
protected:
    vector<Mat> trainDescCollection;
    ...
};
\end{lstlisting}
\cvCppFunc{DescriptorMatcher::add}
Adds descriptors to the train descriptor collection. If the collection \texttt{trainDescCollection} is not empty,
the new descriptors are added to the existing train descriptors.
\cvdefCpp{
void add( const vector<Mat>\& descCollection );
}
\begin{description}
\cvarg{descCollection}{Descriptors to add. Each \texttt{descCollection[i]} is a set of descriptors from the same train image.}
\end{description}
\cvCppFunc{DescriptorMatcher::getTrainDescCollection}
Returns a constant reference to the train descriptor collection (i.e. \texttt{trainDescCollection}).
\cvdefCpp{
const vector<Mat>\& getTrainDescCollection() const;
}
\cvCppFunc{DescriptorMatcher::clear}
Clears the train descriptor collection.
\cvdefCpp{
void DescriptorMatcher::clear();
}
\cvCppFunc{DescriptorMatcher::supportMask}
Returns true if the descriptor matcher supports masking of permissible matches.
\cvdefCpp{
bool DescriptorMatcher::supportMask();
}
\cvCppFunc{DescriptorMatcher::train}
Trains the descriptor matcher (e.g. builds the flann index).
\cvdefCpp{
void DescriptorMatcher::train();
}
\cvCppFunc{DescriptorMatcher::match}
Find the best match for each descriptor from a query set with the train descriptors.
It is assumed that the query descriptors belong to keypoints detected on the same query image.
In the first variant of this method, the train descriptors are passed as an input argument and
are assumed to belong to keypoints detected on the same train image. In the second variant,
the train descriptor collection set by the \texttt{add} method is used.
An optional mask (or masks) can be set to describe which descriptors can be matched:
\texttt{queryDescs.row(i)} can be matched with \texttt{trainDescs.row(j)} only if \texttt{mask.at<uchar>(i,j)} is non-zero.
\cvdefCpp{
void DescriptorMatcher::match( const Mat\& queryDescs,
\par const Mat\& trainDescs,
\par vector<DMatch>\& matches,
\par const Mat\& mask=Mat() ) const;
}
\cvdefCpp{
void DescriptorMatcher::match( const Mat\& queryDescs,
\par vector<DMatch>\& matches,
\par const vector<Mat>\& masks=vector<Mat>() );
}
\begin{description}
\cvarg{queryDescs}{Query set of descriptors.}
\cvarg{trainDescs}{Train set of descriptors.}
\cvarg{matches}{Matches. If a query descriptor is masked out in \texttt{mask}, no match is added for it,
so the size of \texttt{matches} may be smaller than the number of query descriptors.}
\cvarg{mask}{Mask specifying permissible matches between input query and train matrices of descriptors.}
\cvarg{masks}{The set of masks. Each \texttt{masks[i]} specifies permissible matches between input query descriptors
and stored train descriptors from i-th image (i.e. \texttt{trainDescCollection[i])}.}
\end{description}
\cvCppFunc{DescriptorMatcher::knnMatch}
Find the \texttt{knn} best matches for each descriptor from a query set with the train descriptors.
The found \texttt{knn} (or fewer, if not possible) matches are returned in increasing order of distance.
For details about the query and train descriptors, see \cvCppCross{DescriptorMatcher::match}.
\cvdefCpp{
void DescriptorMatcher::knnMatch( const Mat\& queryDescs,
\par const Mat\& trainDescs, vector<vector<DMatch> >\& matches,
\par int knn, const Mat\& mask=Mat(),
\par bool compactResult=false ) const;
}
\cvdefCpp{
void DescriptorMatcher::knnMatch( const Mat\& queryDescs,
\par vector<vector<DMatch> >\& matches, int knn,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\begin{description}
\cvarg{queryDescs, trainDescs, mask, masks}{See \cvCppCross{DescriptorMatcher::match}.}
\cvarg{matches}{Matches. Each \texttt{matches[i]} contains at most \texttt{knn} matches for the same query descriptor.}
\cvarg{knn}{The number of best matches to find per query descriptor (fewer are returned if not possible).}
\cvarg{compactResult}{Used when the mask (or masks) is not empty. If \texttt{compactResult} is false,
the \texttt{matches} vector has the same size as the number of \texttt{queryDescs} rows. If \texttt{compactResult}
is true, the \texttt{matches} vector contains no entries for fully masked-out query descriptors.}
\end{description}
\cvCppFunc{DescriptorMatcher::radiusMatch}
Find the best matches for each query descriptor that have a distance less than the given threshold.
The found matches are returned in increasing order of distance. For details about the query and train
descriptors, see \cvCppCross{DescriptorMatcher::match}.
\cvdefCpp{
void DescriptorMatcher::radiusMatch( const Mat\& queryDescs,
\par const Mat\& trainDescs, vector<vector<DMatch> >\& matches,
\par float maxDistance, const Mat\& mask=Mat(),
\par bool compactResult=false ) const;
}
\cvdefCpp{
void DescriptorMatcher::radiusMatch( const Mat\& queryDescs,
\par vector<vector<DMatch> >\& matches, float maxDistance,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\begin{description}
\cvarg{queryDescs, trainDescs, mask, masks}{See \cvCppCross{DescriptorMatcher::match}.}
\cvarg{matches, compactResult}{See \cvCppCross{DescriptorMatcher::knnMatch}.}
\cvarg{maxDistance}{The threshold on match distances; only matches with a smaller distance are returned.}
\end{description}
\cvclass{BruteForceMatcher}
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest
descriptor in the second set by trying each one. This descriptor matcher supports masking
permissible matches between descriptor sets.
\begin{lstlisting}
template<class Distance>
class BruteForceMatcher : public DescriptorMatcher
{
public:
    BruteForceMatcher( Distance d = Distance() ) : distance(d) {}
    virtual ~BruteForceMatcher() {}
    virtual void train() {}
    virtual bool supportMask() { return true; }
protected:
    ...
};
\end{lstlisting}
For efficiency, BruteForceMatcher is templated on the distance metric.
For float descriptors, a common choice would be \texttt{L2<float>}. Class \texttt{L2} is defined as:
\begin{lstlisting}
template<typename T>
struct Accumulator
{
    typedef T Type;
};

template<> struct Accumulator<unsigned char>  { typedef unsigned int Type; };
template<> struct Accumulator<unsigned short> { typedef unsigned int Type; };
template<> struct Accumulator<char>  { typedef int Type; };
template<> struct Accumulator<short> { typedef int Type; };

/*
 * Euclidean distance functor
 */
template<class T>
struct L2
{
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        ResultType result = ResultType();
        for( int i = 0; i < size; i++ )
        {
            ResultType diff = a[i] - b[i];
            result += diff*diff;
        }
        return sqrt(result);
    }
};
\end{lstlisting}
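A usage sketch; \texttt{desc1} and \texttt{desc2} are assumed to be \texttt{CV\_32F} descriptor matrices produced by a \cvCppCross{DescriptorExtractor}:
\begin{lstlisting}
// assumes: Mat desc1 (query), desc2 (train) with float descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match( desc1, desc2, matches ); // one best match per query row
\end{lstlisting}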
\cvclass{FlannBasedMatcher}
Flann-based descriptor matcher. This matcher trains a \cvCppCross{flann::Index} on the
train descriptor collection and calls its nearest-neighbor search methods to find the best matches.
Therefore this matcher may be faster than the brute-force matcher when matching against a large
train collection. \texttt{FlannBasedMatcher} does not support masking of permissible
matches between descriptor sets, because \cvCppCross{flann::Index} does not
support it.
\begin{lstlisting}
class FlannBasedMatcher : public DescriptorMatcher
{
public:
    FlannBasedMatcher(
        const Ptr<flann::IndexParams>& indexParams=new flann::KDTreeIndexParams(),
        const Ptr<flann::SearchParams>& searchParams=new flann::SearchParams() );
    virtual void add( const vector<Mat>& descCollection );
    virtual void clear();
    virtual void train();
    virtual bool supportMask() { return false; }
protected:
    ...
};
\end{lstlisting}
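A usage sketch combining \texttt{knnMatch} with the common ratio test; the 0.6 ratio is an illustrative assumption, and the descriptors must be \texttt{CV\_32F}:
\begin{lstlisting}
// assumes: Mat desc1 (query), desc2 (train) with CV_32F descriptors
FlannBasedMatcher matcher;
vector<vector<DMatch> > knnMatches;
matcher.knnMatch( desc1, desc2, knnMatches, 2 );

// keep a match only if it is clearly better than the runner-up
vector<DMatch> good;
for( size_t i = 0; i < knnMatches.size(); i++ )
    if( knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.6f*knnMatches[i][1].distance )
        good.push_back( knnMatches[i][0] );
\end{lstlisting}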
\cvCppFunc{createDescriptorMatcher}
Descriptor matcher factory that creates a \cvCppCross{DescriptorMatcher} of
the given type with default parameters (rather than using the default constructor).
\begin{lstlisting}
Ptr<DescriptorMatcher> createDescriptorMatcher( const string& descriptorMatcherType );
\end{lstlisting}
\begin{description}
\cvarg{descriptorMatcherType}{Descriptor matcher type, e.g. ``BruteForce'', ``FlannBased'', ...}
\end{description}
\cvclass{GenericDescriptorMatcher}
Abstract interface for extracting and matching keypoint descriptors.
There are \cvCppCross{DescriptorExtractor} and \cvCppCross{DescriptorMatcher}
for these purposes too, but their interfaces are intended for descriptors
represented as vectors in a multidimensional space. \texttt{GenericDescriptorMatcher}
is a more generic interface for descriptors.
Like \cvCppCross{DescriptorMatcher}, \texttt{GenericDescriptorMatcher} has two groups
of match methods: for matching keypoints of one image against another image,
or against an image set.
\begin{lstlisting}
class GenericDescriptorMatcher
{
public:
    GenericDescriptorMatcher() {}
    virtual ~GenericDescriptorMatcher() {}

    virtual void add( const vector<Mat>& imgCollection,
                      vector<vector<KeyPoint> >& pointCollection );
    const vector<Mat>& getTrainImgCollection() const;
    const vector<vector<KeyPoint> >& getTrainPointCollection() const;
    virtual void clear();
    virtual void train() = 0;
    virtual bool supportMask() = 0;

    virtual void classify( const Mat& queryImage,
                           vector<KeyPoint>& queryPoints,
                           const Mat& trainImage,
                           vector<KeyPoint>& trainPoints ) const;
    virtual void classify( const Mat& queryImage,
                           vector<KeyPoint>& queryPoints );
    /*
     * Group of methods to match keypoints from an image pair.
     */
    void match( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                const Mat& trainImg, vector<KeyPoint>& trainPoints,
                vector<DMatch>& matches, const Mat& mask=Mat() ) const;
    void knnMatch( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                   const Mat& trainImg, vector<KeyPoint>& trainPoints,
                   vector<vector<DMatch> >& matches, int knn,
                   const Mat& mask=Mat(), bool compactResult=false ) const;
    void radiusMatch( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                      const Mat& trainImg, vector<KeyPoint>& trainPoints,
                      vector<vector<DMatch> >& matches, float maxDistance,
                      const Mat& mask=Mat(), bool compactResult=false ) const;
    /*
     * Group of methods to match keypoints from one image to an image set.
     */
    void match( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() );
    void knnMatch( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                   vector<vector<DMatch> >& matches, int knn,
                   const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
    void radiusMatch( const Mat& queryImg, vector<KeyPoint>& queryPoints,
                      vector<vector<DMatch> >& matches, float maxDistance,
                      const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );

    virtual void read( const FileNode& ) {}
    virtual void write( FileStorage& ) const {}
protected:
    ...
};
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::add}
Adds images and their keypoints to the train collection (descriptors are expected to be computed here).
If the train collection is not empty, the new images and keypoints are added to the
existing data.
\cvdefCpp{
void GenericDescriptorMatcher::add( const vector<Mat>\& imgCollection,
\par vector<vector<KeyPoint> >\& pointCollection );
}
\begin{description}
\cvarg{imgCollection}{Image collection.}
\cvarg{pointCollection}{Point collection. It is assumed that \texttt{pointCollection[i]} contains keypoints
detected in the image \texttt{imgCollection[i]}.}
\end{description}
\cvCppFunc{GenericDescriptorMatcher::getTrainImgCollection}
Returns train image collection.
\begin{lstlisting}
const vector<Mat>& GenericDescriptorMatcher::getTrainImgCollection() const;
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::getTrainPointCollection}
Returns train keypoints collection.
\begin{lstlisting}
const vector<vector<KeyPoint> >&
GenericDescriptorMatcher::getTrainPointCollection() const;
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::clear}
Clears the train collection (images and keypoints).
\begin{lstlisting}
void GenericDescriptorMatcher::clear();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::train}
Trains the object, e.g. a tree-based structure, to extract descriptors or
to optimize descriptor matching.
\begin{lstlisting}
void GenericDescriptorMatcher::train();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::supportMask}
Returns true if the generic descriptor matcher supports masking of permissible matches.
\begin{lstlisting}
bool GenericDescriptorMatcher::supportMask();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::classify}
Classifies query keypoints against the keypoints of one train image given as an input argument
(first version of the method) or against the train image collection set using
\cvCppCross{GenericDescriptorMatcher::add} (second version).
\cvdefCpp{
void GenericDescriptorMatcher::classify( \par const Mat\& queryImage,
\par vector<KeyPoint>\& queryPoints,
\par const Mat\& trainImage,
\par vector<KeyPoint>\& trainPoints ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::classify( const Mat\& queryImage,
\par vector<KeyPoint>\& queryPoints );
}
\begin{description}
\cvarg{queryImage}{The query image.}
\cvarg{queryPoints}{Keypoints from the query image.}
\cvarg{trainImage}{The train image.}
\cvarg{trainPoints}{Keypoints from the train image.}
\end{description}
\cvCppFunc{GenericDescriptorMatcher::match}
Finds the best match for query keypoints in the training set. In the first version of the method,
a train image and the keypoints detected on it are input arguments. In the second version,
the query keypoints are matched against the training collection set using
\cvCppCross{GenericDescriptorMatcher::add}. As in \cvCppCross{DescriptorMatcher::match},
a mask can be set.
\cvdefCpp{
void GenericDescriptorMatcher::match(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par const Mat\& trainImg, vector<KeyPoint>\& trainPoints,
\par vector<DMatch>\& matches, const Mat\& mask=Mat() ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::match(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par vector<DMatch>\& matches,
\par const vector<Mat>\& masks=vector<Mat>() );
}
\begin{description}
\cvarg{queryImg}{Query image.}
\cvarg{queryPoints}{Keypoints detected in \texttt{queryImg}.}
\cvarg{trainImg}{Train image.}
\cvarg{trainPoints}{Keypoints detected in \texttt{trainImg}.}
\cvarg{matches}{Matches. If a query descriptor (keypoint) is masked out in \texttt{mask},
no match is added for it, so the size of \texttt{matches} may be smaller than the number of query keypoints.}
\cvarg{mask}{Mask specifying permissible matches between the input query and train keypoints.}
\cvarg{masks}{The set of masks. Each \texttt{masks[i]} specifies permissible matches between the input query keypoints
and the stored train keypoints from the i-th image.}
\end{description}
\cvCppFunc{GenericDescriptorMatcher::knnMatch}
Find the \texttt{knn} best matches for each keypoint from a query set with the train keypoints.
The found \texttt{knn} (or fewer, if not possible) matches are returned in increasing order of distance.
For details, see \cvCppCross{GenericDescriptorMatcher::match} and \cvCppCross{DescriptorMatcher::knnMatch}.
\cvdefCpp{
void GenericDescriptorMatcher::knnMatch(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par const Mat\& trainImg, vector<KeyPoint>\& trainPoints,
\par vector<vector<DMatch> >\& matches, int knn,
\par const Mat\& mask=Mat(), bool compactResult=false ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::knnMatch(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par vector<vector<DMatch> >\& matches, int knn,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\cvCppFunc{GenericDescriptorMatcher::radiusMatch}
Find the best matches for each query keypoint that have a distance less than the given threshold.
The found matches are returned in increasing order of distance. For details, see
\cvCppCross{GenericDescriptorMatcher::match} and \cvCppCross{DescriptorMatcher::radiusMatch}.
\cvdefCpp{
void GenericDescriptorMatcher::radiusMatch(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par const Mat\& trainImg, vector<KeyPoint>\& trainPoints,
\par vector<vector<DMatch> >\& matches, float maxDistance,
\par const Mat\& mask=Mat(), bool compactResult=false ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::radiusMatch(
\par const Mat\& queryImg, vector<KeyPoint>\& queryPoints,
\par vector<vector<DMatch> >\& matches, float maxDistance,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\cvCppFunc{GenericDescriptorMatcher::read}
Reads matcher object from a file node.
\cvdefCpp{
void GenericDescriptorMatcher::read( const FileNode\& fn );
}
\cvCppFunc{GenericDescriptorMatcher::write}
Writes the matcher object to a file storage.
\cvdefCpp{
void GenericDescriptorMatcher::write( FileStorage\& fs ) const;
}
\cvclass{OneWayDescriptorMatcher}
Wrapping class for computing, matching and classification of descriptors using \cvCppCross{OneWayDescriptorBase} class.
\begin{lstlisting}
class OneWayDescriptorMatcher : public GenericDescriptorMatcher
{
public:
    class Params
    {
    public:
        static const int POSE_COUNT = 500;
        static const int PATCH_WIDTH = 24;
        static const int PATCH_HEIGHT = 24;
        static float GET_MIN_SCALE() { return 0.7f; }
        static float GET_MAX_SCALE() { return 1.5f; }
        static float GET_STEP_SCALE() { return 1.2f; }

        Params( int _poseCount = POSE_COUNT,
                Size _patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
                string _pcaFilename = string(),
                string _trainPath = string(),
                string _trainImagesList = string(),
                float _minScale = GET_MIN_SCALE(), float _maxScale = GET_MAX_SCALE(),
                float _stepScale = GET_STEP_SCALE() ) :
            poseCount(_poseCount), patchSize(_patchSize), pcaFilename(_pcaFilename),
            trainPath(_trainPath), trainImagesList(_trainImagesList),
            minScale(_minScale), maxScale(_maxScale), stepScale(_stepScale) {}

        int poseCount;
        Size patchSize;
        string pcaFilename;
        string trainPath;
        string trainImagesList;
        float minScale, maxScale, stepScale;
    };

    // Equivalent to calling PointMatchOneWay() followed by Initialize(_params)
    OneWayDescriptorMatcher( const Params& _params=Params() );
    virtual ~OneWayDescriptorMatcher();

    void initialize( const Params& _params,
                     const Ptr<OneWayDescriptorBase>& _base=Ptr<OneWayDescriptorBase>() );
    virtual void clear();
    virtual void train();
    virtual bool supportMask() { return false; }
    virtual void read( const FileNode &fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{FernDescriptorMatcher}
Wrapping class for computing, matching and classification of descriptors using \cvCppCross{FernClassifier} class.
\begin{lstlisting}
class FernDescriptorMatcher : public GenericDescriptorMatcher
{
public:
    class Params
    {
    public:
        Params( int _nclasses=0,
                int _patchSize=FernClassifier::PATCH_SIZE,
                int _signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
                int _nstructs=FernClassifier::DEFAULT_STRUCTS,
                int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
                int _nviews=FernClassifier::DEFAULT_VIEWS,
                int _compressionMethod=FernClassifier::COMPRESSION_NONE,
                const PatchGenerator& patchGenerator=PatchGenerator() );
        Params( const string& _filename );

        int nclasses;
        int patchSize;
        int signatureSize;
        int nstructs;
        int structSize;
        int nviews;
        int compressionMethod;
        PatchGenerator patchGenerator;
        string filename;
    };

    FernDescriptorMatcher( const Params& _params=Params() );
    virtual ~FernDescriptorMatcher();

    virtual void clear();
    virtual void train();
    virtual bool supportMask() { return false; }
    virtual void read( const FileNode &fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
\cvclass{VectorDescriptorMatcher}
Class used for matching descriptors that can be described as vectors in a finite-dimensional space.
\begin{lstlisting}
class VectorDescriptorMatcher : public GenericDescriptorMatcher
{
public:
    VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& _extractor,
                             const Ptr<DescriptorMatcher>& _matcher )
        : extractor( _extractor ), matcher( _matcher )
    { CV_Assert( !extractor.empty() && !matcher.empty() ); }
    virtual ~VectorDescriptorMatcher() {}

    virtual void add( const vector<Mat>& imgCollection,
                      vector<vector<KeyPoint> >& pointCollection );
    virtual void clear();
    virtual void train();
    virtual bool supportMask() { return matcher->supportMask(); }
    virtual void read( const FileNode& fn );
    virtual void write( FileStorage& fs ) const;
protected:
    ...
};
\end{lstlisting}
Example of creating a \texttt{VectorDescriptorMatcher}:
\begin{lstlisting}
VectorDescriptorMatcher matcher( new SurfDescriptorExtractor,
                                 new BruteForceMatcher<L2<float> > );
\end{lstlisting}
\cvCppFunc{drawMatches}
This function draws matches of keypoints from two images onto the output image.
A match is a line connecting two keypoints (circles).
\cvdefCpp{
void drawMatches( const Mat\& img1, const vector<KeyPoint>\& keypoints1,
\par const Mat\& img2, const vector<KeyPoint>\& keypoints2,
\par const vector<DMatch>\& matches1to2, Mat\& outImg,
\par const Scalar\& matchColor=Scalar::all(-1),
\par const Scalar\& singlePointColor=Scalar::all(-1),
\par const vector<char>\& matchesMask=vector<char>(),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\cvdefCpp{
void drawMatches( const Mat\& img1, const vector<KeyPoint>\& keypoints1,
\par const Mat\& img2, const vector<KeyPoint>\& keypoints2,
\par const vector<vector<DMatch> >\& matches1to2, Mat\& outImg,
\par const Scalar\& matchColor=Scalar::all(-1),
\par const Scalar\& singlePointColor=Scalar::all(-1),
\par const vector<vector<char> >\& matchesMask=
\par vector<vector<char> >(),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\begin{description}
\cvarg{img1}{First source image.}
\cvarg{keypoints1}{Keypoints from first source image.}
\cvarg{img2}{Second source image.}
\cvarg{keypoints2}{Keypoints from second source image.}
\cvarg{matches1to2}{Matches from the first image to the second one, i.e. \texttt{keypoints1[matches1to2[i].queryIdx]}
has the corresponding point \texttt{keypoints2[matches1to2[i].trainIdx]}.}
\cvarg{outImg}{Output image. Its content depends on the \texttt{flags} value defining what is
drawn in the output image. See the possible \texttt{flags} bit values below.}
\cvarg{matchColor}{Color of matches (lines and connected keypoints).
If \texttt{matchColor==Scalar::all(-1)}, the color is generated randomly.}
\cvarg{singlePointColor}{Color of single keypoints (circles), i.e. keypoints that do not have matches.
If \texttt{singlePointColor==Scalar::all(-1)}, the color is generated randomly.}
\cvarg{matchesMask}{Mask determining which matches are drawn. If the mask is empty, all matches are drawn.}
\cvarg{flags}{Each bit of \texttt{flags} sets some feature of drawing.
The possible \texttt{flags} bit values are defined by \texttt{DrawMatchesFlags}, see below.}
\end{description}
\begin{lstlisting}
struct DrawMatchesFlags
{
    enum
    {
        DEFAULT = 0,          // Output image matrix will be created (Mat::create),
                              // i.e. existing memory of output image may be reused.
                              // Two source images, matches and single keypoints
                              // will be drawn.
                              // For each keypoint only the center point will be
                              // drawn (without the circle around keypoint with
                              // keypoint size and orientation).
        DRAW_OVER_OUTIMG = 1, // Output image matrix will not be
                              // created (Mat::create). Matches will be drawn
                              // on existing content of output image.
        NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.
        DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around
                              // keypoint with keypoint size and orientation will
                              // be drawn.
    };
};
\end{lstlisting}
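A usage sketch; \texttt{img1}, \texttt{img2}, the keypoint vectors and \texttt{matches} are assumed to come from a matching pipeline such as the one sketched earlier:
\begin{lstlisting}
// assumes: img1, kpts1, img2, kpts2 and vector<DMatch> matches
Mat outImg;
drawMatches( img1, kpts1, img2, kpts2, matches, outImg,
             Scalar::all(-1), Scalar::all(-1), vector<char>(),
             DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imshow( "matches", outImg );
waitKey();
\end{lstlisting}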
\cvCppFunc{drawKeypoints}
Draw keypoints.
\cvdefCpp{
void drawKeypoints( const Mat\& image,
\par const vector<KeyPoint>\& keypoints,
\par Mat\& outImg, const Scalar\& color=Scalar::all(-1),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\begin{description}
\cvarg{image}{Source image.}
\cvarg{keypoints}{Keypoints from the source image.}
\cvarg{outImg}{Output image. Its content depends on the \texttt{flags} value defining what is
drawn in the output image. See the possible \texttt{flags} bit values.}
\cvarg{color}{Color of keypoints.}
\cvarg{flags}{Each bit of \texttt{flags} sets some feature of drawing.
The possible \texttt{flags} bit values are defined by \texttt{DrawMatchesFlags},
see above in \cvCppCross{drawMatches}.}
\end{description}
\fi