merged 2.4 into trunk

This commit is contained in:
Vadim Pisarevsky
2012-04-30 14:33:52 +00:00
parent 3f1c6d7357
commit d5a0088bbe
194 changed files with 10158 additions and 8225 deletions

View File

@@ -95,9 +95,9 @@ Creates a descriptor extractor by name.
The current implementation supports the following types of a descriptor extractor:
* ``"SIFT"`` -- :ocv:class:`SiftDescriptorExtractor`
* ``"SURF"`` -- :ocv:class:`SurfDescriptorExtractor`
* ``"ORB"`` -- :ocv:class:`OrbDescriptorExtractor`
* ``"SIFT"`` -- :ocv:class:`SIFT`
* ``"SURF"`` -- :ocv:class:`SURF`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
@@ -105,107 +105,6 @@ A combined format is also supported: descriptor extractor adapter name ( ``"Oppo
for example: ``"OpponentSIFT"`` .
SiftDescriptorExtractor
-----------------------
.. ocv:class:: SiftDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`SIFT` class. ::
class SiftDescriptorExtractor : public DescriptorExtractor
{
public:
SiftDescriptorExtractor(
const SIFT::DescriptorParams& descriptorParams=SIFT::DescriptorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftDescriptorExtractor( double magnification, bool isNormalize=true,
bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
SurfDescriptorExtractor
-----------------------
.. ocv:class:: SurfDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`SURF` class. ::
class SurfDescriptorExtractor : public DescriptorExtractor
{
public:
SurfDescriptorExtractor( int nOctaves=4,
int nOctaveLayers=2, bool extended=false );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
OrbDescriptorExtractor
---------------------------
.. ocv:class:: OrbDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`ORB` class. ::
template<typename T>
class OrbDescriptorExtractor : public DescriptorExtractor
{
public:
OrbDescriptorExtractor( ORB::PatchSize patch_size );
virtual void read( const FileNode &fn );
virtual void write( FileStorage &fs ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
CalonderDescriptorExtractor
---------------------------
.. ocv:class:: CalonderDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`RTreeClassifier` class. ::
template<typename T>
class CalonderDescriptorExtractor : public DescriptorExtractor
{
public:
CalonderDescriptorExtractor( const string& classifierFile );
virtual void read( const FileNode &fn );
virtual void write( FileStorage &fs ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor

View File

@@ -248,7 +248,7 @@ Creates a descriptor matcher of a given type with the default parameters (using
*
``BruteForce-Hamming``
*
``BruteForce-HammingLUT``
``BruteForce-Hamming(2)``
*
``FlannBased``
@@ -256,102 +256,22 @@ Creates a descriptor matcher of a given type with the default parameters (using
BruteForceMatcher
BFMatcher
-----------------
.. ocv:class:: BruteForceMatcher
.. ocv:class:: BFMatcher
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets. ::
template<class Distance>
class BruteForceMatcher : public DescriptorMatcher
{
public:
BruteForceMatcher( Distance d = Distance() );
virtual ~BruteForceMatcher();
virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
}
BFMatcher::BFMatcher
--------------------
Brute-force matcher constructor.
.. ocv:function:: BFMatcher::BFMatcher( int distanceType, bool crossCheck=false )
For efficiency, ``BruteForceMatcher`` is used as a template parameterized with the distance type. For float descriptors, ``L2<float>`` is a common choice. The following distances are supported: ::
template<typename T>
struct Accumulator
{
typedef T Type;
};
template<> struct Accumulator<unsigned char> { typedef unsigned int Type; };
template<> struct Accumulator<unsigned short> { typedef unsigned int Type; };
template<> struct Accumulator<char> { typedef int Type; };
template<> struct Accumulator<short> { typedef int Type; };
/*
* Euclidean distance functor
*/
template<class T>
struct L2
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
:param distanceType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
/*
* Squared Euclidean distance functor
*/
template<class T>
struct SL2
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
// Note: in case of SL2 distance a parameter maxDistance in the method DescriptorMatcher::radiusMatch
// is a squared maximum distance in L2.
/*
* Manhattan distance (city block distance) functor
*/
template<class T>
struct CV_EXPORTS L1
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
/*
* Hamming distance functor
*/
struct HammingLUT
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
...
};
struct Hamming
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
};
:param crossCheck: If it is false, this is the default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
FlannBasedMatcher

View File

@@ -140,10 +140,10 @@ The following detector types are supported:
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"STAR"`` -- :ocv:class:`StarFeatureDetector`
* ``"SIFT"`` -- :ocv:class:`SiftFeatureDetector`
* ``"SURF"`` -- :ocv:class:`SurfFeatureDetector`
* ``"ORB"`` -- :ocv:class:`OrbFeatureDetector`
* ``"MSER"`` -- :ocv:class:`MserFeatureDetector`
* ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
* ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"Dense"`` -- :ocv:class:`DenseFeatureDetector`
@@ -250,67 +250,6 @@ Wrapping class for feature detection using the
...
};
SiftFeatureDetector
-------------------
.. ocv:class:: SiftFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`SIFT` class. ::
class SiftFeatureDetector : public FeatureDetector
{
public:
SiftFeatureDetector(
const SIFT::DetectorParams& detectorParams=SIFT::DetectorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftFeatureDetector( double threshold, double edgeThreshold,
int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
SurfFeatureDetector
-------------------
.. ocv:class:: SurfFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`SURF` class. ::
class SurfFeatureDetector : public FeatureDetector
{
public:
SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
int octaveLayers = 4 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
OrbFeatureDetector
-------------------
.. ocv:class:: OrbFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`ORB` class. ::
class OrbFeatureDetector : public FeatureDetector
{
public:
OrbFeatureDetector( size_t n_features );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector
@@ -605,15 +544,3 @@ StarAdjuster
StarAdjuster(double initial_thresh = 30.0);
...
};
SurfAdjuster
------------
.. ocv:class:: SurfAdjuster
:ocv:class:`AdjusterAdapter` for :ocv:class:`SurfFeatureDetector`. This class adjusts the ``hessianThreshold`` of ``SurfFeatureDetector``. ::
class SurfAdjuster: public AdjusterAdapter
{
SurfAdjuster();
...
};

View File

@@ -238,122 +238,6 @@ Clones the matcher.
but with empty train data.
OneWayDescriptorMatcher
-----------------------
.. ocv:class:: OneWayDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`OneWayDescriptorBase` class. ::
class OneWayDescriptorMatcher : public GenericDescriptorMatcher
{
public:
class Params
{
public:
static const int POSE_COUNT = 500;
static const int PATCH_WIDTH = 24;
static const int PATCH_HEIGHT = 24;
static float GET_MIN_SCALE() { return 0.7f; }
static float GET_MAX_SCALE() { return 1.5f; }
static float GET_STEP_SCALE() { return 1.2f; }
Params( int poseCount = POSE_COUNT,
Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
string pcaFilename = string(),
string trainPath = string(), string trainImagesList = string(),
float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
float stepScale = GET_STEP_SCALE() );
int poseCount;
Size patchSize;
string pcaFilename;
string trainPath;
string trainImagesList;
float minScale, maxScale, stepScale;
};
OneWayDescriptorMatcher( const Params& params=Params() );
virtual ~OneWayDescriptorMatcher();
void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
// Clears keypoints stored in collection and OneWayDescriptorBase
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
FernDescriptorMatcher
---------------------
.. ocv:class:: FernDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`FernClassifier` class. ::
class FernDescriptorMatcher : public GenericDescriptorMatcher
{
public:
class Params
{
public:
Params( int nclasses=0,
int patchSize=FernClassifier::PATCH_SIZE,
int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
int nstructs=FernClassifier::DEFAULT_STRUCTS,
int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
int nviews=FernClassifier::DEFAULT_VIEWS,
int compressionMethod=FernClassifier::COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator() );
Params( const string& filename );
int nclasses;
int patchSize;
int signatureSize;
int nstructs;
int structSize;
int nviews;
int compressionMethod;
PatchGenerator patchGenerator;
string filename;
};
FernDescriptorMatcher( const Params& params=Params() );
virtual ~FernDescriptorMatcher();
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
VectorDescriptorMatcher
-----------------------
.. ocv:class:: VectorDescriptorMatcher

View File

@@ -7,7 +7,7 @@ FAST
--------
Detects corners using the FAST algorithm
.. ocv:function:: void FAST( const Mat& image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
:param image: Image where keypoints (corners) are detected.
@@ -17,7 +17,9 @@ Detects corners using the FAST algorithm
:param nonmaxSupression: If it is true, non-maximum suppression is applied to detected corners (keypoints).
Detects corners using the FAST algorithm by E. Rosten (*Machine Learning for High-speed Corner Detection*, 2006).
Detects corners using the FAST algorithm by [Rosten06]_.
.. [Rosten06] E. Rosten. Machine Learning for High-speed Corner Detection, 2006.
MSER
@@ -46,533 +48,51 @@ The class encapsulates all the parameters of the MSER extraction algorithm (see
http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://opencv.willowgarage.com/wiki/documentation/cpp/features2d/MSER for useful comments and parameters description.
StarDetector
------------
.. ocv:class:: StarDetector
Class implementing the ``Star`` keypoint detector, a modified version of the ``CenSurE`` keypoint detector described in [Agrawal08]_.
.. [Agrawal08] Agrawal, M. and Konolige, K. and Blas, M.R. "CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching", ECCV08, 2008
StarDetector::StarDetector
--------------------------
The Star Detector constructor
.. ocv:function:: StarDetector::StarDetector()
.. ocv:function:: StarDetector::StarDetector(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize)
.. ocv:pyfunction:: cv2.StarDetector(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize) -> <StarDetector object>
:param maxSize: maximum size of the features. The following values are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128. In the case of a different value the result is undefined.
:param responseThreshold: threshold for the approximated laplacian, used to eliminate weak features. The larger it is, the less features will be retrieved
:param lineThresholdProjected: another threshold for the laplacian to eliminate edges
:param lineThresholdBinarized: yet another threshold for the feature size to eliminate edges. The larger the 2nd threshold, the more points you get.
StarDetector::operator()
------------------------
Finds keypoints in an image
.. ocv:function:: void StarDetector::operator()(const Mat& image, vector<KeyPoint>& keypoints)
.. ocv:pyfunction:: cv2.StarDetector.detect(image) -> keypoints
.. ocv:cfunction:: CvSeq* cvGetStarKeypoints( const CvArr* image, CvMemStorage* storage, CvStarDetectorParams params=cvStarDetectorParams() )
.. ocv:pyoldfunction:: cv.GetStarKeypoints(image, storage, params)-> keypoints
:param image: The input 8-bit grayscale image
:param keypoints: The output vector of keypoints
:param storage: The memory storage used to store the keypoints (OpenCV 1.x API only)
:param params: The algorithm parameters stored in ``CvStarDetectorParams`` (OpenCV 1.x API only)
ORB
----
---
.. ocv:class:: ORB
Class for extracting ORB features and descriptors from an image. ::
Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
class ORB
{
public:
/** The patch sizes that can be used (only one right now) */
struct CommonParams
{
enum { DEFAULT_N_LEVELS = 3, DEFAULT_FIRST_LEVEL = 0};
.. [RRKB11] Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571.
/** default constructor */
CommonParams(float scale_factor = 1.2f, unsigned int n_levels = DEFAULT_N_LEVELS,
int edge_threshold = 31, unsigned int first_level = DEFAULT_FIRST_LEVEL);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
ORB::ORB
--------
The ORB constructor
/** Coefficient by which we divide the dimensions from one scale pyramid level to the next */
float scale_factor_;
/** The number of levels in the scale pyramid */
unsigned int n_levels_;
/** The level at which the image is given
* if 1, that means we will also look at the image scale_factor_ times bigger
*/
unsigned int first_level_;
/** How far from the boundary the points should be */
int edge_threshold_;
};
.. ocv:function:: ORB::ORB()
// constructor that initializes all the algorithm parameters
// n_features is the number of desired features
ORB(size_t n_features = 500, const CommonParams & detector_params = CommonParams());
// returns the number of elements in each descriptor (32 bytes)
int descriptorSize() const;
// detects keypoints using ORB
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints) const;
// detects ORB keypoints and computes the ORB descriptors for them;
// output vector "descriptors" stores elements of descriptors and has size
// equal descriptorSize()*keypoints.size() as each descriptor is
// descriptorSize() elements of this vector.
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints,
cv::Mat& descriptors,
bool useProvidedKeypoints=false) const;
};
.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
The class implements ORB.
RandomizedTree
--------------
.. ocv:class:: RandomizedTree
Class containing a base structure for ``RTreeClassifier``. ::
class CV_EXPORTS RandomizedTree
{
public:
friend class RTreeClassifier;
RandomizedTree();
~RandomizedTree();
void train(std::vector<BaseKeypoint> const& base_set,
RNG &rng, int depth, int views,
size_t reduced_num_dim, int num_quant_bits);
void train(std::vector<BaseKeypoint> const& base_set,
RNG &rng, PatchGenerator &make_patch, int depth,
int views, size_t reduced_num_dim, int num_quant_bits);
// next two functions are EXPERIMENTAL
//(do not use unless you know exactly what you do)
static void quantizeVector(float *vec, int dim, int N, float bnds[2],
int clamp_mode=0);
static void quantizeVector(float *src, int dim, int N, float bnds[2],
uchar *dst);
// patch_data must be a 32x32 array (no row padding)
float* getPosterior(uchar* patch_data);
const float* getPosterior(uchar* patch_data) const;
uchar* getPosterior2(uchar* patch_data);
void read(const char* file_name, int num_quant_bits);
void read(std::istream &is, int num_quant_bits);
void write(const char* file_name) const;
void write(std::ostream &os) const;
int classes() { return classes_; }
int depth() { return depth_; }
void discardFloatPosteriors() { freePosteriors(1); }
inline void applyQuantization(int num_quant_bits)
{ makePosteriors2(num_quant_bits); }
private:
int classes_;
int depth_;
int num_leaves_;
std::vector<RTreeNode> nodes_;
float **posteriors_; // 16-byte aligned posteriors
uchar **posteriors2_; // 16-byte aligned posteriors
std::vector<int> leaf_counts_;
void createNodes(int num_nodes, RNG &rng);
void allocPosteriorsAligned(int num_leaves, int num_classes);
void freePosteriors(int which);
// which: 1=posteriors_, 2=posteriors2_, 3=both
void init(int classes, int depth, RNG &rng);
void addExample(int class_id, uchar* patch_data);
void finalize(size_t reduced_num_dim, int num_quant_bits);
int getIndex(uchar* patch_data) const;
inline float* getPosteriorByIndex(int index);
inline uchar* getPosteriorByIndex2(int index);
inline const float* getPosteriorByIndex(int index) const;
void convertPosteriorsToChar();
void makePosteriors2(int num_quant_bits);
void compressLeaves(size_t reduced_num_dim);
void estimateQuantPercForPosteriors(float perc[2]);
};
RandomizedTree::train
-------------------------
Trains a randomized tree using an input set of keypoints.
.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
:param nfeatures: The maximum number of features to retain.
:param rng: Random-number generator used for training.
:param scaleFactor: Pyramid decimation ratio, greater than 1. ``scaleFactor==2`` means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
:param make_patch: Patch generator used for training.
:param nlevels: The number of pyramid levels. The smallest level will have linear size equal to ``input_image_linear_size/pow(scaleFactor, nlevels)``.
:param depth: Maximum tree depth.
:param views: Number of random views of each keypoint neighborhood to generate.
:param reduced_num_dim: Number of dimensions used in the compressed signature.
:param edgeThreshold: This is size of the border where the features are not detected. It should roughly match the ``patchSize`` parameter.
:param num_quant_bits: Number of bits used for quantization.
:param firstLevel: It should be 0 in the current implementation.
:param WTA_K: The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as ``NORM_HAMMING2`` (2 bits per bin). When ``WTA_K=4``, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
:param scoreType: The default HARRIS_SCORE means that Harris algorithm is used to rank features (the score is written to ``KeyPoint::score`` and is used to retain best ``nfeatures`` features); FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints, but it is a little faster to compute.
:param patchSize: size of the patch used by the oriented BRIEF descriptor. Of course, on smaller pyramid layers the perceived image area covered by a feature will be larger.
RandomizedTree::read
------------------------
Reads a pre-saved randomized tree from a file or stream.
.. ocv:function:: read(const char* file_name, int num_quant_bits)
.. ocv:function:: read(std::istream &is, int num_quant_bits)
:param file_name: Name of the file that contains randomized tree data.
:param is: Input stream associated with the file that contains randomized tree data.
:param num_quant_bits: Number of bits used for quantization.
RandomizedTree::write
-------------------------
Writes the current randomized tree to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
:param os: Output stream associated with the file where randomized tree data is stored.
RandomizedTree::applyQuantization
-------------------------------------
.. ocv:function:: void applyQuantization(int num_quant_bits)
Applies quantization to the current randomized tree.
:param num_quant_bits: Number of bits used for quantization.
RTreeNode
---------
.. ocv:class:: RTreeNode
Class containing a base structure for ``RandomizedTree``. ::
struct RTreeNode
{
short offset1, offset2;
RTreeNode() {}
RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
: offset1(y1*PATCH_SIZE + x1),
offset2(y2*PATCH_SIZE + x2)
{}
//! Left child on 0, right child on 1
inline bool operator() (uchar* patch_data) const
{
return patch_data[offset1] > patch_data[offset2];
}
};
RTreeClassifier
ORB::operator()
---------------
.. ocv:class:: RTreeClassifier
Finds keypoints in an image and computes their descriptors
.. ocv:function:: void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const
Class containing ``RTreeClassifier``. It represents the Calonder descriptor originally introduced by Michael Calonder. ::
class CV_EXPORTS RTreeClassifier
{
public:
static const int DEFAULT_TREES = 48;
static const size_t DEFAULT_NUM_QUANT_BITS = 4;
RTreeClassifier();
void train(std::vector<BaseKeypoint> const& base_set,
RNG &rng,
int num_trees = RTreeClassifier::DEFAULT_TREES,
int depth = DEFAULT_DEPTH,
int views = DEFAULT_VIEWS,
size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
bool print_status = true);
void train(std::vector<BaseKeypoint> const& base_set,
RNG &rng,
PatchGenerator &make_patch,
int num_trees = RTreeClassifier::DEFAULT_TREES,
int depth = DEFAULT_DEPTH,
int views = DEFAULT_VIEWS,
size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
bool print_status = true);
// sig must point to a memory block of at least
//classes()*sizeof(float|uchar) bytes
void getSignature(IplImage *patch, uchar *sig);
void getSignature(IplImage *patch, float *sig);
void getSparseSignature(IplImage *patch, float *sig,
float thresh);
static int countNonZeroElements(float *vec, int n, double tol=1e-10);
static inline void safeSignatureAlloc(uchar **sig, int num_sig=1,
int sig_len=176);
static inline uchar* safeSignatureAlloc(int num_sig=1,
int sig_len=176);
inline int classes() { return classes_; }
inline int original_num_classes()
{ return original_num_classes_; }
void setQuantization(int num_quant_bits);
void discardFloatPosteriors();
void read(const char* file_name);
void read(std::istream &is);
void write(const char* file_name) const;
void write(std::ostream &os) const;
std::vector<RandomizedTree> trees_;
private:
int classes_;
int num_quant_bits_;
uchar **posteriors_;
ushort *ptemp_;
int original_num_classes_;
bool keep_floats_;
};
RTreeClassifier::train
--------------------------
Trains a randomized tree classifier using an input set of keypoints.
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
:param image: The input 8-bit grayscale image.
:param rng: Random-number generator used for training.
:param mask: The operation mask.
:param make_patch: Patch generator used for training.
:param keypoints: The output vector of keypoints.
:param num_trees: Number of randomized trees used in ``RTreeClassificator`` .
:param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
:param depth: Maximum tree depth.
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
:param views: Number of random views of each keypoint neighborhood to generate.
:param reduced_num_dim: Number of dimensions used in the compressed signature.
:param num_quant_bits: Number of bits used for quantization.
:param print_status: Current status of training printed on the console.
RTreeClassifier::getSignature
---------------------------------
Returns a signature for an image patch.
.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
.. ocv:function:: void getSignature(IplImage *patch, float *sig)
:param patch: Image patch to calculate the signature for.
:param sig: Output signature (array dimension is ``reduced_num_dim)`` .
RTreeClassifier::getSparseSignature
---------------------------------------
Returns a sparse signature for an image patch
.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
:param patch: Image patch to calculate the signature for.
:param sig: Output signature (array dimension is ``reduced_num_dim)`` .
:param thresh: Threshold used for compressing the signature.
Returns a signature for an image patch similarly to ``getSignature`` but uses a threshold for removing all signature elements below the threshold so that the signature is compressed.
RTreeClassifier::countNonZeroElements
-----------------------------------------
Returns the number of non-zero elements in an input array.
.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
:param vec: Input vector containing float elements.
:param n: Input vector size.
:param tol: Threshold used for counting elements. All elements less than ``tol`` are considered as zero elements.
RTreeClassifier::read
-------------------------
Reads a pre-saved ``RTreeClassifier`` from a file or stream.
.. ocv:function:: read(const char* file_name)
.. ocv:function:: read(std::istream& is)
:param file_name: Name of the file that contains randomized tree data.
:param is: Input stream associated with the file that contains randomized tree data.
RTreeClassifier::write
--------------------------
Writes the current ``RTreeClassifier`` to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
:param os: Output stream associated with the file where randomized tree data is stored.
RTreeClassifier::setQuantization
------------------------------------
Applies quantization to the current randomized tree.
.. ocv:function:: void setQuantization(int num_quant_bits)
:param num_quant_bits: Number of bits used for quantization.
The example below demonstrates the usage of ``RTreeClassifier`` for matching the features. The features are extracted from the test and train images with SURF. Output is
:math:`best\_corr` and
:math:`best\_corr\_idx` arrays that keep the best probabilities and corresponding features indices for every train feature. ::
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
CvSURFParams params = cvSURFParams(500, 1);
cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors,
storage, params );
cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors,
storage, params );
RTreeClassifier detector;
int patch_width = PATCH_SIZE;
    int patch_height = PATCH_SIZE;
vector<BaseKeypoint> base_set;
int i=0;
CvSURFPoint* point;
for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
{
point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
base_set.push_back(
BaseKeypoint(point->pt.x,point->pt.y,train_image));
}
//Detector training
RNG rng( cvGetTickCount() );
PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,
-CV_PI/3,CV_PI/3);
    printf("RTree Classifier training...\n");
detector.train(base_set,rng,gen,24,DEFAULT_DEPTH,2000,
(int)base_set.size(), detector.DEFAULT_NUM_QUANT_BITS);
    printf("Done\n");
float* signature = new float[detector.original_num_classes()];
float* best_corr;
int* best_corr_idx;
if (imageKeypoints->total > 0)
{
best_corr = new float[imageKeypoints->total];
best_corr_idx = new int[imageKeypoints->total];
}
for(i=0; i < imageKeypoints->total; i++)
{
point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
int part_idx = -1;
float prob = 0.0f;
CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,
(int)(point->pt.y) - patch_height/2,
patch_width, patch_height);
cvSetImageROI(test_image, roi);
roi = cvGetImageROI(test_image);
if(roi.width != patch_width || roi.height != patch_height)
{
best_corr_idx[i] = part_idx;
best_corr[i] = prob;
}
else
{
cvSetImageROI(test_image, roi);
IplImage* roi_image =
cvCreateImage(cvSize(roi.width, roi.height),
test_image->depth, test_image->nChannels);
cvCopy(test_image,roi_image);
detector.getSignature(roi_image, signature);
for (int j = 0; j< detector.original_num_classes();j++)
{
if (prob < signature[j])
{
part_idx = j;
prob = signature[j];
}
}
best_corr_idx[i] = part_idx;
best_corr[i] = prob;
if (roi_image)
cvReleaseImage(&roi_image);
}
cvResetImageROI(test_image);
}
..

View File

@@ -737,9 +737,11 @@ protected:
PixelTestFn test_fn_;
};
/****************************************************************************************\
* Distance *
* Distance *
\****************************************************************************************/
template<typename T>
struct CV_EXPORTS Accumulator
{
@@ -757,9 +759,10 @@ template<> struct Accumulator<short> { typedef float Type; };
template<class T>
struct CV_EXPORTS SL2
{
enum { normType = NORM_L2SQR };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL2Sqr<ValueType, ResultType>(a, b, size);
@@ -772,9 +775,10 @@ struct CV_EXPORTS SL2
template<class T>
struct CV_EXPORTS L2
{
enum { normType = NORM_L2 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return (ResultType)sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
@@ -787,9 +791,10 @@ struct CV_EXPORTS L2
template<class T>
struct CV_EXPORTS L1
{
enum { normType = NORM_L1 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL1<ValueType, ResultType>(a, b, size);
@@ -802,9 +807,10 @@ struct CV_EXPORTS L1
*/
struct CV_EXPORTS Hamming
{
enum { normType = NORM_HAMMING };
typedef unsigned char ValueType;
typedef int ResultType;
/** this will count the bits in a ^ b
*/
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
@@ -817,6 +823,7 @@ typedef Hamming HammingLUT;
template<int cellsize> struct CV_EXPORTS HammingMultilevel
{
enum { normType = NORM_HAMMING + (cellsize>1) };
typedef unsigned char ValueType;
typedef int ResultType;
@@ -824,7 +831,7 @@ template<int cellsize> struct CV_EXPORTS HammingMultilevel
{
return normHamming(a, b, size, cellsize);
}
};
};
/****************************************************************************************\
* DMatch *

View File

@@ -96,10 +96,11 @@ void DescriptorExtractor::removeBorderKeypoints( vector<KeyPoint>& keypoints,
Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)
{
if( descriptorExtractorType.find("Opponent") == 0)
if( descriptorExtractorType.find("Opponent") == 0 )
{
size_t pos = string("Opponent").size();
return DescriptorExtractor::create(descriptorExtractorType.substr(pos));
string type = descriptorExtractorType.substr(pos);
return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
@@ -231,9 +232,9 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )
{
const int maxInitIdx = std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
channelKeypoints[2][idxs[2][cp[2]]].class_id ) );
const int maxInitIdx = std::max( 0, std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
channelKeypoints[2][idxs[2][cp[2]]].class_id ) ) );
while( channelKeypoints[0][idxs[0][cp[0]]].class_id < maxInitIdx && cp[0] < channelKeypoints[0].size() ) { cp[0]++; }
while( channelKeypoints[1][idxs[1][cp[1]]].class_id < maxInitIdx && cp[1] < channelKeypoints[1].size() ) { cp[1]++; }

View File

@@ -144,40 +144,6 @@ void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, co
}
}
static Algorithm* createGFTT() { return new GFTTDetector; }
static Algorithm* createHarris()
{
GFTTDetector* d = new GFTTDetector;
d->set("useHarris", true);
return d;
}
static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
AlgorithmInfo* GFTTDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
GFTTDetector obj;
gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
gftt_info.addParam(obj, "minDistance", obj.minDistance);
gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
gftt_info.addParam(obj, "k", obj.k);
harris_info.addParam(obj, "nfeatures", obj.nfeatures);
harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
harris_info.addParam(obj, "minDistance", obj.minDistance);
harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
harris_info.addParam(obj, "k", obj.k);
initialized = true;
}
return &gftt_info;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
@@ -215,29 +181,6 @@ void DenseFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypo
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
static Algorithm* createDense() { return new DenseFeatureDetector; }
static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
AlgorithmInfo* DenseFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
DenseFeatureDetector obj;
dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
dense_info.addParam(obj, "initXyStep", obj.initXyStep);
dense_info.addParam(obj, "initImgBound", obj.initImgBound);
dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
initialized = true;
}
return &dense_info;
}
/*
* GridAdaptedFeatureDetector
@@ -359,161 +302,6 @@ void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, vector<KeyPoin
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
/* NOTE!!!
All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
Otherwise, linker may throw away some seemingly unused stuff.
*/
static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
static AlgorithmInfo& brief_info()
{
static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
return brief_info_var;
}
static AlgorithmInfo& brief_info_auto = brief_info();
AlgorithmInfo* BriefDescriptorExtractor::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
BriefDescriptorExtractor brief;
brief_info().addParam(brief, "bytes", brief.bytes_);
initialized = true;
}
return &brief_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createFAST() { return new FastFeatureDetector; }
static AlgorithmInfo& fast_info()
{
static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
return fast_info_var;
}
static AlgorithmInfo& fast_info_auto = fast_info();
AlgorithmInfo* FastFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
FastFeatureDetector obj;
fast_info().addParam(obj, "threshold", obj.threshold);
fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
initialized = true;
}
return &fast_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createStarDetector() { return new StarDetector; }
static AlgorithmInfo& star_info()
{
static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
return star_info_var;
}
static AlgorithmInfo& star_info_auto = star_info();
AlgorithmInfo* StarDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
StarDetector obj;
star_info().addParam(obj, "maxSize", obj.maxSize);
star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
initialized = true;
}
return &star_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createMSER() { return new MSER; }
static AlgorithmInfo& mser_info()
{
static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
return mser_info_var;
}
static AlgorithmInfo& mser_info_auto = mser_info();
AlgorithmInfo* MSER::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
MSER obj;
mser_info().addParam(obj, "delta", obj.delta);
mser_info().addParam(obj, "minArea", obj.minArea);
mser_info().addParam(obj, "maxArea", obj.maxArea);
mser_info().addParam(obj, "maxVariation", obj.maxVariation);
mser_info().addParam(obj, "minDiversity", obj.minDiversity);
mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
mser_info().addParam(obj, "minMargin", obj.minMargin);
mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
initialized = true;
}
return &mser_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createORB() { return new ORB; }
static AlgorithmInfo& orb_info()
{
static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
return orb_info_var;
}
static AlgorithmInfo& orb_info_auto = orb_info();
AlgorithmInfo* ORB::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
ORB obj;
orb_info().addParam(obj, "nFeatures", obj.nfeatures);
orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
orb_info().addParam(obj, "nLevels", obj.nlevels);
orb_info().addParam(obj, "firstLevel", obj.firstLevel);
orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
orb_info().addParam(obj, "patchSize", obj.patchSize);
orb_info().addParam(obj, "WTA_K", obj.WTA_K);
orb_info().addParam(obj, "scoreType", obj.scoreType);
initialized = true;
}
return &orb_info();
}
bool initModule_features2d(void)
{
Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
star = createStarDetector(), fastd = createFAST(), mser = createMSER();
return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
fastd->info() != 0 && mser->info() != 0;
}
}

View File

@@ -0,0 +1,263 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
/* NOTE!!!
All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
Otherwise, linker may throw away some seemingly unused stuff.
*/
static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
static AlgorithmInfo& brief_info()
{
static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
return brief_info_var;
}
static AlgorithmInfo& brief_info_auto = brief_info();
AlgorithmInfo* BriefDescriptorExtractor::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
BriefDescriptorExtractor brief;
brief_info().addParam(brief, "bytes", brief.bytes_);
initialized = true;
}
return &brief_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createFAST() { return new FastFeatureDetector; }
static AlgorithmInfo& fast_info()
{
static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
return fast_info_var;
}
static AlgorithmInfo& fast_info_auto = fast_info();
AlgorithmInfo* FastFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
FastFeatureDetector obj;
fast_info().addParam(obj, "threshold", obj.threshold);
fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
initialized = true;
}
return &fast_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createStarDetector() { return new StarDetector; }
static AlgorithmInfo& star_info()
{
static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
return star_info_var;
}
static AlgorithmInfo& star_info_auto = star_info();
AlgorithmInfo* StarDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
StarDetector obj;
star_info().addParam(obj, "maxSize", obj.maxSize);
star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
initialized = true;
}
return &star_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createMSER() { return new MSER; }
static AlgorithmInfo& mser_info()
{
static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
return mser_info_var;
}
static AlgorithmInfo& mser_info_auto = mser_info();
AlgorithmInfo* MSER::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
MSER obj;
mser_info().addParam(obj, "delta", obj.delta);
mser_info().addParam(obj, "minArea", obj.minArea);
mser_info().addParam(obj, "maxArea", obj.maxArea);
mser_info().addParam(obj, "maxVariation", obj.maxVariation);
mser_info().addParam(obj, "minDiversity", obj.minDiversity);
mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
mser_info().addParam(obj, "minMargin", obj.minMargin);
mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
initialized = true;
}
return &mser_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createORB() { return new ORB; }
static AlgorithmInfo& orb_info()
{
static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
return orb_info_var;
}
static AlgorithmInfo& orb_info_auto = orb_info();
AlgorithmInfo* ORB::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
ORB obj;
orb_info().addParam(obj, "nFeatures", obj.nfeatures);
orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
orb_info().addParam(obj, "nLevels", obj.nlevels);
orb_info().addParam(obj, "firstLevel", obj.firstLevel);
orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
orb_info().addParam(obj, "patchSize", obj.patchSize);
orb_info().addParam(obj, "WTA_K", obj.WTA_K);
orb_info().addParam(obj, "scoreType", obj.scoreType);
initialized = true;
}
return &orb_info();
}
static Algorithm* createGFTT() { return new GFTTDetector; }
static Algorithm* createHarris()
{
GFTTDetector* d = new GFTTDetector;
d->set("useHarris", true);
return d;
}
static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
AlgorithmInfo* GFTTDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
GFTTDetector obj;
gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
gftt_info.addParam(obj, "minDistance", obj.minDistance);
gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
gftt_info.addParam(obj, "k", obj.k);
harris_info.addParam(obj, "nfeatures", obj.nfeatures);
harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
harris_info.addParam(obj, "minDistance", obj.minDistance);
harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
harris_info.addParam(obj, "k", obj.k);
initialized = true;
}
return &gftt_info;
}
static Algorithm* createDense() { return new DenseFeatureDetector; }
static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
AlgorithmInfo* DenseFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
DenseFeatureDetector obj;
dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
dense_info.addParam(obj, "initXyStep", obj.initXyStep);
dense_info.addParam(obj, "initImgBound", obj.initImgBound);
dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
initialized = true;
}
return &dense_info;
}
bool initModule_features2d(void)
{
Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
star = createStarDetector(), fastd = createFAST(), mser = createMSER(),
dense = createDense(), gftt = createGFTT(), harris = createHarris();
return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
gftt->info() != 0 && harris->info() != 0;
}
}

View File

@@ -1,122 +0,0 @@
#include "test_precomp.hpp"
#if 0
using namespace cv;
class BruteForceMatcherTest : public cvtest::BaseTest
{
public:
BruteForceMatcherTest();
protected:
void run( int );
};
struct CV_EXPORTS L2Fake : public L2<float>
{
};
BruteForceMatcherTest::BruteForceMatcherTest() : cvtest::BaseTest( "BruteForceMatcher", "BruteForceMatcher::matchImpl")
{
support_testing_modes = cvtest::TS::TIMING_MODE;
}
void BruteForceMatcherTest::run( int )
{
const int dimensions = 64;
const int descriptorsNumber = 5000;
Mat train = Mat( descriptorsNumber, dimensions, CV_32FC1);
Mat query = Mat( descriptorsNumber, dimensions, CV_32FC1);
Mat permutation( 1, descriptorsNumber, CV_32SC1 );
for( int i=0;i<descriptorsNumber;i++ )
permutation.at<int>( 0, i ) = i;
//RNG rng = RNG( cvGetTickCount() );
RNG rng = RNG( *ts->get_rng() );
randShuffle( permutation, 1, &rng );
float boundary = 500.f;
for( int row=0;row<descriptorsNumber;row++ )
{
for( int col=0;col<dimensions;col++ )
{
int bit = rng( 2 );
train.at<float>( permutation.at<int>( 0, row ), col ) = bit*boundary + rng.uniform( 0.f, boundary );
query.at<float>( row, col ) = bit*boundary + rng.uniform( 0.f, boundary );
}
}
vector<DMatch> specMatches, genericMatches;
BruteForceMatcher<L2<float> > specMatcher;
BruteForceMatcher<L2Fake > genericMatcher;
int64 time0 = cvGetTickCount();
specMatcher.match( query, train, specMatches );
int64 time1 = cvGetTickCount();
genericMatcher.match( query, train, genericMatches );
int64 time2 = cvGetTickCount();
float specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time s: %f, us per pair: %f\n",
specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
float genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time s: %f, us per pair: %f\n",
genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
if( (int)specMatches.size() != descriptorsNumber || (int)genericMatches.size() != descriptorsNumber )
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
for( int i=0;i<descriptorsNumber;i++ )
{
float epsilon = 0.01f;
bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
specMatches[i].queryIdx == genericMatches[i].queryIdx &&
specMatches[i].trainIdx == genericMatches[i].trainIdx;
if( !isEquiv || specMatches[i].trainIdx != permutation.at<int>( 0, i ) )
{
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
break;
}
}
//Test mask
Mat mask( query.rows, train.rows, CV_8UC1 );
rng.fill( mask, RNG::UNIFORM, 0, 2 );
time0 = cvGetTickCount();
specMatcher.match( query, train, specMatches, mask );
time1 = cvGetTickCount();
genericMatcher.match( query, train, genericMatches, mask );
time2 = cvGetTickCount();
specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time with mask s: %f, us per pair: %f\n",
specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time with mask s: %f, us per pair: %f\n",
genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
if( specMatches.size() != genericMatches.size() )
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
for( size_t i=0;i<specMatches.size();i++ )
{
//float epsilon = 1e-2;
float epsilon = 10000000;
bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
specMatches[i].queryIdx == genericMatches[i].queryIdx &&
specMatches[i].trainIdx == genericMatches[i].trainIdx;
if( !isEquiv )
{
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
break;
}
}
}
BruteForceMatcherTest taBruteForceMatcherTest;
#endif

View File

@@ -1059,13 +1059,6 @@ TEST( Features2d_DescriptorExtractor_BRIEF, regression )
test.safe_run();
}
/*TEST( Features2d_DescriptorExtractor_OpponentSIFT, regression )
{
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-sift", 0.18f,
DescriptorExtractor::create("OpponentSIFT"), 8.06652f );
test.safe_run();
}*/
#if CV_SSE2
TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
{