merged 2.4 into trunk
@@ -0,0 +1,33 @@
Common Interfaces of Descriptor Extractors
==========================================

.. highlight:: cpp

Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. This section is devoted to computing descriptors
represented as vectors in a multidimensional space. All objects that implement vector
descriptor extractors inherit the
:ocv:class:`DescriptorExtractor` interface.
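For illustration, a minimal usage sketch of this common interface (the detector/extractor names and the image path are arbitrary placeholders, not prescribed by this module): ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/features2d/features2d.hpp>

    using namespace cv;

    int main()
    {
        Mat img = imread("scene.png", 0);           // hypothetical grayscale input image

        // detect keypoints with any FeatureDetector implementation
        Ptr<FeatureDetector> detector = FeatureDetector::create("FAST");
        std::vector<KeyPoint> keypoints;
        detector->detect(img, keypoints);

        // any DescriptorExtractor can be substituted here without changing the rest of the code
        Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
        Mat descriptors;
        extractor->compute(img, keypoints, descriptors);
        return 0;
    }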

CalonderDescriptorExtractor
---------------------------
.. ocv:class:: CalonderDescriptorExtractor

Wrapping class for computing descriptors by using the
:ocv:class:`RTreeClassifier` class. ::

    template<typename T>
    class CalonderDescriptorExtractor : public DescriptorExtractor
    {
    public:
        CalonderDescriptorExtractor( const string& classifierFile );

        virtual void read( const FileNode &fn );
        virtual void write( FileStorage &fs ) const;
        virtual int descriptorSize() const;
        virtual int descriptorType() const;
    protected:
        ...
    };
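A hypothetical call sequence for the Calonder extractor itself (the classifier file name is an assumption; such a file has to be trained or obtained separately): ::

    #include <opencv2/legacy/legacy.hpp>  // CalonderDescriptorExtractor lives in the legacy module

    // img and keypoints are assumed to be prepared as in the snippet above
    cv::CalonderDescriptorExtractor<float> extractor("calonder_classifier.rtc");
    cv::Mat descriptors;
    extractor.compute(img, keypoints, descriptors);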
@@ -0,0 +1,118 @@
Common Interfaces of Generic Descriptor Matchers
================================================

.. highlight:: cpp

OneWayDescriptorMatcher
-----------------------
.. ocv:class:: OneWayDescriptorMatcher

Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`OneWayDescriptorBase` class. ::
class OneWayDescriptorMatcher : public GenericDescriptorMatcher
|
||||
{
|
||||
public:
|
||||
class Params
|
||||
{
|
||||
public:
|
||||
static const int POSE_COUNT = 500;
|
||||
static const int PATCH_WIDTH = 24;
|
||||
static const int PATCH_HEIGHT = 24;
|
||||
static float GET_MIN_SCALE() { return 0.7f; }
|
||||
static float GET_MAX_SCALE() { return 1.5f; }
|
||||
static float GET_STEP_SCALE() { return 1.2f; }
|
||||
|
||||
Params( int poseCount = POSE_COUNT,
|
||||
Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
|
||||
string pcaFilename = string(),
|
||||
string trainPath = string(), string trainImagesList = string(),
|
||||
float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
|
||||
float stepScale = GET_STEP_SCALE() );
|
||||
|
||||
int poseCount;
|
||||
Size patchSize;
|
||||
string pcaFilename;
|
||||
string trainPath;
|
||||
string trainImagesList;
|
||||
|
||||
float minScale, maxScale, stepScale;
|
||||
};
|
||||
|
||||
OneWayDescriptorMatcher( const Params& params=Params() );
|
||||
virtual ~OneWayDescriptorMatcher();
|
||||
|
||||
void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
|
||||
|
||||
// Clears keypoints stored in collection and OneWayDescriptorBase
|
||||
virtual void clear();
|
||||
|
||||
virtual void train();
|
||||
|
||||
virtual bool isMaskSupported();
|
||||
|
||||
virtual void read( const FileNode &fn );
|
||||
virtual void write( FileStorage& fs ) const;
|
||||
|
||||
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
|
||||
protected:
|
||||
...
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
FernDescriptorMatcher
---------------------
.. ocv:class:: FernDescriptorMatcher

Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`FernClassifier` class. ::
class FernDescriptorMatcher : public GenericDescriptorMatcher
|
||||
{
|
||||
public:
|
||||
class Params
|
||||
{
|
||||
public:
|
||||
Params( int nclasses=0,
|
||||
int patchSize=FernClassifier::PATCH_SIZE,
|
||||
int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
|
||||
int nstructs=FernClassifier::DEFAULT_STRUCTS,
|
||||
int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
|
||||
int nviews=FernClassifier::DEFAULT_VIEWS,
|
||||
int compressionMethod=FernClassifier::COMPRESSION_NONE,
|
||||
const PatchGenerator& patchGenerator=PatchGenerator() );
|
||||
|
||||
Params( const string& filename );
|
||||
|
||||
int nclasses;
|
||||
int patchSize;
|
||||
int signatureSize;
|
||||
int nstructs;
|
||||
int structSize;
|
||||
int nviews;
|
||||
int compressionMethod;
|
||||
PatchGenerator patchGenerator;
|
||||
|
||||
string filename;
|
||||
};
|
||||
|
||||
FernDescriptorMatcher( const Params& params=Params() );
|
||||
virtual ~FernDescriptorMatcher();
|
||||
|
||||
virtual void clear();
|
||||
|
||||
virtual void train();
|
||||
|
||||
virtual bool isMaskSupported();
|
||||
|
||||
virtual void read( const FileNode &fn );
|
||||
virtual void write( FileStorage& fs ) const;
|
||||
|
||||
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
|
||||
|
||||
protected:
|
||||
...
|
||||
};
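As an illustration, a hedged sketch of the generic matcher interface with the Fern-based implementation (image paths are placeholders; any other ``GenericDescriptorMatcher`` could be substituted): ::

    #include <opencv2/legacy/legacy.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>

    using namespace cv;

    int main()
    {
        Mat trainImg = imread("object.png", 0);   // hypothetical training image
        Mat queryImg = imread("scene.png", 0);    // hypothetical query image

        Ptr<FeatureDetector> detector = FeatureDetector::create("FAST");
        std::vector<KeyPoint> trainKp, queryKp;
        detector->detect(trainImg, trainKp);
        detector->detect(queryImg, queryKp);

        // the matcher computes, trains, and matches the descriptors internally
        FernDescriptorMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match(queryImg, queryKp, trainImg, trainKp, matches);
        return 0;
    }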
|
||||
|
||||
204
modules/legacy/doc/expectation_maximization.rst
Normal file
@@ -0,0 +1,204 @@
Expectation Maximization
========================

This section describes the obsolete ``C`` interface of the EM algorithm. Details of the algorithm and its ``C++`` interface can be found in the other section :ref:`ML_Expectation Maximization`.

.. highlight:: cpp


CvEMParams
----------
.. ocv:class:: CvEMParams

Parameters of the EM algorithm. All parameters are public. You can initialize them with a constructor and then override some of them directly if you want.

CvEMParams::CvEMParams
----------------------
The constructors

.. ocv:function:: CvEMParams::CvEMParams()

.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=CvEM::COV_MAT_DIAGONAL, int start_step=CvEM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )

    :param nclusters: The number of mixture components in the Gaussian mixture model. Some EM implementations could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.

    :param cov_mat_type: Constraint on covariance matrices that defines the type of matrices. Possible values are:

        * **CvEM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. There is only one parameter :math:`\mu_k` to be estimated for each matrix. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example, when the data is preprocessed with PCA). The results of such a preliminary estimation may be passed again to the optimization procedure, this time with ``cov_mat_type=CvEM::COV_MAT_DIAGONAL``.

        * **CvEM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of free parameters is ``d`` for each matrix. This is the most commonly used option, yielding good estimation results.

        * **CvEM::COV_MAT_GENERIC** A symmetric positive-definite matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option, unless there is a fairly accurate initial estimate of the parameters and/or a huge number of training samples.

    :param start_step: The start step of the EM algorithm:

        * **CvEM::START_E_STEP** Start with the Expectation step. You need to provide means :math:`a_k` of mixture components to use this option. Optionally you can pass weights :math:`\pi_k` and covariance matrices :math:`S_k` of mixture components.
        * **CvEM::START_M_STEP** Start with the Maximization step. You need to provide initial probabilities :math:`p_{i,k}` to use this option.
        * **CvEM::START_AUTO_STEP** Start with the Expectation step. You need not provide any parameters because they will be estimated by the k-means algorithm.

    :param term_crit: The termination criteria of the EM algorithm. The EM algorithm can be terminated after the number of iterations ``term_crit.max_iter`` (number of M-steps) or when the relative change of the likelihood logarithm is less than ``term_crit.epsilon``.

    :param probs: Initial probabilities :math:`p_{i,k}` of sample :math:`i` belonging to mixture component :math:`k`. It is a floating-point matrix of :math:`nsamples \times nclusters` size. It is used and must not be NULL only when ``start_step=CvEM::START_M_STEP``.

    :param weights: Initial weights :math:`\pi_k` of mixture components. It is a floating-point vector with :math:`nclusters` elements. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.

    :param means: Initial means :math:`a_k` of mixture components. It is a floating-point matrix of :math:`nclusters \times dims` size. It is used and must not be NULL only when ``start_step=CvEM::START_E_STEP``.

    :param covs: Initial covariance matrices :math:`S_k` of mixture components. Each of the covariance matrices is a valid square floating-point matrix of :math:`dims \times dims` size. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.

The default constructor represents a rough rule of thumb:

::

    CvEMParams() : nclusters(10), cov_mat_type(1/*CvEM::COV_MAT_DIAGONAL*/),
        start_step(0/*CvEM::START_AUTO_STEP*/), probs(0), weights(0), means(0), covs(0)
    {
        term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
    }


With another constructor, it is possible to override a variety of parameters, from the number of mixtures (the only essential problem-dependent parameter) to the initial values for the mixture parameters.
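For illustration, one possible way to fill these parameters (the values below are arbitrary example choices, not recommended defaults): ::

    CvEMParams params;
    params.nclusters    = 2;                         // problem-dependent
    params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;   // constrain the covariance matrices
    params.start_step   = CvEM::START_AUTO_STEP;     // let k-means produce the initial estimate
    params.term_crit    = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 300, 0.01 );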

CvEM
----
.. ocv:class:: CvEM

The class implements the EM algorithm as described in the beginning of the section :ref:`ML_Expectation Maximization`.


CvEM::train
-----------
Estimates the Gaussian mixture parameters from a sample set.

.. ocv:function:: void CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )

.. ocv:function:: bool CvEM::train( const CvMat* samples, const CvMat* sampleIdx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 )

.. ocv:pyfunction:: cv2.EM.train(samples[, sampleIdx[, params]]) -> retval, labels

    :param samples: Samples from which the Gaussian mixture model will be estimated.

    :param sample_idx: Mask of samples to use. All samples are used by default.

    :param params: Parameters of the EM algorithm.

    :param labels: The optional output "class label" for each sample: :math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).

Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take responses (class labels or function values) as input. Instead, it computes the
*Maximum Likelihood Estimate* of the Gaussian mixture parameters from an input sample set and stores all the parameters inside the structure:
:math:`p_{i,k}` in ``probs``,
:math:`a_k` in ``means``,
:math:`S_k` in ``covs[k]``, and
:math:`\pi_k` in ``weights``. Optionally it computes the output "class label" for each sample:
:math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).

The trained model can be used further for prediction, just like any other classifier. The trained model is similar to the
:ocv:class:`CvNormalBayesClassifier`.

For an example of clustering random samples of the multi-Gaussian distribution using EM, see the ``em.cpp`` sample in the OpenCV distribution.
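A minimal training sketch (the ``samples`` matrix is a hypothetical ``N x dims``, ``CV_32FC1`` matrix prepared by the caller): ::

    cv::Mat samples;                 // N x dims, CV_32FC1, filled by the caller
    // ... fill samples ...

    CvEMParams params;
    params.nclusters = 2;

    CvEM em;
    cv::Mat labels;
    bool ok = em.train(samples, cv::Mat(), params, &labels);
    if( ok )
    {
        // per-sample posteriors and mixture parameters are now available
        cv::Mat probs = em.getProbs();
        cv::Mat means = em.getMeans();
    }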

CvEM::predict
-------------
Returns a mixture component index of a sample.

.. ocv:function:: float CvEM::predict( const Mat& sample, Mat* probs=0 ) const

.. ocv:function:: float CvEM::predict( const CvMat* sample, CvMat* probs ) const

.. ocv:pyfunction:: cv2.EM.predict(sample) -> retval, probs

    :param sample: A sample for classification.

    :param probs: If it is not NULL, the method writes the posterior probabilities of each component given the sample to this parameter.
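For example, classifying one new sample with a trained model might look like this (a sketch, assuming ``em`` and ``samples`` from the training snippet above): ::

    cv::Mat sample(1, samples.cols, CV_32FC1);     // one query sample, filled by the caller
    cv::Mat sampleProbs;
    float idx = em.predict(sample, &sampleProbs);  // index of the most probable mixture component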
CvEM::getNClusters
|
||||
------------------
|
||||
Returns the number of mixture components :math:`M` in the Gaussian mixture model.
|
||||
|
||||
.. ocv:function:: int CvEM::getNClusters() const
|
||||
|
||||
.. ocv:function:: int CvEM::get_nclusters() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getNClusters() -> retval
|
||||
|
||||
|
||||
CvEM::getMeans
|
||||
------------------
|
||||
Returns mixture means :math:`a_k`.
|
||||
|
||||
.. ocv:function:: Mat CvEM::getMeans() const
|
||||
|
||||
.. ocv:function:: const CvMat* CvEM::get_means() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getMeans() -> means
|
||||
|
||||
|
||||
CvEM::getCovs
|
||||
-------------
|
||||
Returns mixture covariance matrices :math:`S_k`.
|
||||
|
||||
.. ocv:function:: void CvEM::getCovs(std::vector<cv::Mat>& covs) const
|
||||
|
||||
.. ocv:function:: const CvMat** CvEM::get_covs() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getCovs([covs]) -> covs
|
||||
|
||||
|
||||
CvEM::getWeights
|
||||
----------------
|
||||
Returns mixture weights :math:`\pi_k`.
|
||||
|
||||
.. ocv:function:: Mat CvEM::getWeights() const
|
||||
|
||||
.. ocv:function:: const CvMat* CvEM::get_weights() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getWeights() -> weights
|
||||
|
||||
|
||||
CvEM::getProbs
|
||||
--------------
|
||||
Returns vectors of probabilities for each training sample.
|
||||
|
||||
.. ocv:function:: Mat CvEM::getProbs() const
|
||||
|
||||
.. ocv:function:: const CvMat* CvEM::get_probs() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getProbs() -> probs
|
||||
|
||||
For each training sample :math:`i` (that was passed to the constructor or to :ocv:func:`CvEM::train`), the method returns probabilities :math:`p_{i,k}` of belonging to a mixture component :math:`k`.
|
||||
|
||||
|
||||
CvEM::getLikelihood
|
||||
-------------------
|
||||
Returns the logarithm of the likelihood.
|
||||
|
||||
.. ocv:function:: double CvEM::getLikelihood() const
|
||||
|
||||
.. ocv:function:: double CvEM::get_log_likelihood() const
|
||||
|
||||
.. ocv:pyfunction:: cv2.EM.getLikelihood() -> likelihood
|
||||
|
||||
|
||||
CvEM::write
|
||||
-----------
|
||||
Writes the trained Gaussian mixture model to the file storage.
|
||||
|
||||
.. ocv:function:: void CvEM::write( CvFileStorage* fs, const char* name ) const
|
||||
|
||||
:param fs: A file storage where the model will be written.
|
||||
:param name: A name of the file node where the model data will be written.
|
||||
|
||||
|
||||
CvEM::read
|
||||
-----------------
|
||||
Reads the trained Gaussian mixture model from the file storage.
|
||||
|
||||
.. ocv:function:: void CvEM::read( CvFileStorage* fs, CvFileNode* node )
|
||||
|
||||
:param fs: A file storage with the trained model.
|
||||
|
||||
:param node: The parent map. If it is NULL, the function searches a node with parameters in all the top-level nodes (streams), starting with the first one.
|
||||
|
||||
433
modules/legacy/doc/feature_detection_and_description.rst
Normal file
@@ -0,0 +1,433 @@
|
||||
Feature Detection and Description
|
||||
=================================
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
RandomizedTree
|
||||
--------------
|
||||
.. ocv:class:: RandomizedTree
|
||||
|
||||
Class containing a base structure for ``RTreeClassifier``. ::
|
||||
|
||||
class CV_EXPORTS RandomizedTree
|
||||
{
|
||||
public:
|
||||
friend class RTreeClassifier;
|
||||
|
||||
RandomizedTree();
|
||||
~RandomizedTree();
|
||||
|
||||
void train(std::vector<BaseKeypoint> const& base_set,
|
||||
RNG &rng, int depth, int views,
|
||||
size_t reduced_num_dim, int num_quant_bits);
|
||||
void train(std::vector<BaseKeypoint> const& base_set,
|
||||
RNG &rng, PatchGenerator &make_patch, int depth,
|
||||
int views, size_t reduced_num_dim, int num_quant_bits);
|
||||
|
||||
// next two functions are EXPERIMENTAL
|
||||
//(do not use unless you know exactly what you do)
|
||||
static void quantizeVector(float *vec, int dim, int N, float bnds[2],
|
||||
int clamp_mode=0);
|
||||
static void quantizeVector(float *src, int dim, int N, float bnds[2],
|
||||
uchar *dst);
|
||||
|
||||
// patch_data must be a 32x32 array (no row padding)
|
||||
float* getPosterior(uchar* patch_data);
|
||||
const float* getPosterior(uchar* patch_data) const;
|
||||
uchar* getPosterior2(uchar* patch_data);
|
||||
|
||||
void read(const char* file_name, int num_quant_bits);
|
||||
void read(std::istream &is, int num_quant_bits);
|
||||
void write(const char* file_name) const;
|
||||
void write(std::ostream &os) const;
|
||||
|
||||
int classes() { return classes_; }
|
||||
int depth() { return depth_; }
|
||||
|
||||
void discardFloatPosteriors() { freePosteriors(1); }
|
||||
|
||||
inline void applyQuantization(int num_quant_bits)
|
||||
{ makePosteriors2(num_quant_bits); }
|
||||
|
||||
private:
|
||||
int classes_;
|
||||
int depth_;
|
||||
int num_leaves_;
|
||||
std::vector<RTreeNode> nodes_;
|
||||
float **posteriors_; // 16-byte aligned posteriors
|
||||
uchar **posteriors2_; // 16-byte aligned posteriors
|
||||
std::vector<int> leaf_counts_;
|
||||
|
||||
void createNodes(int num_nodes, RNG &rng);
|
||||
void allocPosteriorsAligned(int num_leaves, int num_classes);
|
||||
void freePosteriors(int which);
|
||||
// which: 1=posteriors_, 2=posteriors2_, 3=both
|
||||
void init(int classes, int depth, RNG &rng);
|
||||
void addExample(int class_id, uchar* patch_data);
|
||||
void finalize(size_t reduced_num_dim, int num_quant_bits);
|
||||
int getIndex(uchar* patch_data) const;
|
||||
inline float* getPosteriorByIndex(int index);
|
||||
inline uchar* getPosteriorByIndex2(int index);
|
||||
inline const float* getPosteriorByIndex(int index) const;
|
||||
void convertPosteriorsToChar();
|
||||
void makePosteriors2(int num_quant_bits);
|
||||
void compressLeaves(size_t reduced_num_dim);
|
||||
void estimateQuantPercForPosteriors(float perc[2]);
|
||||
};
|
||||
|
||||
|
||||
|
||||
RandomizedTree::train
|
||||
-------------------------
|
||||
Trains a randomized tree using an input set of keypoints.
|
||||
|
||||
.. ocv:function:: void RandomizedTree::train( std::vector<BaseKeypoint> const& base_set, RNG& rng, int depth, int views, size_t reduced_num_dim, int num_quant_bits )

.. ocv:function:: void RandomizedTree::train( std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits )
|
||||
|
||||
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
|
||||
|
||||
:param rng: Random-number generator used for training.
|
||||
|
||||
:param make_patch: Patch generator used for training.
|
||||
|
||||
:param depth: Maximum tree depth.
|
||||
|
||||
:param views: Number of random views of each keypoint neighborhood to generate.
|
||||
|
||||
:param reduced_num_dim: Number of dimensions used in the compressed signature.
|
||||
|
||||
:param num_quant_bits: Number of bits used for quantization.
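A hedged illustration of the call (the training image, keypoint coordinates, and parameter values are placeholders): ::

    // train_image is an assumed IplImage* with the training view
    std::vector<BaseKeypoint> base_set;
    base_set.push_back(BaseKeypoint(50, 60, train_image));
    base_set.push_back(BaseKeypoint(120, 80, train_image));

    RNG rng( cvGetTickCount() );
    PatchGenerator gen;             // default patch generator

    RandomizedTree tree;
    tree.train(base_set, rng, gen, /*depth*/ 9, /*views*/ 500,
               /*reduced_num_dim*/ base_set.size(), /*num_quant_bits*/ 4);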
|
||||
|
||||
|
||||
|
||||
RandomizedTree::read
|
||||
------------------------
|
||||
Reads a pre-saved randomized tree from a file or stream.
|
||||
|
||||
.. ocv:function:: void RandomizedTree::read( const char* file_name, int num_quant_bits )

.. ocv:function:: void RandomizedTree::read( std::istream &is, int num_quant_bits )
|
||||
|
||||
:param file_name: Name of the file that contains randomized tree data.
|
||||
|
||||
:param is: Input stream associated with the file that contains randomized tree data.
|
||||
|
||||
:param num_quant_bits: Number of bits used for quantization.
|
||||
|
||||
|
||||
|
||||
RandomizedTree::write
|
||||
-------------------------
|
||||
Writes the current randomized tree to a file or stream.
|
||||
|
||||
.. ocv:function:: void write(const char* file_name) const
|
||||
|
||||
.. ocv:function:: void write(std::ostream &os) const
|
||||
|
||||
:param file_name: Name of the file where randomized tree data is stored.
|
||||
|
||||
:param os: Output stream associated with the file where randomized tree data is stored.
|
||||
|
||||
|
||||
|
||||
RandomizedTree::applyQuantization
|
||||
-------------------------------------
|
||||
.. ocv:function:: void applyQuantization(int num_quant_bits)
|
||||
|
||||
Applies quantization to the current randomized tree.
|
||||
|
||||
:param num_quant_bits: Number of bits used for quantization.
|
||||
|
||||
|
||||
RTreeNode
|
||||
---------
|
||||
.. ocv:class:: RTreeNode
|
||||
|
||||
Class containing a base structure for ``RandomizedTree``. ::
|
||||
|
||||
struct RTreeNode
|
||||
{
|
||||
short offset1, offset2;
|
||||
|
||||
RTreeNode() {}
|
||||
|
||||
RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
|
||||
: offset1(y1*PATCH_SIZE + x1),
|
||||
offset2(y2*PATCH_SIZE + x2)
|
||||
{}
|
||||
|
||||
//! Left child on 0, right child on 1
|
||||
inline bool operator() (uchar* patch_data) const
|
||||
{
|
||||
return patch_data[offset1] > patch_data[offset2];
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
RTreeClassifier
|
||||
---------------
|
||||
.. ocv:class:: RTreeClassifier
|
||||
|
||||
Class implementing the randomized-tree classifier; it represents the Calonder descriptor originally introduced by Michael Calonder. ::
|
||||
|
||||
class CV_EXPORTS RTreeClassifier
|
||||
{
|
||||
public:
|
||||
static const int DEFAULT_TREES = 48;
|
||||
static const size_t DEFAULT_NUM_QUANT_BITS = 4;
|
||||
|
||||
RTreeClassifier();
|
||||
|
||||
void train(std::vector<BaseKeypoint> const& base_set,
|
||||
RNG &rng,
|
||||
int num_trees = RTreeClassifier::DEFAULT_TREES,
|
||||
int depth = DEFAULT_DEPTH,
|
||||
int views = DEFAULT_VIEWS,
|
||||
size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
|
||||
int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
|
||||
bool print_status = true);
|
||||
void train(std::vector<BaseKeypoint> const& base_set,
|
||||
RNG &rng,
|
||||
PatchGenerator &make_patch,
|
||||
int num_trees = RTreeClassifier::DEFAULT_TREES,
|
||||
int depth = DEFAULT_DEPTH,
|
||||
int views = DEFAULT_VIEWS,
|
||||
size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
|
||||
int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
|
||||
bool print_status = true);
|
||||
|
||||
// sig must point to a memory block of at least
|
||||
//classes()*sizeof(float|uchar) bytes
|
||||
void getSignature(IplImage *patch, uchar *sig);
|
||||
void getSignature(IplImage *patch, float *sig);
|
||||
void getSparseSignature(IplImage *patch, float *sig,
|
||||
float thresh);
|
||||
|
||||
static int countNonZeroElements(float *vec, int n, double tol=1e-10);
|
||||
static inline void safeSignatureAlloc(uchar **sig, int num_sig=1,
|
||||
int sig_len=176);
|
||||
static inline uchar* safeSignatureAlloc(int num_sig=1,
|
||||
int sig_len=176);
|
||||
|
||||
inline int classes() { return classes_; }
|
||||
inline int original_num_classes()
|
||||
{ return original_num_classes_; }
|
||||
|
||||
void setQuantization(int num_quant_bits);
|
||||
void discardFloatPosteriors();
|
||||
|
||||
void read(const char* file_name);
|
||||
void read(std::istream &is);
|
||||
void write(const char* file_name) const;
|
||||
void write(std::ostream &os) const;
|
||||
|
||||
std::vector<RandomizedTree> trees_;
|
||||
|
||||
private:
|
||||
int classes_;
|
||||
int num_quant_bits_;
|
||||
uchar **posteriors_;
|
||||
ushort *ptemp_;
|
||||
int original_num_classes_;
|
||||
bool keep_floats_;
|
||||
};
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::train
|
||||
--------------------------
|
||||
Trains a randomized tree classifier using an input set of keypoints.
|
||||
|
||||
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
|
||||
|
||||
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
|
||||
|
||||
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
|
||||
|
||||
:param rng: Random-number generator used for training.
|
||||
|
||||
:param make_patch: Patch generator used for training.
|
||||
|
||||
:param num_trees: Number of randomized trees used in ``RTreeClassifier`` .
|
||||
|
||||
:param depth: Maximum tree depth.
|
||||
|
||||
:param views: Number of random views of each keypoint neighborhood to generate.
|
||||
|
||||
:param reduced_num_dim: Number of dimensions used in the compressed signature.
|
||||
|
||||
:param num_quant_bits: Number of bits used for quantization.
|
||||
|
||||
:param print_status: Whether the current status of training is printed to the console.
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::getSignature
|
||||
---------------------------------
|
||||
Returns a signature for an image patch.
|
||||
|
||||
.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
|
||||
|
||||
.. ocv:function:: void getSignature(IplImage *patch, float *sig)
|
||||
|
||||
:param patch: Image patch to calculate the signature for.
|
||||
:param sig: Output signature (array dimension is ``reduced_num_dim)`` .
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::getSparseSignature
|
||||
---------------------------------------
|
||||
Returns a sparse signature for an image patch
|
||||
|
||||
.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
|
||||
|
||||
:param patch: Image patch to calculate the signature for.
|
||||
|
||||
:param sig: Output signature (array dimension is ``reduced_num_dim)`` .
|
||||
|
||||
:param thresh: Threshold used for compressing the signature.
|
||||
|
||||
The function is similar to ``getSignature`` but uses a threshold for removing all signature elements below the threshold so that the signature becomes sparse (compressed).
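A possible usage sketch (``classifier`` is assumed to be a trained ``RTreeClassifier`` and ``patch`` a patch-sized ``IplImage*``; the threshold value is arbitrary): ::

    float* sig = new float[classifier.original_num_classes()];
    classifier.getSparseSignature(patch, sig, 0.1f);
    int nonzero = RTreeClassifier::countNonZeroElements(sig, classifier.original_num_classes());
    delete[] sig;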
|
||||
|
||||
|
||||
RTreeClassifier::countNonZeroElements
|
||||
-----------------------------------------
|
||||
Returns the number of non-zero elements in an input array.
|
||||
|
||||
.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
|
||||
|
||||
:param vec: Input vector containing float elements.
|
||||
|
||||
:param n: Input vector size.
|
||||
|
||||
:param tol: Threshold used for counting elements. All elements less than ``tol`` are considered as zero elements.
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::read
|
||||
-------------------------
|
||||
Reads a pre-saved ``RTreeClassifier`` from a file or stream.
|
||||
|
||||
.. ocv:function:: void RTreeClassifier::read( const char* file_name )

.. ocv:function:: void RTreeClassifier::read( std::istream &is )
|
||||
|
||||
:param file_name: Name of the file that contains randomized tree data.
|
||||
|
||||
:param is: Input stream associated with the file that contains randomized tree data.
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::write
|
||||
--------------------------
|
||||
Writes the current ``RTreeClassifier`` to a file or stream.
|
||||
|
||||
.. ocv:function:: void write(const char* file_name) const
|
||||
|
||||
.. ocv:function:: void write(std::ostream &os) const
|
||||
|
||||
:param file_name: Name of the file where randomized tree data is stored.
|
||||
|
||||
:param os: Output stream associated with the file where randomized tree data is stored.
|
||||
|
||||
|
||||
|
||||
RTreeClassifier::setQuantization
|
||||
------------------------------------
|
||||
Applies quantization to the current randomized tree.
|
||||
|
||||
.. ocv:function:: void setQuantization(int num_quant_bits)
|
||||
|
||||
:param num_quant_bits: Number of bits used for quantization.
|
||||
|
||||
The example below demonstrates the usage of ``RTreeClassifier`` for matching features. The features are extracted from the test and train images with SURF. The output consists of the
:math:`best\_corr` and
:math:`best\_corr\_idx` arrays, which keep the best probabilities and the corresponding feature indices for every feature of the test image. ::
|
||||
|
||||
CvMemStorage* storage = cvCreateMemStorage(0);
|
||||
CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
|
||||
CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
|
||||
CvSURFParams params = cvSURFParams(500, 1);
|
||||
cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors,
|
||||
storage, params );
|
||||
cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors,
|
||||
storage, params );
|
||||
|
||||
RTreeClassifier detector;
|
||||
int patch_width = PATCH_SIZE;
|
||||
int patch_height = PATCH_SIZE;
|
||||
vector<BaseKeypoint> base_set;
|
||||
int i=0;
|
||||
CvSURFPoint* point;
|
||||
for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
|
||||
{
|
||||
point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
|
||||
base_set.push_back(
|
||||
BaseKeypoint(point->pt.x,point->pt.y,train_image));
|
||||
}
|
||||
|
||||
//Detector training
|
||||
RNG rng( cvGetTickCount() );
|
||||
PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,
|
||||
-CV_PI/3,CV_PI/3);
|
||||
|
||||
printf("RTree Classifier training...\n");
detector.train(base_set,rng,gen,24,DEFAULT_DEPTH,2000,
    (int)base_set.size(), detector.DEFAULT_NUM_QUANT_BITS);
printf("Done\n");
|
||||
|
||||
float* signature = new float[detector.original_num_classes()];
|
||||
float* best_corr;
|
||||
int* best_corr_idx;
|
||||
if (imageKeypoints->total > 0)
|
||||
{
|
||||
best_corr = new float[imageKeypoints->total];
|
||||
best_corr_idx = new int[imageKeypoints->total];
|
||||
}
|
||||
|
||||
for(i=0; i < imageKeypoints->total; i++)
|
||||
{
|
||||
point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
|
||||
int part_idx = -1;
|
||||
float prob = 0.0f;
|
||||
|
||||
CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,
|
||||
(int)(point->pt.y) - patch_height/2,
|
||||
patch_width, patch_height);
|
||||
cvSetImageROI(test_image, roi);
|
||||
roi = cvGetImageROI(test_image);
|
||||
if(roi.width != patch_width || roi.height != patch_height)
|
||||
{
|
||||
best_corr_idx[i] = part_idx;
|
||||
best_corr[i] = prob;
|
||||
}
|
||||
else
|
||||
{
|
||||
cvSetImageROI(test_image, roi);
|
||||
IplImage* roi_image =
|
||||
cvCreateImage(cvSize(roi.width, roi.height),
|
||||
test_image->depth, test_image->nChannels);
|
||||
cvCopy(test_image,roi_image);
|
||||
|
||||
detector.getSignature(roi_image, signature);
|
||||
for (int j = 0; j< detector.original_num_classes();j++)
|
||||
{
|
||||
if (prob < signature[j])
|
||||
{
|
||||
part_idx = j;
|
||||
prob = signature[j];
|
||||
}
|
||||
}
|
||||
|
||||
best_corr_idx[i] = part_idx;
|
||||
best_corr[i] = prob;
|
||||
|
||||
if (roi_image)
|
||||
cvReleaseImage(&roi_image);
|
||||
}
|
||||
cvResetImageROI(test_image);
|
||||
}
|
||||
|
||||
..
|
||||
@@ -8,3 +8,8 @@ legacy. Deprecated stuff
|
||||
:maxdepth: 2
|
||||
|
||||
motion_analysis
|
||||
expectation_maximization
|
||||
planar_subdivisions
|
||||
feature_detection_and_description
|
||||
common_interfaces_of_descriptor_extractors
|
||||
common_interfaces_of_generic_descriptor_matchers
|
||||
|
||||
BIN
modules/legacy/doc/pics/quadedge.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 4.6 KiB |
BIN
modules/legacy/doc/pics/subdiv.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.7 KiB |
325
modules/legacy/doc/planar_subdivisions.rst
Normal file
@@ -0,0 +1,325 @@
|
||||
Planar Subdivisions (C API)
|
||||
============================
|
||||
|
||||
.. highlight:: c
|
||||
|
||||
CvSubdiv2D
|
||||
----------
|
||||
|
||||
.. ocv:struct:: CvSubdiv2D
|
||||
|
||||
Planar subdivision.
|
||||
|
||||
::
|
||||
|
||||
#define CV_SUBDIV2D_FIELDS() \
|
||||
CV_GRAPH_FIELDS() \
|
||||
int quad_edges; \
|
||||
int is_geometry_valid; \
|
||||
CvSubdiv2DEdge recent_edge; \
|
||||
CvPoint2D32f topleft; \
|
||||
CvPoint2D32f bottomright;
|
||||
|
||||
typedef struct CvSubdiv2D
|
||||
{
|
||||
CV_SUBDIV2D_FIELDS()
|
||||
}
|
||||
CvSubdiv2D;
|
||||
|
||||
..
|
||||
|
||||
Planar subdivision is the subdivision of a plane into a set of
|
||||
non-overlapping regions (facets) that cover the whole plane. The above
|
||||
structure describes a subdivision built on a 2D point set, where the points
|
||||
are linked together and form a planar graph, which, together with a few
|
||||
edges connecting the exterior subdivision points (namely, convex hull points)
|
||||
with infinity, subdivides a plane into facets by its edges.
|
||||
|
||||
For every subdivision, there is a dual subdivision in which facets and
|
||||
points (subdivision vertices) swap their roles. This means that a facet is
|
||||
treated as a vertex (called a virtual point below) of the dual subdivision and
|
||||
the original subdivision vertices become facets. In the figure below, the
|
||||
original subdivision is marked with solid lines and dual subdivision -
|
||||
with dotted lines.
|
||||
|
||||
.. image:: pics/subdiv.png
|
||||
|
||||
OpenCV subdivides a plane into triangles using the Delaunay
algorithm. Subdivision is built iteratively starting from a dummy
triangle that is guaranteed to include all the subdivision points. In this
case, the dual subdivision is a Voronoi diagram of the input 2D point set. The
subdivisions can be used for the 3D piece-wise transformation of a plane,
morphing, fast location of points on the plane, building special graphs
(such as NNG, RNG), and so forth.
|
||||
|
||||
CvQuadEdge2D
|
||||
------------
|
||||
|
||||
.. ocv:struct:: CvQuadEdge2D
|
||||
|
||||
Quad-edge of a planar subdivision.
|
||||
|
||||
::
|
||||
|
||||
/* one of edges within quad-edge, lower 2 bits is index (0..3)
|
||||
and upper bits are quad-edge pointer */
|
||||
typedef long CvSubdiv2DEdge;
|
||||
|
||||
/* quad-edge structure fields */
|
||||
#define CV_QUADEDGE2D_FIELDS() \
|
||||
int flags; \
|
||||
struct CvSubdiv2DPoint* pt[4]; \
|
||||
CvSubdiv2DEdge next[4];
|
||||
|
||||
typedef struct CvQuadEdge2D
|
||||
{
|
||||
CV_QUADEDGE2D_FIELDS()
|
||||
}
|
||||
CvQuadEdge2D;
|
||||
|
||||
..
|
||||
|
||||
Quad-edge is a basic element of a subdivision containing four edges (e, eRot, reversed e, and reversed eRot):
|
||||
|
||||
.. image:: pics/quadedge.png
|
||||
|
||||
CvSubdiv2DPoint
|
||||
---------------
|
||||
|
||||
.. ocv:struct:: CvSubdiv2DPoint
|
||||
|
||||
Point of an original or dual subdivision.
|
||||
|
||||
::
|
||||
|
||||
#define CV_SUBDIV2D_POINT_FIELDS()\
|
||||
int flags; \
|
||||
CvSubdiv2DEdge first; \
|
||||
CvPoint2D32f pt; \
|
||||
int id;
|
||||
|
||||
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
|
||||
|
||||
typedef struct CvSubdiv2DPoint
|
||||
{
|
||||
CV_SUBDIV2D_POINT_FIELDS()
|
||||
}
|
||||
CvSubdiv2DPoint;
|
||||
|
||||
..
|
||||
|
||||
* id
|
||||
This integer can be used to index auxiliary data associated with each vertex of the planar subdivision.
|
||||
|
||||
CalcSubdivVoronoi2D
|
||||
-------------------
|
||||
Calculates the coordinates of the Voronoi diagram cells.
|
||||
|
||||
.. ocv:cfunction:: void cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv )
|
||||
.. ocv:pyoldfunction:: cv.CalcSubdivVoronoi2D(subdiv)-> None
|
||||
|
||||
:param subdiv: Delaunay subdivision, in which all the points are already added.
|
||||
|
||||
The function calculates the coordinates
|
||||
of virtual points. All virtual points corresponding to a vertex of the
|
||||
original subdivision form (when connected together) a boundary of the Voronoi
|
||||
cell at that point.
|
||||
|
||||
ClearSubdivVoronoi2D
|
||||
--------------------
|
||||
Removes all virtual points.
|
||||
|
||||
.. ocv:cfunction:: void cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv )
|
||||
.. ocv:pyoldfunction:: cv.ClearSubdivVoronoi2D(subdiv)-> None
|
||||
|
||||
:param subdiv: Delaunay subdivision.
|
||||
|
||||
The function removes all of the virtual points. It
|
||||
is called internally in
|
||||
:ocv:cfunc:`CalcSubdivVoronoi2D`
|
||||
if the subdivision
|
||||
was modified after the previous call to the function.
|
||||
|
||||
CreateSubdivDelaunay2D
|
||||
----------------------
|
||||
Creates an empty Delaunay triangulation.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
|
||||
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage)-> emptyDelaunayTriangulation
|
||||
|
||||
:param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
|
||||
|
||||
:param storage: Container for the subdivision.
|
||||
|
||||
The function creates an empty Delaunay
subdivision where 2D points can be added using the function
:ocv:cfunc:`SubdivDelaunay2DInsert`. All of the points to be added must be within
the specified rectangle, otherwise a runtime error is raised.

Note that the triangulation initially consists of a single large triangle that covers the given rectangle. Hence the three vertices of this triangle are outside the rectangle ``rect``.
|
||||
|
||||
FindNearestPoint2D
|
||||
------------------
|
||||
Finds the subdivision vertex closest to the given point.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DPoint* cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt )
|
||||
.. ocv:pyoldfunction:: cv.FindNearestPoint2D(subdiv, pt)-> point
|
||||
|
||||
:param subdiv: Delaunay or another subdivision.
|
||||
|
||||
:param pt: Input point.
|
||||
|
||||
The function
locates the input point within the subdivision. It finds the subdivision vertex that
is the closest to the input point. It is not necessarily one of the vertices
of the facet containing the input point, though the facet (located using
:ocv:cfunc:`Subdiv2DLocate`) is used as a starting
point. The function returns a pointer to the found subdivision vertex.
|
||||
|
||||
Subdiv2DEdgeDst
|
||||
---------------
|
||||
Returns the edge destination.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
|
||||
.. ocv:pyoldfunction:: cv.Subdiv2DEdgeDst(edge)-> point
|
||||
|
||||
:param edge: Subdivision edge (not a quad-edge).
|
||||
|
||||
The function returns the edge destination. The
|
||||
returned pointer may be NULL if the edge is from a dual subdivision and
|
||||
the virtual point coordinates are not calculated yet. The virtual points
|
||||
can be calculated using the function
|
||||
:ocv:cfunc:`CalcSubdivVoronoi2D`.
|
||||
|
||||
Subdiv2DGetEdge
|
||||
---------------
|
||||
Returns one of the edges related to the given edge.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
|
||||
.. ocv:pyoldfunction:: cv.Subdiv2DGetEdge(edge, type)-> CvSubdiv2DEdge
|
||||
|
||||
:param edge: Subdivision edge (not a quad-edge).
|
||||
|
||||
:param type: Parameter specifying which of the related edges to return. The following values are possible:
|
||||
|
||||
* **CV_NEXT_AROUND_ORG** next around the edge origin ( ``eOnext`` on the picture below if ``e`` is the input edge)
|
||||
|
||||
* **CV_NEXT_AROUND_DST** next around the edge vertex ( ``eDnext`` )
|
||||
|
||||
* **CV_PREV_AROUND_ORG** previous around the edge origin (reversed ``eRnext`` )
|
||||
|
||||
* **CV_PREV_AROUND_DST** previous around the edge destination (reversed ``eLnext`` )
|
||||
|
||||
* **CV_NEXT_AROUND_LEFT** next around the left facet ( ``eLnext`` )
|
||||
|
||||
* **CV_NEXT_AROUND_RIGHT** next around the right facet ( ``eRnext`` )
|
||||
|
||||
* **CV_PREV_AROUND_LEFT** previous around the left facet (reversed ``eOnext`` )
|
||||
|
||||
* **CV_PREV_AROUND_RIGHT** previous around the right facet (reversed ``eDnext`` )
|
||||
|
||||
.. image:: pics/quadedge.png
|
||||
|
||||
The function returns one of the edges related to the input edge.
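For example, a hedged sketch of walking around the facet lying to the left of an edge (``subdiv`` is assumed to be an already populated subdivision, as in the snippet at the end of this section): ::

    CvSubdiv2DEdge start = 0;
    CvSubdiv2DPoint* v = 0;
    if( cvSubdiv2DLocate(subdiv, cvPoint2D32f(260, 200), &start, &v) == CV_PTLOC_INSIDE )
    {
        CvSubdiv2DEdge e = start;
        do
        {
            CvSubdiv2DPoint* org = cvSubdiv2DEdgeOrg(e);     // origin of the current edge
            printf("(%f, %f)\n", org->pt.x, org->pt.y);
            e = cvSubdiv2DGetEdge(e, CV_NEXT_AROUND_LEFT);   // next edge of the same left facet
        }
        while( e != start );
    }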
|
||||
|
||||
Subdiv2DNextEdge
|
||||
----------------
|
||||
Returns next edge around the edge origin.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
|
||||
.. ocv:pyoldfunction:: cv.Subdiv2DNextEdge(edge)-> CvSubdiv2DEdge
|
||||
|
||||
:param edge: Subdivision edge (not a quad-edge).
|
||||
|
||||
The function returns the next edge around the edge origin:
``eOnext`` in the picture above if
``e`` is the input edge.
|
||||
|
||||
Subdiv2DLocate
|
||||
--------------
|
||||
Returns the location of a point within a Delaunay triangulation.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DPointLocation cvSubdiv2DLocate( CvSubdiv2D* subdiv, CvPoint2D32f pt, CvSubdiv2DEdge* edge, CvSubdiv2DPoint** vertex=NULL )
|
||||
.. ocv:pyoldfunction:: cv.Subdiv2DLocate(subdiv, pt) -> (loc, where)
|
||||
|
||||
:param subdiv: Delaunay or another subdivision.
|
||||
|
||||
:param pt: Point to locate.
|
||||
|
||||
:param edge: Output edge that the point belongs to or is located to the right of it.
|
||||
|
||||
:param vertex: Optional output vertex (double pointer) that the input point coincides with.
|
||||
|
||||
The function locates the input point within the subdivision. There are five cases:

*
    The point falls into some facet. The function returns ``CV_PTLOC_INSIDE`` and ``*edge`` will contain one of the edges of the facet.

*
    The point falls onto the edge. The function returns ``CV_PTLOC_ON_EDGE`` and ``*edge`` will contain this edge.

*
    The point coincides with one of the subdivision vertices. The function returns ``CV_PTLOC_VERTEX`` and ``*vertex`` will contain a pointer to the vertex.

*
    The point is outside the subdivision reference rectangle. The function returns ``CV_PTLOC_OUTSIDE_RECT`` and no pointers are filled.

*
    One of the input arguments is invalid. A runtime error is raised or, if silent or "parent" error processing mode is selected, ``CV_PTLOC_ERROR`` is returned.
|
||||
|
||||
Subdiv2DRotateEdge
|
||||
------------------
|
||||
Returns another edge of the same quad-edge.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
|
||||
.. ocv:pyoldfunction:: cv.Subdiv2DRotateEdge(edge, rotate)-> CvSubdiv2DEdge
|
||||
|
||||
:param edge: Subdivision edge (not a quad-edge).
|
||||
|
||||
:param rotate: Parameter specifying which of the edges of the same quad-edge as the input one to return. The following values are possible:
|
||||
|
||||
* **0** the input edge ( ``e`` on the picture below if ``e`` is the input edge)
|
||||
|
||||
* **1** the rotated edge ( ``eRot`` )
|
||||
|
||||
* **2** the reversed edge (reversed ``e`` (in green))
|
||||
|
||||
* **3** the reversed rotated edge (reversed ``eRot`` (in green))
|
||||
|
||||
The function returns one of the edges of the same quad-edge as the input edge.
|
||||
|
||||
SubdivDelaunay2DInsert
|
||||
----------------------
|
||||
Inserts a single point into a Delaunay triangulation.
|
||||
|
||||
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt)
|
||||
.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point
|
||||
|
||||
:param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`.
|
||||
|
||||
:param pt: Inserted point.
|
||||
|
||||
The function inserts a single point into a subdivision and modifies the subdivision topology appropriately. If a point with the same coordinates exists already, no new point is added. The function returns a pointer to the allocated point. No virtual point coordinates are calculated at this stage.
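A hedged end-to-end sketch of the typical call sequence (the rectangle size and the point coordinates are arbitrary example values): ::

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvRect rect = cvRect(0, 0, 600, 400);              // must contain all points to be inserted
    CvSubdiv2D* subdiv = cvCreateSubdivDelaunay2D(rect, storage);

    // insert a few points; the triangulation is updated incrementally
    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(100, 50));
    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(250, 300));
    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(400, 150));

    // compute the virtual (Voronoi) points once they are needed
    cvCalcSubdivVoronoi2D(subdiv);

    // locate an arbitrary query point in the triangulation
    CvSubdiv2DEdge e = 0;
    CvSubdiv2DPoint* v = 0;
    CvSubdiv2DPointLocation loc = cvSubdiv2DLocate(subdiv, cvPoint2D32f(260, 200), &e, &v);

    cvReleaseMemStorage(&storage);  // releases the subdivision as well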
|
||||
|
||||
@@ -1826,7 +1826,7 @@ public:
|
||||
CV_WRAP cv::Mat getWeights() const;
|
||||
CV_WRAP cv::Mat getProbs() const;
|
||||
|
||||
CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? likelihood : DBL_MAX; }
|
||||
CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; }
|
||||
#endif
|
||||
|
||||
CV_WRAP virtual void clear();
|
||||
@@ -1847,7 +1847,7 @@ protected:
|
||||
|
||||
cv::EM emObj;
|
||||
cv::Mat probs;
|
||||
double likelihood;
|
||||
double logLikelihood;
|
||||
|
||||
CvMat meansHdr;
|
||||
std::vector<CvMat> covsHdrs;
|
||||
@@ -2840,6 +2840,18 @@ bool CalonderDescriptorExtractor<T>::empty() const
|
||||
return classifier_.trees_.empty();
|
||||
}
|
||||
|
||||
|
||||
////////////////////// Brute Force Matcher //////////////////////////
|
||||
|
||||
template<class Distance>
|
||||
class CV_EXPORTS BruteForceMatcher : public BFMatcher
|
||||
{
|
||||
public:
|
||||
BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {}
|
||||
virtual ~BruteForceMatcher() {}
|
||||
};
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Planar Object Detection *
|
||||
\****************************************************************************************/
|
||||
@@ -3365,6 +3377,7 @@ typedef struct CvGaussBGModel
|
||||
CvGaussBGStatModelParams params;
|
||||
CvGaussBGPoint* g_point;
|
||||
int countFrames;
|
||||
void* mog;
|
||||
} CvGaussBGModel;
|
||||
|
||||
|
||||
|
||||
@@ -50,10 +50,9 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
|
||||
|
||||
if( *bg_model )
|
||||
{
|
||||
delete (cv::Mat*)((*bg_model)->g_point);
|
||||
delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
|
||||
cvReleaseImage( &(*bg_model)->background );
|
||||
cvReleaseImage( &(*bg_model)->foreground );
|
||||
cvReleaseMemStorage(&(*bg_model)->storage);
|
||||
memset( *bg_model, 0, sizeof(**bg_model) );
|
||||
delete *bg_model;
|
||||
*bg_model = 0;
|
||||
@@ -64,70 +63,15 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
|
||||
static int CV_CDECL
|
||||
icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, double learningRate )
|
||||
{
|
||||
int region_count = 0;
|
||||
|
||||
cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
|
||||
|
||||
cv::BackgroundSubtractorMOG mog;
|
||||
mog.bgmodel = *(cv::Mat*)bg_model->g_point;
|
||||
mog.frameSize = mog.bgmodel.data ? cv::Size(cvGetSize(curr_frame)) : cv::Size();
|
||||
mog.frameType = image.type();
|
||||
cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
|
||||
CV_Assert(mog != 0);
|
||||
|
||||
mog.nframes = bg_model->countFrames;
|
||||
mog.history = bg_model->params.win_size;
|
||||
mog.nmixtures = bg_model->params.n_gauss;
|
||||
mog.varThreshold = bg_model->params.std_threshold*bg_model->params.std_threshold;
|
||||
mog.backgroundRatio = bg_model->params.bg_threshold;
|
||||
(*mog)(image, mask, learningRate);
|
||||
bg_model->countFrames++;
|
||||
|
||||
mog(image, mask, learningRate);
|
||||
|
||||
bg_model->countFrames = mog.nframes;
|
||||
if( ((cv::Mat*)bg_model->g_point)->data != mog.bgmodel.data )
|
||||
*((cv::Mat*)bg_model->g_point) = mog.bgmodel;
|
||||
|
||||
//foreground filtering
|
||||
|
||||
//filter small regions
|
||||
cvClearMemStorage(bg_model->storage);
|
||||
|
||||
//cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
|
||||
//cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
|
||||
|
||||
#if 0
|
||||
CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
|
||||
cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
|
||||
for( seq = first_seq; seq; seq = seq->h_next )
|
||||
{
|
||||
CvContour* cnt = (CvContour*)seq;
|
||||
if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea )
|
||||
{
|
||||
//delete small contour
|
||||
prev_seq = seq->h_prev;
|
||||
if( prev_seq )
|
||||
{
|
||||
prev_seq->h_next = seq->h_next;
|
||||
if( seq->h_next ) seq->h_next->h_prev = prev_seq;
|
||||
}
|
||||
else
|
||||
{
|
||||
first_seq = seq->h_next;
|
||||
if( seq->h_next ) seq->h_next->h_prev = NULL;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
region_count++;
|
||||
}
|
||||
}
|
||||
bg_model->foreground_regions = first_seq;
|
||||
cvZero(bg_model->foreground);
|
||||
cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
|
||||
#endif
|
||||
|
||||
CvMat _mask = mask;
|
||||
cvCopy(&_mask, bg_model->foreground);
|
||||
|
||||
return region_count;
|
||||
return 0;
|
||||
}
|
||||
|
||||
CV_IMPL CvBGStatModel*
|
||||
@@ -161,15 +105,17 @@ cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parame
|
||||
|
||||
bg_model->params = params;
|
||||
|
||||
//prepare storages
|
||||
bg_model->g_point = (CvGaussBGPoint*)new cv::Mat();
|
||||
cv::BackgroundSubtractorMOG* mog =
|
||||
new cv::BackgroundSubtractorMOG(params.win_size,
|
||||
params.n_gauss,
|
||||
params.bg_threshold,
|
||||
params.variance_init);
|
||||
|
||||
bg_model->background = cvCreateImage(cvSize(first_frame->width,
|
||||
first_frame->height), IPL_DEPTH_8U, first_frame->nChannels);
|
||||
bg_model->foreground = cvCreateImage(cvSize(first_frame->width,
|
||||
first_frame->height), IPL_DEPTH_8U, 1);
|
||||
bg_model->mog = mog;
|
||||
|
||||
bg_model->storage = cvCreateMemStorage();
|
||||
CvSize sz = cvGetSize(first_frame);
|
||||
bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
|
||||
bg_model->foreground = cvCreateImage(sz, IPL_DEPTH_8U, 1);
|
||||
|
||||
bg_model->countFrames = 0;
|
||||
|
||||
|
||||
@@ -1,67 +1,67 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright( C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
//(including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort(including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
|
||||
CvEMParams::CvEMParams() : nclusters(10), cov_mat_type(CvEM::COV_MAT_DIAGONAL),
|
||||
start_step(CvEM::START_AUTO_STEP), probs(0), weights(0), means(0), covs(0)
|
||||
start_step(CvEM::START_AUTO_STEP), probs(0), weights(0), means(0), covs(0)
|
||||
{
|
||||
term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
|
||||
}
|
||||
|
||||
CvEMParams::CvEMParams( int _nclusters, int _cov_mat_type, int _start_step,
|
||||
CvTermCriteria _term_crit, const CvMat* _probs,
|
||||
const CvMat* _weights, const CvMat* _means, const CvMat** _covs ) :
|
||||
nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step),
|
||||
probs(_probs), weights(_weights), means(_means), covs(_covs), term_crit(_term_crit)
|
||||
CvTermCriteria _term_crit, const CvMat* _probs,
|
||||
const CvMat* _weights, const CvMat* _means, const CvMat** _covs ) :
|
||||
nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step),
|
||||
probs(_probs), weights(_weights), means(_means), covs(_covs), term_crit(_term_crit)
|
||||
{}
|
||||
|
||||
CvEM::CvEM() : likelihood(DBL_MAX)
|
||||
CvEM::CvEM() : logLikelihood(DBL_MAX)
|
||||
{
|
||||
}
|
||||
|
||||
CvEM::CvEM( const CvMat* samples, const CvMat* sample_idx,
|
||||
CvEMParams params, CvMat* labels ) : likelihood(DBL_MAX)
|
||||
CvEMParams params, CvMat* labels ) : logLikelihood(DBL_MAX)
|
||||
{
|
||||
train(samples, sample_idx, params, labels);
|
||||
}
|
||||
@@ -96,16 +96,14 @@ void CvEM::write( CvFileStorage* _fs, const char* name ) const

double CvEM::calcLikelihood( const Mat &input_sample ) const
{
-    double likelihood;
-    emObj.predict(input_sample, noArray(), &likelihood);
-    return likelihood;
+    return emObj.predict(input_sample)[0];
}

float
CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
    Mat prbs0 = cvarrToMat(_probs), prbs = prbs0, sample = cvarrToMat(_sample);
-    int cls = emObj.predict(sample, _probs ? _OutputArray(prbs) : cv::noArray());
+    int cls = static_cast<int>(emObj.predict(sample, _probs ? _OutputArray(prbs) : cv::noArray())[1]);
    if(_probs)
    {
        if( prbs.data != prbs0.data )
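For context (not part of the patch): in the 2.4 C++ API, cv::EM::predict returns a Vec2d whose element 0 is the log-likelihood of the sample and whose element 1 is the index of the most probable mixture component, which is exactly what the rewritten calcLikelihood and predict unpack above. A minimal usage sketch with illustrative names::

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>

    // Classify one 1 x dims row of CV_64FC1 data with an already-trained cv::EM.
    // `posteriors` receives the per-component probabilities.
    int mostProbableComponent(const cv::EM& em, const cv::Mat& sample, cv::Mat& posteriors)
    {
        cv::Vec2d res = em.predict(sample, posteriors);
        double logLikelihood = res[0];             // what CvEM::calcLikelihood now forwards
        int component = static_cast<int>(res[1]);  // what CvEM::predict now returns as float
        (void)logLikelihood;                       // unused here; kept to show the mapping
        return component;
    }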
@@ -144,7 +142,7 @@ void init_params(const CvEMParams& src,
    prbs = src.probs;
    weights = src.weights;
    means = src.means;

    if(src.covs)
    {
        covsHdrs.resize(src.nclusters);
@@ -154,7 +152,7 @@ void init_params(const CvEMParams& src,
}

bool CvEM::train( const CvMat* _samples, const CvMat* _sample_idx,
                  CvEMParams _params, CvMat* _labels )
{
    CV_Assert(_sample_idx == 0);
    Mat samples = cvarrToMat(_samples), labels0, labels;
@@ -163,7 +161,7 @@ bool CvEM::train( const CvMat* _samples, const CvMat* _sample_idx,

    bool isOk = train(samples, Mat(), _params, _labels ? &labels : 0);
    CV_Assert( labels0.data == labels.data );

    return isOk;
}

@@ -203,40 +201,37 @@ bool CvEM::train( const Mat& _samples, const Mat& _sample_idx,
                  CvEMParams _params, Mat* _labels )
{
    CV_Assert(_sample_idx.empty());
-    Mat prbs, weights, means, likelihoods;
+    Mat prbs, weights, means, logLikelihoods;
    std::vector<Mat> covsHdrs;
    init_params(_params, prbs, weights, means, covsHdrs);

    emObj = EM(_params.nclusters, _params.cov_mat_type, _params.term_crit);
    bool isOk = false;
    if( _params.start_step == EM::START_AUTO_STEP )
-        isOk = emObj.train(_samples, _labels ? _OutputArray(*_labels) : cv::noArray(),
-                           probs, likelihoods);
+        isOk = emObj.train(_samples,
+                           logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
    else if( _params.start_step == EM::START_E_STEP )
        isOk = emObj.trainE(_samples, means, covsHdrs, weights,
-                            _labels ? _OutputArray(*_labels) : cv::noArray(),
-                            probs, likelihoods);
+                            logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
    else if( _params.start_step == EM::START_M_STEP )
        isOk = emObj.trainM(_samples, prbs,
-                            _labels ? _OutputArray(*_labels) : cv::noArray(),
-                            probs, likelihoods);
+                            logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
    else
        CV_Error(CV_StsBadArg, "Bad start type of EM algorithm");

    if(isOk)
    {
-        likelihoods = sum(likelihoods).val[0];
+        logLikelihood = sum(logLikelihoods).val[0];
        set_mat_hdrs();
    }

    return isOk;
}

float
CvEM::predict( const Mat& _sample, Mat* _probs ) const
{
-    int cls = emObj.predict(_sample, _probs ? _OutputArray(*_probs) : cv::noArray());
-    return (float)cls;
+    return static_cast<float>(emObj.predict(_sample, _probs ? _OutputArray(*_probs) : cv::noArray())[1]);
}

int CvEM::getNClusters() const

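For reference, the reordered arguments above follow the 2.4 cv::EM training API, whose optional outputs come in the order logLikelihoods, labels, probs; the legacy wrapper then sums the per-sample log-likelihoods into its logLikelihood member. A self-contained sketch of the new calling convention (toy data and names are illustrative, not from the patch)::

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>
    #include <cstdio>

    int main()
    {
        // Toy data: 100 two-dimensional samples around two separated centres.
        cv::Mat samples(100, 2, CV_64FC1);
        cv::randu(samples, 0.0, 1.0);
        samples.rowRange(50, 100) += 5.0;

        cv::EM em(2, cv::EM::COV_MAT_DIAGONAL);   // 2 components, diagonal covariances

        cv::Mat logLikelihoods, labels, probs;
        // 2.4 output order: per-sample log-likelihoods, labels, posterior probabilities.
        bool ok = em.train(samples, logLikelihoods, labels, probs);

        // The legacy wrapper stores the sum of the per-sample log-likelihoods.
        double totalLogLikelihood = cv::sum(logLikelihoods)[0];
        std::printf("trained=%d, total log-likelihood=%f\n", (int)ok, totalLogLikelihood);
        return ok ? 0 : 1;
    }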
@@ -95,7 +95,7 @@ cvExtractSURF( const CvArr* _img, const CvArr* _mask,
    {
        if( _keypoints )
        {
-            CvSURFPoint pt = cvSURFPoint(kpt[i].pt, kpt[i].class_id, cvRound(kpt[i].size));
+            CvSURFPoint pt = cvSURFPoint(kpt[i].pt, kpt[i].class_id, cvRound(kpt[i].size), kpt[i].angle, kpt[i].response);
            cvSeqPush(*_keypoints, &pt);
        }
        if( _descriptors )

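The one-line change above stops the C wrapper from dropping keypoint data: the cvSURFPoint helper takes optional dir and hessian arguments, so the KeyPoint's angle and response now survive the C++ -> C conversion instead of defaulting to 0. A small sketch of that mapping (the header path is an assumption; CvSURFPoint lives in the legacy/compat headers in 2.4)::

    #include <opencv2/core/core.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/legacy/compat.hpp>   // assumed location of CvSURFPoint/cvSURFPoint

    // Illustrative helper, not from the patch: convert a cv::KeyPoint produced by the
    // C++ SURF detector into the legacy CvSURFPoint without losing orientation/response.
    CvSURFPoint toSurfPoint(const cv::KeyPoint& kp)
    {
        return cvSURFPoint(kp.pt, kp.class_id, cvRound(kp.size), kp.angle, kp.response);
    }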
modules/legacy/test/test_bruteforcematcher.cpp (new file, 115 lines)
@@ -0,0 +1,115 @@
#include "test_precomp.hpp"

using namespace cv;

struct CV_EXPORTS L2Fake : public L2<float>
{
    enum { normType = NORM_L2 };
};

class CV_BruteForceMatcherTest : public cvtest::BaseTest
{
public:
    CV_BruteForceMatcherTest() {}
protected:
    void run( int )
    {
        const int dimensions = 64;
        const int descriptorsNumber = 5000;

        Mat train = Mat( descriptorsNumber, dimensions, CV_32FC1);
        Mat query = Mat( descriptorsNumber, dimensions, CV_32FC1);

        Mat permutation( 1, descriptorsNumber, CV_32SC1 );
        for( int i=0;i<descriptorsNumber;i++ )
            permutation.at<int>( 0, i ) = i;

        //RNG rng = RNG( cvGetTickCount() );
        RNG rng;
        randShuffle( permutation, 1, &rng );

        float boundary = 500.f;
        for( int row=0;row<descriptorsNumber;row++ )
        {
            for( int col=0;col<dimensions;col++ )
            {
                int bit = rng( 2 );
                train.at<float>( permutation.at<int>( 0, row ), col ) = bit*boundary + rng.uniform( 0.f, boundary );
                query.at<float>( row, col ) = bit*boundary + rng.uniform( 0.f, boundary );
            }
        }

        vector<DMatch> specMatches, genericMatches;
        BruteForceMatcher<L2<float> > specMatcher;
        BruteForceMatcher<L2Fake > genericMatcher;

        int64 time0 = cvGetTickCount();
        specMatcher.match( query, train, specMatches );
        int64 time1 = cvGetTickCount();
        genericMatcher.match( query, train, genericMatches );
        int64 time2 = cvGetTickCount();

        float specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
        ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time s: %f, us per pair: %f\n",
                    specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );

        float genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
        ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time s: %f, us per pair: %f\n",
                    genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );

        if( (int)specMatches.size() != descriptorsNumber || (int)genericMatches.size() != descriptorsNumber )
            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
        for( int i=0;i<descriptorsNumber;i++ )
        {
            float epsilon = 0.01f;
            bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
                           specMatches[i].queryIdx == genericMatches[i].queryIdx &&
                           specMatches[i].trainIdx == genericMatches[i].trainIdx;
            if( !isEquiv || specMatches[i].trainIdx != permutation.at<int>( 0, i ) )
            {
                ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
                break;
            }
        }

        //Test mask
        Mat mask( query.rows, train.rows, CV_8UC1 );
        rng.fill( mask, RNG::UNIFORM, 0, 2 );

        time0 = cvGetTickCount();
        specMatcher.match( query, train, specMatches, mask );
        time1 = cvGetTickCount();
        genericMatcher.match( query, train, genericMatches, mask );
        time2 = cvGetTickCount();

        specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
        ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time with mask s: %f, us per pair: %f\n",
                    specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );

        genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
        ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time with mask s: %f, us per pair: %f\n",
                    genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );

        if( specMatches.size() != genericMatches.size() )
            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );

        for( size_t i=0;i<specMatches.size();i++ )
        {
            //float epsilon = 1e-2;
            float epsilon = 10000000;
            bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
                           specMatches[i].queryIdx == genericMatches[i].queryIdx &&
                           specMatches[i].trainIdx == genericMatches[i].trainIdx;
            if( !isEquiv )
            {
                ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
                break;
            }
        }
    }
};

TEST(Legacy_BruteForceMatcher, accuracy) { CV_BruteForceMatcherTest test; test.safe_run(); }

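The new test builds a train set that is a row permutation of the query set, so every query descriptor has one unambiguous nearest neighbour, then checks that the specialized BruteForceMatcher<L2<float>> path (matrix multiplication) and the generic path forced by the L2Fake distance return identical matches, with and without a mask. Outside the legacy module the same matching is available through cv::BFMatcher in 2.4; a minimal sketch, not part of the patch::

    #include <opencv2/core/core.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <vector>

    int main()
    {
        // Random descriptors standing in for real query/train sets.
        cv::Mat train(100, 64, CV_32FC1), query(100, 64, CV_32FC1);
        cv::randu(train, 0.0f, 1.0f);
        cv::randu(query, 0.0f, 1.0f);

        std::vector<cv::DMatch> matches;
        cv::BFMatcher matcher(cv::NORM_L2);      // brute-force matching under L2
        matcher.match(query, train, matches);    // one best match per query row
        return matches.size() == (size_t)query.rows ? 0 : 1;
    }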
@@ -259,6 +259,6 @@ int CV_KDTreeTest_C::checkFindBoxed()
}

-TEST(Features2d_LSH, regression) { CV_LSHTest test; test.safe_run(); }
-TEST(Features2d_SpillTree, regression) { CV_SpillTreeTest_C test; test.safe_run(); }
-TEST(Features2d_KDTree_C, regression) { CV_KDTreeTest_C test; test.safe_run(); }
+TEST(Legacy_LSH, regression) { CV_LSHTest test; test.safe_run(); }
+TEST(Legacy_SpillTree, regression) { CV_SpillTreeTest_C test; test.safe_run(); }
+TEST(Legacy_KDTree_C, regression) { CV_KDTreeTest_C test; test.safe_run(); }

@@ -88,7 +88,7 @@ void calcOpticalFlowLK( const Mat& prev, const Mat& curr, Size winSize, Mat& flo

void calcOpticalFlowBM( const Mat& prev, const Mat& curr, Size bSize, Size shiftSize, Size maxRange, int usePrevious, Mat& flow )
{
-    Size sz((curr.cols - bSize.width)/shiftSize.width, (curr.rows - bSize.height)/shiftSize.height);
+    Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
    Mat velx(sz, CV_32F), vely(sz, CV_32F);

    CvMat cvvelx = velx;    CvMat cvvely = vely;
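The changed line fixes the size of the velocity grid passed to the legacy block-matching routine: the new formula also counts the last, partially shifted block position, presumably to match the velx/vely dimensions cvCalcOpticalFlowBM expects. A quick check of the two formulas with hypothetical numbers::

    #include <opencv2/core/core.hpp>
    #include <cstdio>

    int main()
    {
        // Hypothetical frame and block-matching parameters.
        cv::Size frame(640, 480), bSize(16, 16), shiftSize(8, 8);

        // Old grid size: drops the final block position (78 x 58 here).
        cv::Size oldSz((frame.width  - bSize.width)  / shiftSize.width,
                       (frame.height - bSize.height) / shiftSize.height);

        // New grid size: one extra column and row (79 x 59 here).
        cv::Size newSz((frame.width  - bSize.width  + shiftSize.width)  / shiftSize.width,
                       (frame.height - bSize.height + shiftSize.height) / shiftSize.height);

        std::printf("%dx%d -> %dx%d\n", oldSz.width, oldSz.height, newSz.width, newSz.height);
        return 0;
    }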
@@ -351,6 +351,6 @@ void CV_OptFlowTest::run( int /* start_from */)
}

-TEST(Video_OpticalFlow, accuracy) { CV_OptFlowTest test; test.safe_run(); }
+TEST(Legacy_OpticalFlow, accuracy) { CV_OptFlowTest test; test.safe_run(); }

@@ -199,6 +199,6 @@ _exit_:
    ts->set_failed_test_info( code );
}

-TEST(Imgproc_PyrSegmentation, regression) { CV_PyrSegmentationTest test; test.safe_run(); }
+TEST(Legacy_PyrSegmentation, regression) { CV_PyrSegmentationTest test; test.safe_run(); }

/* End of file. */

@@ -719,4 +719,4 @@ protected:
};

-TEST(Calib3d_StereoGC, regression) { CV_StereoGCTest test; test.safe_run(); }
+TEST(Legacy_StereoGC, regression) { CV_StereoGCTest test; test.safe_run(); }

@@ -335,7 +335,7 @@ _exit_:
    return code;
}

-TEST(Imgproc_Subdiv, correctness) { CV_SubdivTest test; test.safe_run(); }
+TEST(Legacy_Subdiv, correctness) { CV_SubdivTest test; test.safe_run(); }

/* End of file. */