lots of changes: nonfree & photo modules added; SIFT & SURF moved to the nonfree module; inpainting moved to photo; refactored features2d (ORB is still failing tests); optimized the brute-force matcher and made it non-template.
parent 6300215b94
commit 957e80abbd
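For context on the matcher change in the commit message: the templated BruteForceMatcher<L2<float> > is replaced by a non-template BFMatcher that takes a norm type at run time. A minimal usage sketch (illustrative only, not part of this diff; desc1/desc2 are assumed precomputed descriptor matrices):

    #include <vector>
    #include "opencv2/features2d/features2d.hpp"

    void match_example(const cv::Mat& desc1, const cv::Mat& desc2)
    {
        // old: cv::BruteForceMatcher<cv::L2<float> > matcher;
        cv::BFMatcher matcher(cv::NORM_L2);          // float descriptors (SIFT/SURF)
        // cv::BFMatcher hamming(cv::NORM_HAMMING);  // binary descriptors (ORB/BRIEF)
        std::vector<cv::DMatch> matches;
        matcher.match(desc1, desc2, matches);
    }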
@@ -48,6 +48,7 @@
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/photo/photo.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
modules/contrib/doc/contrib.rst (new file, +5)
@@ -0,0 +1,5 @@
+***************************************
+contrib. Contributed/Experimental Stuff
+***************************************
+
+The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional.
@@ -145,9 +145,8 @@ public:
class CV_EXPORTS CvFeatureTracker
{
private:
-FeatureDetector* detector;
-DescriptorExtractor* descriptor;
-DescriptorMatcher* matcher;
+Ptr<Feature2D> dd;
+Ptr<DescriptorMatcher> matcher;
vector<DMatch> matches;

Mat prev_image;
@@ -53,22 +53,24 @@ CvFeatureTracker::CvFeatureTracker(CvFeatureTrackerParams _params) :
switch (params.feature_type)
{
case CvFeatureTrackerParams::SIFT:
-detector = new SiftFeatureDetector(
-SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
-SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD() + 0.7,
-SIFT::CommonParams::DEFAULT_NOCTAVES + 4,
-SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS + 2,
-SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
-SIFT::CommonParams::FIRST_ANGLE);
+dd = Algorithm::create<Feature2D>("Feature2D.SIFT");
+if( dd.empty() )
+CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SIFT support");
+dd->set("nOctaveLayers", 5);
+dd->set("contrastThreshold", 0.04);
+dd->set("edgeThreshold", 10.7);
case CvFeatureTrackerParams::SURF:
-detector = new SurfFeatureDetector(400, 3, 4);
-default:
-detector = new GoodFeaturesToTrackDetector();
+dd = Algorithm::create<Feature2D>("Feature2D.SURF");
+if( dd.empty() )
+CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SURF support");
+dd->set("hessianThreshold", 400);
+dd->set("nOctaves", 3);
+dd->set("nOctaveLayers", 4);
+default:
+CV_Error(CV_StsBadArg, "Unknown feature type");
}

-descriptor = new SurfDescriptorExtractor(3, 4, false);
-
-matcher = new BruteForceMatcher<L2<float> > ();
+matcher = new BFMatcher(NORM_L2);
}

CvFeatureTracker::~CvFeatureTracker()
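The constructor above now obtains SIFT/SURF through the Algorithm factory rather than constructing the detector classes directly; when the nonfree module is absent, the factory returns an empty pointer. A condensed sketch of that pattern (illustrative only; assumes a cv::Mat image and that the nonfree module has registered "Feature2D.SIFT"):

    void detect_with_sift(const cv::Mat& image)
    {
        cv::Ptr<cv::Feature2D> f = cv::Algorithm::create<cv::Feature2D>("Feature2D.SIFT");
        if( f.empty() )
            CV_Error(CV_StsNotImplemented, "SIFT unavailable: nonfree module not built/linked");
        f->set("contrastThreshold", 0.04);              // tune via the generic parameter interface
        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        (*f)(image, cv::Mat(), keypoints, descriptors); // detect and compute in one call
    }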
@@ -105,7 +107,7 @@ Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
rectangle(mask, Point(window.x, window.y), Point(window.x + window.width,
window.y + window.height), Scalar(255), CV_FILLED);

-detector->detect(prev_image, prev_keypoints, mask);
+dd->operator()(prev_image, mask, prev_keypoints, prev_desc);

window.x -= params.window_size;
window.y -= params.window_size;
@@ -114,12 +116,12 @@ Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
rectangle(mask, Point(window.x, window.y), Point(window.x + window.width,
window.y + window.height), Scalar(255), CV_FILLED);

-detector->detect(image, curr_keypoints, mask);
+dd->operator()(image, mask, curr_keypoints, curr_desc);

if (prev_keypoints.size() > 4 && curr_keypoints.size() > 4)
{
-descriptor->compute(prev_image, prev_keypoints, prev_desc);
-descriptor->compute(image, curr_keypoints, curr_desc);
+//descriptor->compute(prev_image, prev_keypoints, prev_desc);
+//descriptor->compute(image, curr_keypoints, curr_desc);

matcher->match(prev_desc, curr_desc, matches);
modules/contrib/test/test_main.cpp (new file, +3)
@@ -0,0 +1,3 @@
+#include "test_precomp.hpp"
+
+CV_TEST_MAIN("cv")

modules/contrib/test/test_precomp.cpp (new file, +1)
@@ -0,0 +1 @@
+#include "test_precomp.hpp"

modules/contrib/test/test_precomp.hpp (new file, +9)
@@ -0,0 +1,9 @@
+#ifndef __OPENCV_TEST_PRECOMP_HPP__
+#define __OPENCV_TEST_PRECOMP_HPP__
+
+#include "opencv2/ts/ts.hpp"
+#include "opencv2/contrib/contrib.hpp"
+#include <iostream>
+
+#endif
+
@@ -118,7 +118,7 @@ CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0));

// matrix decomposition types
enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 };
-enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32};
+enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 };
enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 };
enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 };
enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32,
@@ -2057,6 +2057,14 @@ CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=
//! computes norm of selected part of the difference between two arrays
CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
int normType=NORM_L2, InputArray mask=noArray());

+//! naive nearest neighbor finder
+CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
+OutputArray dist, int dtype, OutputArray nidx,
+int normType=NORM_L2, int K=0,
+InputArray mask=noArray(), int update=0,
+bool crosscheck=false);
+
//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0,
int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray());
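A usage sketch for the batchDistance() function declared above (illustrative, not part of the diff): find, for each row of a query matrix, its single nearest row in a train matrix.

    void nearest_rows(const cv::Mat& query, const cv::Mat& train)
    {
        CV_Assert( query.type() == CV_32F && train.type() == CV_32F );
        cv::Mat dist, nidx;
        // K=1 keeps only the closest train row per query row;
        // dtype=-1 lets the function pick the output depth (CV_32F for an L2-based norm)
        cv::batchDistance(query, train, dist, -1, nidx, cv::NORM_L2SQR, 1);
        // dist: query.rows x 1 (CV_32F), nidx: query.rows x 1 (CV_32S)
    }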
@@ -4244,10 +4252,20 @@ public:

template<typename _Tp> typename ParamType<_Tp>::member_type get(const string& name) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
-template<typename _Tp> void set(const string& name,
-typename ParamType<_Tp>::const_param_type value);
-template<typename _Tp> void set(const char* name,
-typename ParamType<_Tp>::const_param_type value);
+void set(const string& name, int value);
+void set(const string& name, double value);
+void set(const string& name, bool value);
+void set(const string& name, const string& value);
+void set(const string& name, const Mat& value);
+void set(const string& name, const Ptr<Algorithm>& value);
+
+void set(const char* name, int value);
+void set(const char* name, double value);
+void set(const char* name, bool value);
+void set(const char* name, const string& value);
+void set(const char* name, const Mat& value);
+void set(const char* name, const Ptr<Algorithm>& value);

string paramHelp(const string& name) const;
int paramType(const char* name) const;
int paramType(const string& name) const;
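Sketch of how the new non-template set() overloads are meant to be used alongside the (still templated) get<>() accessor. The detector type and parameter names below are illustrative, not taken from this diff:

    cv::Ptr<cv::FeatureDetector> fd = cv::FeatureDetector::create("GFTT");
    fd->set("nfeatures", 500);             // resolved by the int overload
    fd->set("qualityLevel", 0.05);         // double overload
    fd->set("useHarrisDetector", true);    // bool overload
    int n = fd->get<int>("nfeatures");     // reading still goes through get<_Tp>()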
@@ -4293,6 +4311,11 @@ public:
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
const string& help=string());
+void addParam(const Algorithm* algo, const char* name,
+const bool& value, bool readOnly=false,
+int (Algorithm::*getter)()=0,
+void (Algorithm::*setter)(int)=0,
+const string& help=string());
void addParam(const Algorithm* algo, const char* name,
const double& value, bool readOnly=false,
double (Algorithm::*getter)()=0,
@@ -3823,18 +3823,6 @@ template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::ge
return value;
}

-template<typename _Tp> inline void Algorithm::set(const string& name,
-typename ParamType<_Tp>::const_param_type value)
-{
-info()->set(this, name.c_str(), ParamType<_Tp>::type, &value);
-}
-
-template<typename _Tp> inline void Algorithm::set(const char* name,
-typename ParamType<_Tp>::const_param_type value)
-{
-info()->set(this, name, ParamType<_Tp>::type, &value);
-}
-
}

#endif // __cplusplus
@@ -77,7 +77,7 @@ template<typename _KeyTp, typename _ValueTp> struct sorted_vector
b = c;
}

-if( vec[a].first == key )
+if( a < vec.size() && vec[a].first == key )
{
value = vec[a].second;
return true;
@@ -176,6 +176,66 @@ string Algorithm::name() const
{
return info()->name();
}

+void Algorithm::set(const string& name, int value)
+{
+info()->set(this, name.c_str(), ParamType<int>::type, &value);
+}
+
+void Algorithm::set(const string& name, double value)
+{
+info()->set(this, name.c_str(), ParamType<double>::type, &value);
+}
+
+void Algorithm::set(const string& name, bool value)
+{
+info()->set(this, name.c_str(), ParamType<bool>::type, &value);
+}
+
+void Algorithm::set(const string& name, const string& value)
+{
+info()->set(this, name.c_str(), ParamType<string>::type, &value);
+}
+
+void Algorithm::set(const string& name, const Mat& value)
+{
+info()->set(this, name.c_str(), ParamType<Mat>::type, &value);
+}
+
+void Algorithm::set(const string& name, const Ptr<Algorithm>& value)
+{
+info()->set(this, name.c_str(), ParamType<Algorithm>::type, &value);
+}
+
+void Algorithm::set(const char* name, int value)
+{
+info()->set(this, name, ParamType<int>::type, &value);
+}
+
+void Algorithm::set(const char* name, double value)
+{
+info()->set(this, name, ParamType<double>::type, &value);
+}
+
+void Algorithm::set(const char* name, bool value)
+{
+info()->set(this, name, ParamType<bool>::type, &value);
+}
+
+void Algorithm::set(const char* name, const string& value)
+{
+info()->set(this, name, ParamType<string>::type, &value);
+}
+
+void Algorithm::set(const char* name, const Mat& value)
+{
+info()->set(this, name, ParamType<Mat>::type, &value);
+}
+
+void Algorithm::set(const char* name, const Ptr<Algorithm>& value)
+{
+info()->set(this, name, ParamType<Algorithm>::type, &value);
+}
+
string Algorithm::paramHelp(const string& name) const
{
@@ -261,25 +321,25 @@ void AlgorithmInfo::read(Algorithm* algo, const FileNode& fn) const
if( n.empty() )
continue;
if( p.type == Param::INT )
-algo->set<int>(pname, (int)n);
+algo->set(pname, (int)n);
else if( p.type == Param::BOOLEAN )
-algo->set<bool>(pname, (int)n != 0);
+algo->set(pname, (int)n != 0);
else if( p.type == Param::REAL )
-algo->set<double>(pname, (double)n);
+algo->set(pname, (double)n);
else if( p.type == Param::STRING )
-algo->set<string>(pname, (string)n);
+algo->set(pname, (string)n);
else if( p.type == Param::MAT )
{
Mat m;
cv::read(fn, m);
-algo->set<Mat>(pname, m);
+algo->set(pname, m);
}
else if( p.type == Param::ALGORITHM )
{
Ptr<Algorithm> nestedAlgo = Algorithm::_create((string)n["name"]);
CV_Assert( !nestedAlgo.empty() );
nestedAlgo->read(n);
-algo->set<Algorithm>(pname, nestedAlgo);
+algo->set(pname, nestedAlgo);
}
else
CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported parameter type");
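The loop above restores a stored algorithm's parameters through the new set() overloads; roughly, it supports a round trip like the following sketch (file name and detector choice are illustrative, not from the diff):

    cv::Ptr<cv::FeatureDetector> fd = cv::FeatureDetector::create("ORB");
    {
        cv::FileStorage fs("detector.yml", cv::FileStorage::WRITE);
        fd->write(fs);             // serialize the registered parameters
    }
    cv::FileStorage fs("detector.yml", cv::FileStorage::READ);
    fd->read(fs.root());           // AlgorithmInfo::read() calls set() for each stored node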
@@ -505,6 +565,16 @@ void AlgorithmInfo::addParam(const Algorithm* algo, const char* name,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}

+void AlgorithmInfo::addParam(const Algorithm* algo, const char* name,
+const bool& value, bool readOnly,
+int (Algorithm::*getter)(),
+void (Algorithm::*setter)(int),
+const string& help)
+{
+addParam_(algo, name, ParamType<bool>::type, &value, readOnly,
+(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
+}
+
void AlgorithmInfo::addParam(const Algorithm* algo, const char* name,
const double& value, bool readOnly,
double (Algorithm::*getter)(),
@@ -113,11 +113,13 @@ namespace

const CvOpenGlFuncTab* g_glFuncTab = 0;

+#ifdef HAVE_CUDA
const CvOpenGlFuncTab* glFuncTab()
{
static EmptyGlFuncTab empty;
return g_glFuncTab ? g_glFuncTab : &empty;
}
+#endif
}

CvOpenGlFuncTab::~CvOpenGlFuncTab()
@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
+#include <climits>

namespace cv
{
@@ -1425,6 +1426,339 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
}

+
+///////////////////////////////////// batch distance ///////////////////////////////////////
+
+namespace cv
+{
+
+template<typename _Tp, typename _Rt>
+void batchDistL1_(const _Tp* src1, const _Tp* src2, size_t step2,
+int nvecs, int len, _Rt* dist, const uchar* mask)
+{
+step2 /= sizeof(src2[0]);
+if( !mask )
+{
+for( int i = 0; i < nvecs; i++ )
+dist[i] = normL1<_Tp, _Rt>(src1, src2 + step2*i, len);
+}
+else
+{
+_Rt val0 = std::numeric_limits<_Rt>::max();
+for( int i = 0; i < nvecs; i++ )
+dist[i] = mask[i] ? normL1<_Tp, _Rt>(src1, src2 + step2*i, len) : val0;
+}
+}
+
+template<typename _Tp, typename _Rt>
+void batchDistL2Sqr_(const _Tp* src1, const _Tp* src2, size_t step2,
+int nvecs, int len, _Rt* dist, const uchar* mask)
+{
+step2 /= sizeof(src2[0]);
+if( !mask )
+{
+for( int i = 0; i < nvecs; i++ )
+dist[i] = normL2Sqr<_Tp, _Rt>(src1, src2 + step2*i, len);
+}
+else
+{
+_Rt val0 = std::numeric_limits<_Rt>::max();
+for( int i = 0; i < nvecs; i++ )
+dist[i] = mask[i] ? normL2Sqr<_Tp, _Rt>(src1, src2 + step2*i, len) : val0;
+}
+}
+
+template<typename _Tp, typename _Rt>
+void batchDistL2_(const _Tp* src1, const _Tp* src2, size_t step2,
+int nvecs, int len, _Rt* dist, const uchar* mask)
+{
+step2 /= sizeof(src2[0]);
+if( !mask )
+{
+for( int i = 0; i < nvecs; i++ )
+dist[i] = std::sqrt(normL2Sqr<_Tp, _Rt>(src1, src2 + step2*i, len));
+}
+else
+{
+_Rt val0 = std::numeric_limits<_Rt>::max();
+for( int i = 0; i < nvecs; i++ )
+dist[i] = mask[i] ? std::sqrt(normL2Sqr<_Tp, _Rt>(src1, src2 + step2*i, len)) : val0;
+}
+}
+
+static void batchDistHamming(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, int* dist, const uchar* mask)
+{
+step2 /= sizeof(src2[0]);
+if( !mask )
+{
+for( int i = 0; i < nvecs; i++ )
+dist[i] = normHamming(src1, src2 + step2*i, len);
+}
+else
+{
+int val0 = INT_MAX;
+for( int i = 0; i < nvecs; i++ )
+dist[i] = mask[i] ? normHamming(src1, src2 + step2*i, len) : val0;
+}
+}
+
+static void batchDistHamming2(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, int* dist, const uchar* mask)
+{
+step2 /= sizeof(src2[0]);
+if( !mask )
+{
+for( int i = 0; i < nvecs; i++ )
+dist[i] = normHamming(src1, src2 + step2*i, len, 2);
+}
+else
+{
+int val0 = INT_MAX;
+for( int i = 0; i < nvecs; i++ )
+dist[i] = mask[i] ? normHamming(src1, src2 + step2*i, len, 2) : val0;
+}
+}
+
+static void batchDistL1_8u32s(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, int* dist, const uchar* mask)
+{
+batchDistL1_<uchar, int>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL1_8u32f(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL1_<uchar, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL2Sqr_8u32s(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, int* dist, const uchar* mask)
+{
+batchDistL2Sqr_<uchar, int>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL2Sqr_8u32f(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL2Sqr_<uchar, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL2_8u32f(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL2_<uchar, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL1_32f(const float* src1, const float* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL1_<float, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL2Sqr_32f(const float* src1, const float* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL2Sqr_<float, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+static void batchDistL2_32f(const float* src1, const float* src2, size_t step2,
+int nvecs, int len, float* dist, const uchar* mask)
+{
+batchDistL2_<float, float>(src1, src2, step2, nvecs, len, dist, mask);
+}
+
+typedef void (*BatchDistFunc)(const uchar* src1, const uchar* src2, size_t step2,
+int nvecs, int len, uchar* dist, const uchar* mask);
+
+
+struct BatchDistInvoker
+{
+BatchDistInvoker( const Mat& _src1, const Mat& _src2,
+Mat& _dist, Mat& _nidx, int _K,
+const Mat& _mask, int _update,
+BatchDistFunc _func)
+{
+src1 = &_src1;
+src2 = &_src2;
+dist = &_dist;
+nidx = &_nidx;
+K = _K;
+mask = &_mask;
+update = _update;
+func = _func;
+}
+
+void operator()(const BlockedRange& range) const
+{
+AutoBuffer<int> buf(src2->rows);
+int* bufptr = buf;
+Cv32suf val0;
+if( dist->type() == CV_32S )
+val0.i = INT_MAX;
+else
+val0.f = FLT_MAX;
+
+for( int i = range.begin(); i < range.end(); i++ )
+{
+func(src1->ptr(i), src2->ptr(), src2->step, src2->rows, src2->cols,
+K > 0 ? (uchar*)bufptr : dist->ptr(i), mask->data ? mask->ptr(i) : 0);
+
+if( K > 0 )
+{
+int* nidxptr = nidx->ptr<int>(i);
+// since positive float's can be compared just like int's,
+// we handle both CV_32S and CV_32F cases with a single branch
+int* distptr = (int*)dist->ptr(i);
+
+int k, k0, k0_, j;
+for( k0 = 0; k0 < K; k0++ )
+if( nidxptr[k0] < 0 )
+break;
+k0_ = std::max(k0, 1);
+
+for( j = 0; j < src2->rows; j++ )
+{
+int d = bufptr[j];
+if( d < distptr[k0_-1] )
+{
+for( k = std::min(k0-1, K-2); k >= 0 && distptr[k] > d; k-- )
+{
+nidxptr[k+1] = nidxptr[k];
+distptr[k+1] = distptr[k];
+}
+nidxptr[k+1] = j + update;
+distptr[k+1] = d;
+k0_ = k0 = std::min(k0 + 1, K);
+}
+}
+}
+}
+}
+
+const Mat *src1;
+const Mat *src2;
+Mat *dist;
+Mat *nidx;
+const Mat *mask;
+int K;
+int update;
+BatchDistFunc func;
+};
+
+}
+
+void cv::batchDistance( InputArray _src1, InputArray _src2,
+OutputArray _dist, int dtype, OutputArray _nidx,
+int normType, int K, InputArray _mask,
+int update, bool crosscheck )
+{
+Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
+int type = src1.type();
+CV_Assert( type == src2.type() && src1.cols == src2.cols &&
+(type == CV_32F || type == CV_8U));
+CV_Assert( _nidx.needed() == (K > 0) );
+
+if( dtype == -1 )
+{
+dtype = normType == NORM_HAMMING || normType == NORM_HAMMING2 ? CV_32S : CV_32F;
+}
+CV_Assert( (type == CV_8U && dtype == CV_32S) || dtype == CV_32F);
+
+K = std::min(K, src2.rows);
+
+_dist.create(src1.rows, (K > 0 ? K : src2.rows), dtype);
+Mat dist = _dist.getMat(), nidx;
+if( _nidx.needed() )
+{
+_nidx.create(dist.size(), CV_32S);
+nidx = _nidx.getMat();
+}
+
+if( update == 0 && K > 0 )
+{
+dist = Scalar::all(dtype == CV_32S ? (double)INT_MAX : (double)FLT_MAX);
+nidx = Scalar::all(-1);
+}
+
+if( crosscheck )
+{
+CV_Assert( K == 1 && update == 0 && mask.empty() );
+Mat tdist, tidx;
+batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false);
+
+// if an idx-th element from src1 appeared to be the nearest to i-th element of src2,
+// we update the minimum mutual distance between idx-th element of src1 and the whole src2 set.
+// As a result, if nidx[idx] = i*, it means that idx-th element of src1 is the nearest
+// to i*-th element of src2 and i*-th element of src2 is the closest to idx-th element of src1.
+// If nidx[idx] = -1, it means that there is no such ideal couple for it in src2.
+// This O(N) procedure is called cross-check and it helps to eliminate some false matches.
+if( dtype == CV_32S )
+{
+for( int i = 0; i < tdist.rows; i++ )
+{
+int idx = tidx.at<int>(i);
+int d = tdist.at<int>(i), d0 = dist.at<int>(idx);
+if( d < d0 )
+{
+dist.at<int>(idx) = d0;
+nidx.at<int>(idx) = i + update;
+}
+}
+}
+else
+{
+for( int i = 0; i < tdist.rows; i++ )
+{
+int idx = tidx.at<int>(i);
+float d = tdist.at<float>(i), d0 = dist.at<float>(idx);
+if( d < d0 )
+{
+dist.at<float>(idx) = d0;
+nidx.at<int>(idx) = i + update;
+}
+}
+}
+return;
+}
+
+BatchDistFunc func = 0;
+if( type == CV_8U )
+{
+if( normType == NORM_L1 && dtype == CV_32S )
+func = (BatchDistFunc)batchDistL1_8u32s;
+else if( normType == NORM_L1 && dtype == CV_32F )
+func = (BatchDistFunc)batchDistL1_8u32f;
+else if( normType == NORM_L2SQR && dtype == CV_32S )
+func = (BatchDistFunc)batchDistL2Sqr_8u32s;
+else if( normType == NORM_L2SQR && dtype == CV_32F )
+func = (BatchDistFunc)batchDistL2Sqr_8u32f;
+else if( normType == NORM_L2 && dtype == CV_32F )
+func = (BatchDistFunc)batchDistL2_8u32f;
+else if( normType == NORM_HAMMING && dtype == CV_32S )
+func = (BatchDistFunc)batchDistHamming;
+else if( normType == NORM_HAMMING2 && dtype == CV_32S )
+func = (BatchDistFunc)batchDistHamming2;
+}
+else if( type == CV_32F && dtype == CV_32F )
+{
+if( normType == NORM_L1 )
+func = (BatchDistFunc)batchDistL1_32f;
+else if( normType == NORM_L2SQR )
+func = (BatchDistFunc)batchDistL2Sqr_32f;
+else if( normType == NORM_L2 )
+func = (BatchDistFunc)batchDistL2_32f;
+}
+
+if( func == 0 )
+CV_Error_(CV_StsUnsupportedFormat,
+("The combination of type=%d, dtype=%d and normType=%d is not supported",
+type, dtype, normType));
+
+parallel_for(BlockedRange(0, src1.rows),
+BatchDistInvoker(src1, src2, dist, nidx, K, mask, update, func));
+}
+
+
CV_IMPL CvScalar cvSum( const CvArr* srcarr )
{
cv::Scalar sum = cv::sum(cv::cvarrToMat(srcarr, false, true, 1));
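The crosscheck branch in the code above keeps only mutual nearest neighbours; at the matcher level this roughly corresponds to the crossCheck option of the new BFMatcher. A hedged sketch (desc1/desc2 are assumed float descriptor matrices):

    cv::BFMatcher matcher(cv::NORM_L2, true /* crossCheck */);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);   // only pairs that are each other's nearest neighbour survive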
@@ -92,177 +92,6 @@ Finds keypoints in an image

:param params: The algorithm parameters stored in ``CvStarDetectorParams`` (OpenCV 1.x API only)

-
-SIFT
-----
-.. ocv:class:: SIFT
-
-Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) approach. ::
-
-class CV_EXPORTS SIFT
-{
-public:
-struct CommonParams
-{
-static const int DEFAULT_NOCTAVES = 4;
-static const int DEFAULT_NOCTAVE_LAYERS = 3;
-static const int DEFAULT_FIRST_OCTAVE = -1;
-enum{ FIRST_ANGLE = 0, AVERAGE_ANGLE = 1 };
-
-CommonParams();
-CommonParams( int _nOctaves, int _nOctaveLayers, int _firstOctave,
-int _angleMode );
-int nOctaves, nOctaveLayers, firstOctave;
-int angleMode;
-};
-
-struct DetectorParams
-{
-static double GET_DEFAULT_THRESHOLD()
-{ return 0.04 / SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS / 2.0; }
-static double GET_DEFAULT_EDGE_THRESHOLD() { return 10.0; }
-
-DetectorParams();
-DetectorParams( double _threshold, double _edgeThreshold );
-double threshold, edgeThreshold;
-};
-
-struct DescriptorParams
-{
-static double GET_DEFAULT_MAGNIFICATION() { return 3.0; }
-static const bool DEFAULT_IS_NORMALIZE = true;
-static const int DESCRIPTOR_SIZE = 128;
-
-DescriptorParams();
-DescriptorParams( double _magnification, bool _isNormalize,
-bool _recalculateAngles );
-double magnification;
-bool isNormalize;
-bool recalculateAngles;
-};
-
-SIFT();
-//! sift-detector constructor
-SIFT( double _threshold, double _edgeThreshold,
-int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
-int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
-int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
-int _angleMode=CommonParams::FIRST_ANGLE );
-//! sift-descriptor constructor
-SIFT( double _magnification, bool _isNormalize=true,
-bool _recalculateAngles = true,
-int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
-int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
-int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
-int _angleMode=CommonParams::FIRST_ANGLE );
-SIFT( const CommonParams& _commParams,
-const DetectorParams& _detectorParams = DetectorParams(),
-const DescriptorParams& _descriptorParams = DescriptorParams() );
-
-//! returns the descriptor size in floats (128)
-int descriptorSize() const { return DescriptorParams::DESCRIPTOR_SIZE; }
-//! finds the keypoints using the SIFT algorithm
-void operator()(const Mat& img, const Mat& mask,
-vector<KeyPoint>& keypoints) const;
-//! finds the keypoints and computes descriptors for them using SIFT algorithm.
-//! Optionally it can compute descriptors for the user-provided keypoints
-void operator()(const Mat& img, const Mat& mask,
-vector<KeyPoint>& keypoints,
-Mat& descriptors,
-bool useProvidedKeypoints=false) const;
-
-CommonParams getCommonParams () const { return commParams; }
-DetectorParams getDetectorParams () const { return detectorParams; }
-DescriptorParams getDescriptorParams () const { return descriptorParams; }
-protected:
-...
-};
-
-
-SURF
-----
-.. ocv:class:: SURF
-
-Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from ``CvSURFParams`` structure, which specifies the algorithm parameters:
-
-.. ocv:member:: int extended
-
-* 0 means that the basic descriptors (64 elements each) shall be computed
-* 1 means that the extended descriptors (128 elements each) shall be computed
-
-.. ocv:member:: int upright
-
-* 0 means that detector computes orientation of each feature.
-* 1 means that the orientation is not computed (which is much, much faster). For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting ``upright=1``.
-
-.. ocv:member:: double hessianThreshold
-
-Threshold for the keypoint detector. Only features, whose hessian is larger than ``hessianThreshold`` are retained by the detector. Therefore, the larger the value, the less keypoints you will get. A good default value could be from 300 to 500, depending from the image contrast.
-
-.. ocv:member:: int nOctaves
-
-The number of a gaussian pyramid octaves that the detector uses. It is set to 4 by default. If you want to get very large features, use the larger value. If you want just small features, decrease it.
-
-.. ocv:member:: int nOctaveLayers
-
-The number of images within each octave of a gaussian pyramid. It is set to 2 by default.
-
-.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
-
-SURF::SURF
-----------
-The SURF extractor constructors.
-
-.. ocv:function:: SURF::SURF()
-
-.. ocv:function:: SURF::SURF(double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=false, bool upright=false)
-
-.. ocv:pyfunction:: cv2.SURF(_hessianThreshold[, _nOctaves[, _nOctaveLayers[, _extended[, _upright]]]]) -> <SURF object>
-
-:param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
-
-:param nOctaves: Number of pyramid octaves the keypoint detector will use.
-
-:param nOctaveLayers: Number of octave layers within each octave.
-
-:param extended: Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).
-
-:param upright: Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation).
-
-SURF::operator()
-----------------
-Detects keypoints and computes SURF descriptors for them.
-
-.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints)
-.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints, vector<float>& descriptors, bool useProvidedKeypoints=false)
-
-.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
-.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
-
-.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
-
-.. ocv:pyoldfunction:: cv.ExtractSURF(image, mask, storage, params)-> (keypoints, descriptors)
-
-:param image: Input 8-bit grayscale image
-
-:param mask: Optional input mask that marks the regions where we should detect features.
-
-:param keypoints: The input/output vector of keypoints
-
-:param descriptors: The output concatenated vectors of descriptors. Each descriptor is 64- or 128-element vector, as returned by ``SURF::descriptorSize()``. So the total size of ``descriptors`` will be ``keypoints.size()*descriptorSize()``.
-
-:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
-
-:param storage: Memory storage for the output keypoints and descriptors in OpenCV 1.x API.
-
-:param params: SURF algorithm parameters in OpenCV 1.x API.
-
ORB
----
.. ocv:class:: ORB
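With this block removed from the features2d documentation, SIFT and SURF live in the new nonfree module. A hedged usage sketch for SURF after the move (the header path and initModule_nonfree() call are as in later 2.4 releases and may differ slightly in this exact revision):

    #include "opencv2/nonfree/nonfree.hpp"

    void surf_example(const cv::Mat& img)
    {
        cv::initModule_nonfree();          // registers "Feature2D.SIFT"/"Feature2D.SURF" with the Algorithm factory
        cv::SURF surf(400.0 /* hessianThreshold */);
        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        surf(img, cv::Mat(), keypoints, descriptors);
    }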
(File diff suppressed because it is too large.)
@@ -22,7 +22,7 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))

Mat mask;
declare.in(frame);
-ORB detector(1500, ORB::CommonParams(1.3f, 5));
+ORB detector(1500, 1.3f, 5);
vector<KeyPoint> points;

TEST_CYCLE() detector(frame, mask, points);
@@ -39,7 +39,7 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);

-ORB detector(1500, ORB::CommonParams(1.3f, 5));
+ORB detector(1500, 1.3f, 5);
vector<KeyPoint> points;
detector(frame, mask, points);

@@ -58,7 +58,7 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))

Mat mask;
declare.in(frame);
-ORB detector(1500, ORB::CommonParams(1.3f, 5));
+ORB detector(1500, 1.3f, 5);

vector<KeyPoint> points;
Mat descriptors;
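The perf tests above reflect the refactored ORB interface: the nested ORB::CommonParams struct is gone and the scale factor and pyramid level count become plain constructor arguments. Illustrative sketch (argument names assumed, not taken from the diff; image is an 8-bit cv::Mat assumed to exist):

    // old: cv::ORB orb(1500, cv::ORB::CommonParams(1.3f, 5));
    cv::ORB orb(1500 /* nfeatures */, 1.3f /* scaleFactor */, 5 /* nlevels */);
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    orb(image, cv::Mat(), keypoints, descriptors);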
@@ -77,11 +77,11 @@ void DescriptorExtractor::compute( const vector<Mat>& imageCollection, vector<ve
compute( imageCollection[i], pointCollection[i], descCollection[i] );
}

-void DescriptorExtractor::read( const FileNode& )
+/*void DescriptorExtractor::read( const FileNode& )
{}

void DescriptorExtractor::write( FileStorage& ) const
-{}
+{}*/

bool DescriptorExtractor::empty() const
{
@@ -96,184 +96,16 @@ void DescriptorExtractor::removeBorderKeypoints( vector<KeyPoint>& keypoints,

Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)
{
-DescriptorExtractor* de = 0;
-
-size_t pos = 0;
-if (!descriptorExtractorType.compare("SIFT"))
-{
-de = new SiftDescriptorExtractor();
-}
-else if (!descriptorExtractorType.compare("SURF"))
-{
-de = new SurfDescriptorExtractor();
-}
-else if (!descriptorExtractorType.compare("ORB"))
-{
-de = new OrbDescriptorExtractor();
-}
-else if (!descriptorExtractorType.compare("BRIEF"))
-{
-de = new BriefDescriptorExtractor();
-}
-else if ( (pos=descriptorExtractorType.find("Opponent")) == 0)
-{
-pos += string("Opponent").size();
-de = new OpponentColorDescriptorExtractor( DescriptorExtractor::create(descriptorExtractorType.substr(pos)) );
-}
-return de;
+if( descriptorExtractorType.find("Opponent") == 0)
+{
+size_t pos = string("Opponent").size();
+return DescriptorExtractor::create(descriptorExtractorType.substr(pos));
+}
+
+return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}

-/****************************************************************************************\
-* SiftDescriptorExtractor *
-\****************************************************************************************/
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-SiftDescriptorExtractor::SiftDescriptorExtractor(const SIFT::DescriptorParams& descriptorParams,
-const SIFT::CommonParams& commonParams)
-: sift( descriptorParams.magnification, descriptorParams.isNormalize, descriptorParams.recalculateAngles,
-commonParams.nOctaves, commonParams.nOctaveLayers, commonParams.firstOctave, commonParams.angleMode )
-{}
-
-SiftDescriptorExtractor::SiftDescriptorExtractor( double magnification, bool isNormalize, bool recalculateAngles,
-int nOctaves, int nOctaveLayers, int firstOctave, int angleMode )
-: sift( magnification, isNormalize, recalculateAngles, nOctaves, nOctaveLayers, firstOctave, angleMode )
-{}
-
-void SiftDescriptorExtractor::computeImpl( const Mat& image,
-vector<KeyPoint>& keypoints,
-Mat& descriptors) const
-{
-bool useProvidedKeypoints = true;
-Mat grayImage = image;
-if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
-
-sift(grayImage, Mat(), keypoints, descriptors, useProvidedKeypoints);
-}
-
-void SiftDescriptorExtractor::read (const FileNode &fn)
-{
-double magnification = fn["magnification"];
-bool isNormalize = (int)fn["isNormalize"] != 0;
-bool recalculateAngles = (int)fn["recalculateAngles"] != 0;
-int nOctaves = fn["nOctaves"];
-int nOctaveLayers = fn["nOctaveLayers"];
-int firstOctave = fn["firstOctave"];
-int angleMode = fn["angleMode"];
-
-sift = SIFT( magnification, isNormalize, recalculateAngles, nOctaves, nOctaveLayers, firstOctave, angleMode );
-}
-
-void SiftDescriptorExtractor::write (FileStorage &fs) const
-{
-// fs << "algorithm" << getAlgorithmName ();
-
-SIFT::CommonParams commParams = sift.getCommonParams ();
-SIFT::DescriptorParams descriptorParams = sift.getDescriptorParams ();
-fs << "magnification" << descriptorParams.magnification;
-fs << "isNormalize" << descriptorParams.isNormalize;
-fs << "recalculateAngles" << descriptorParams.recalculateAngles;
-fs << "nOctaves" << commParams.nOctaves;
-fs << "nOctaveLayers" << commParams.nOctaveLayers;
-fs << "firstOctave" << commParams.firstOctave;
-fs << "angleMode" << commParams.angleMode;
-}
-
-int SiftDescriptorExtractor::descriptorSize() const
-{
-return sift.descriptorSize();
-}
-
-int SiftDescriptorExtractor::descriptorType() const
-{
-return CV_32FC1;
-}
-
-/****************************************************************************************\
-* SurfDescriptorExtractor *
-\****************************************************************************************/
-SurfDescriptorExtractor::SurfDescriptorExtractor( int nOctaves,
-int nOctaveLayers, bool extended, bool upright )
-: surf( 0.0, nOctaves, nOctaveLayers, extended, upright )
-{}
-
-void SurfDescriptorExtractor::computeImpl( const Mat& image,
-vector<KeyPoint>& keypoints,
-Mat& descriptors) const
-{
-// Compute descriptors for given keypoints
-vector<float> _descriptors;
-Mat mask;
-bool useProvidedKeypoints = true;
-Mat grayImage = image;
-if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
-
-surf(grayImage, mask, keypoints, _descriptors, useProvidedKeypoints);
-
-descriptors.create((int)keypoints.size(), (int)surf.descriptorSize(), CV_32FC1);
-assert( (int)_descriptors.size() == descriptors.rows * descriptors.cols );
-std::copy(_descriptors.begin(), _descriptors.end(), descriptors.begin<float>());
-}
-
-void SurfDescriptorExtractor::read( const FileNode &fn )
-{
-int nOctaves = fn["nOctaves"];
-int nOctaveLayers = fn["nOctaveLayers"];
-bool extended = (int)fn["extended"] != 0;
-bool upright = (int)fn["upright"] != 0;
-
-surf = SURF( 0.0, nOctaves, nOctaveLayers, extended, upright );
-}
-
-void SurfDescriptorExtractor::write( FileStorage &fs ) const
-{
-// fs << "algorithm" << getAlgorithmName ();
-
-fs << "nOctaves" << surf.nOctaves;
-fs << "nOctaveLayers" << surf.nOctaveLayers;
-fs << "extended" << surf.extended;
-fs << "upright" << surf.upright;
-}
-
-int SurfDescriptorExtractor::descriptorSize() const
-{
-return surf.descriptorSize();
-}
-
-int SurfDescriptorExtractor::descriptorType() const
-{
-return CV_32FC1;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-/** Default constructor */
-OrbDescriptorExtractor::OrbDescriptorExtractor(ORB::CommonParams params)
-{
-orb_ = ORB(0, params);
-}
-void OrbDescriptorExtractor::computeImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints,
-cv::Mat& descriptors) const
-{
-cv::Mat empty_mask;
-orb_(image, empty_mask, keypoints, descriptors, true);
-}
-void OrbDescriptorExtractor::read(const cv::FileNode& fn)
-{
-orb_.read(fn);
-}
-void OrbDescriptorExtractor::write(cv::FileStorage& fs) const
-{
-orb_.write(fs);
-}
-int OrbDescriptorExtractor::descriptorSize() const
-{
-return orb_.descriptorSize();
-}
-int OrbDescriptorExtractor::descriptorType() const
-{
-return CV_8UC1;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/****************************************************************************************\
* OpponentColorDescriptorExtractor *
\****************************************************************************************/
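After this rewrite, DescriptorExtractor::create() only special-cases the "Opponent" prefix and otherwise defers to the Algorithm registry under the "Feature2D." namespace. A small sketch of what callers see (illustrative only):

    cv::Ptr<cv::DescriptorExtractor> de = cv::DescriptorExtractor::create("ORB");
    if( de.empty() )
        CV_Error(CV_StsError, "\"Feature2D.ORB\" is not registered with the Algorithm factory");
    // SIFT/SURF resolve the same way, but only when the nonfree module has registered them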
@@ -45,6 +45,7 @@ using namespace std;

namespace cv
{
+
/*
* FeatureDetector
*/
@@ -71,11 +72,11 @@ void FeatureDetector::detect(const vector<Mat>& imageCollection, vector<vector<K
detect( imageCollection[i], pointCollection[i], masks.empty() ? Mat() : masks[i] );
}

-void FeatureDetector::read( const FileNode& )
+/*void FeatureDetector::read( const FileNode& )
{}

void FeatureDetector::write( FileStorage& ) const
-{}
+{}*/

bool FeatureDetector::empty() const
{
@ -89,387 +90,91 @@ void FeatureDetector::removeInvalidPoints( const Mat& mask, vector<KeyPoint>& ke
|
|||||||
|
|
||||||
Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
|
Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
|
||||||
{
|
{
|
||||||
FeatureDetector* fd = 0;
|
if( detectorType.find("Grid") == 0 )
|
||||||
size_t pos = 0;
|
|
||||||
|
|
||||||
if( !detectorType.compare( "FAST" ) )
|
|
||||||
{
|
{
|
||||||
fd = new FastFeatureDetector();
|
return new GridAdaptedFeatureDetector(FeatureDetector::create(
|
||||||
|
detectorType.substr(strlen("Grid"))));
|
||||||
}
|
}
|
||||||
else if( !detectorType.compare( "STAR" ) )
|
|
||||||
|
if( detectorType.find("Pyramid") == 0 )
|
||||||
{
|
{
|
||||||
fd = new StarFeatureDetector();
|
return new PyramidAdaptedFeatureDetector(FeatureDetector::create(
|
||||||
|
detectorType.substr(strlen("Pyramid"))));
|
||||||
}
|
}
|
||||||
else if( !detectorType.compare( "SIFT" ) )
|
|
||||||
|
if( detectorType.find("Dynamic") == 0 )
|
||||||
{
|
{
|
||||||
fd = new SiftFeatureDetector();
|
return new DynamicAdaptedFeatureDetector(AdjusterAdapter::create(
|
||||||
|
detectorType.substr(strlen("Dynamic"))));
|
||||||
}
|
}
|
||||||
else if( !detectorType.compare( "SURF" ) )
|
|
||||||
|
if( detectorType.compare( "HARRIS" ) == 0 )
|
||||||
{
|
{
|
||||||
fd = new SurfFeatureDetector();
|
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
|
||||||
}
|
fd->set("useHarrisDetector", true);
|
||||||
else if( !detectorType.compare( "ORB" ) )
|
return fd;
|
||||||
{
|
|
||||||
fd = new OrbFeatureDetector();
|
|
||||||
}
|
|
||||||
else if( !detectorType.compare( "MSER" ) )
|
|
||||||
{
|
|
||||||
fd = new MserFeatureDetector();
|
|
||||||
}
|
|
||||||
else if( !detectorType.compare( "GFTT" ) )
|
|
||||||
{
|
|
||||||
fd = new GoodFeaturesToTrackDetector();
|
|
||||||
}
|
|
||||||
else if( !detectorType.compare( "HARRIS" ) )
|
|
||||||
{
|
|
||||||
GoodFeaturesToTrackDetector::Params params;
|
|
||||||
params.useHarrisDetector = true;
|
|
||||||
fd = new GoodFeaturesToTrackDetector(params);
|
|
||||||
}
|
|
||||||
else if( !detectorType.compare( "Dense" ) )
|
|
||||||
{
|
|
||||||
fd = new DenseFeatureDetector();
|
|
||||||
}
|
|
||||||
else if( !detectorType.compare( "SimpleBlob" ) )
|
|
||||||
{
|
|
||||||
fd = new SimpleBlobDetector();
|
|
||||||
}
|
|
||||||
else if( (pos=detectorType.find("Grid")) == 0 )
|
|
||||||
{
|
|
||||||
pos += string("Grid").size();
|
|
||||||
fd = new GridAdaptedFeatureDetector( FeatureDetector::create(detectorType.substr(pos)) );
|
|
||||||
}
|
|
||||||
else if( (pos=detectorType.find("Pyramid")) == 0 )
|
|
||||||
{
|
|
||||||
pos += string("Pyramid").size();
|
|
||||||
fd = new PyramidAdaptedFeatureDetector( FeatureDetector::create(detectorType.substr(pos)) );
|
|
||||||
}
|
|
||||||
else if( (pos=detectorType.find("Dynamic")) == 0 )
|
|
||||||
{
|
|
||||||
pos += string("Dynamic").size();
|
|
||||||
fd = new DynamicAdaptedFeatureDetector( AdjusterAdapter::create(detectorType.substr(pos)) );
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return fd;
|
return Algorithm::create<FeatureDetector>("Feature2D." + detectorType);
|
||||||
}
|
}
|
||||||
|
|
||||||
[Removed from detectors.cpp: the pre-Algorithm implementations of FastFeatureDetector
 (constructor, read, write, detectImpl), GoodFeaturesToTrackDetector (its Params struct with
 read/write, both constructors, read, write, detectImpl) and MserFeatureDetector (constructors,
 read, write, and a detectImpl that ran the MSER member and fitted ellipses to the detected
 regions). GoodFeaturesToTrackDetector is replaced by the Algorithm-based GFTTDetector:]

+GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
+                            double _minDistance, int _blockSize,
+                            bool _useHarrisDetector, double _k )
+    : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
+      blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
+{}
+
+void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
+{
+    Mat grayImage = image;
+    if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
+
+    vector<Point2f> corners;
+    goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, mask,
+                         blockSize, useHarrisDetector, k );
+    keypoints.resize(corners.size());
+    vector<Point2f>::const_iterator corner_it = corners.begin();
+    vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
+    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
+    {
+        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
+    }
+}
+
+static Algorithm* createGFTT() { return new GFTTDetector; }
+
+static Algorithm* createHarris()
+{
+    GFTTDetector* d = new GFTTDetector;
+    d->set("useHarris", true);
+    return d;
+}
+
+static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
+static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
+
+AlgorithmInfo* GFTTDetector::info() const
+{
+    static volatile bool initialized = false;
+    if( !initialized )
+    {
+        gftt_info.addParam(this, "nfeatures", nfeatures);
+        gftt_info.addParam(this, "qualityLevel", qualityLevel);
+        gftt_info.addParam(this, "minDistance", minDistance);
+        gftt_info.addParam(this, "useHarrisDetector", useHarrisDetector);
+        gftt_info.addParam(this, "k", k);
+
+        harris_info.addParam(this, "nfeatures", nfeatures);
+        harris_info.addParam(this, "qualityLevel", qualityLevel);
+        harris_info.addParam(this, "minDistance", minDistance);
+        harris_info.addParam(this, "useHarrisDetector", useHarrisDetector);
+        harris_info.addParam(this, "k", k);
+
+        initialized = true;
+    }
+    return &gftt_info;
+}
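Since GFTTDetector registers itself as "Feature2D.GFTT" and "Feature2D.HARRIS", its settings are now tuned through the generic Algorithm get/set interface rather than a Params struct. A short sketch using the parameter names from the addParam calls above ('gray' stands for an 8-bit grayscale cv::Mat):

    void detectCorners(const cv::Mat& gray, std::vector<cv::KeyPoint>& keypoints)
    {
        cv::Ptr<cv::FeatureDetector> gftt = cv::FeatureDetector::create("GFTT");
        if( gftt.empty() )
            return;
        gftt->set("nfeatures", 500);        // keep at most 500 corners
        gftt->set("qualityLevel", 0.02);    // relative corner-quality threshold
        gftt->set("minDistance", 5.0);      // minimum spacing between corners, in pixels
        gftt->detect(gray, keypoints);
    }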
-/*
- * StarFeatureDetector
- */
-StarFeatureDetector::StarFeatureDetector( const CvStarDetectorParams& params )
-    : star( params.maxSize, params.responseThreshold, params.lineThresholdProjected,
-            params.lineThresholdBinarized, params.suppressNonmaxSize)
-{}
-
-StarFeatureDetector::StarFeatureDetector(int maxSize, int responseThreshold,
-                                         int lineThresholdProjected,
-                                         int lineThresholdBinarized,
-                                         int suppressNonmaxSize)
-    : star( maxSize, responseThreshold, lineThresholdProjected,
-            lineThresholdBinarized, suppressNonmaxSize)
-{}
[also removed: StarFeatureDetector::read/write for maxSize, responseThreshold,
 lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, and its detectImpl]

-/*
- * SiftFeatureDetector
- */
-SiftFeatureDetector::SiftFeatureDetector( const SIFT::DetectorParams &detectorParams,
-                                          const SIFT::CommonParams &commonParams )
-SiftFeatureDetector::SiftFeatureDetector( double threshold, double edgeThreshold,
-                                          int nOctaves, int nOctaveLayers, int firstOctave, int angleMode )
[also removed: SiftFeatureDetector::read/write for threshold, edgeThreshold, nOctaves,
 nOctaveLayers, firstOctave, angleMode, and a detectImpl calling sift(grayImage, mask, keypoints)]

-/*
- * SurfFeatureDetector
- */
-SurfFeatureDetector::SurfFeatureDetector( double hessianThreshold, int octaves, int octaveLayers, bool upright )
-    : surf(hessianThreshold, octaves, octaveLayers, false, upright)
-{}
[also removed: SurfFeatureDetector::read/write for hessianThreshold, octaves, octaveLayers,
 upright, and a detectImpl calling surf(grayImage, mask, keypoints)]

-/** Default constructor
- * @param n_features the number of desired features
- */
-OrbFeatureDetector::OrbFeatureDetector(size_t n_features, ORB::CommonParams params)
-{
-    orb_ = ORB(n_features, params);
-}
[also removed: OrbFeatureDetector::read/write delegating to orb_, and a detectImpl
 calling orb_(image, mask, keypoints)]

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -477,7 +182,7 @@ void OrbFeatureDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
 /*
  * DenseFeatureDetector
  */
-DenseFeatureDetector::Params::Params( float _initFeatureScale, int _featureScaleLevels,
+DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
                                       float _featureScaleMul, int _initXyStep,
                                       int _initImgBound, bool _varyXyStepWithScale,
                                       bool _varyImgBoundWithScale ) :
@ -486,51 +191,13 @@ DenseFeatureDetector::Params::Params( float _initFeatureScale, int _featureScale
     varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
 {}

[removed: DenseFeatureDetector::Params::read/write serialization, the Params-based
 constructor and the read/write wrappers; the detector fields are now plain members
 exposed through the AlgorithmInfo registration below]

 void DenseFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
 {
-    float curScale = params.initFeatureScale;
-    int curStep = params.initXyStep;
-    int curBound = params.initImgBound;
-    for( int curLevel = 0; curLevel < params.featureScaleLevels; curLevel++ )
+    float curScale = initFeatureScale;
+    int curStep = initXyStep;
+    int curBound = initImgBound;
+    for( int curLevel = 0; curLevel < featureScaleLevels; curLevel++ )
     {
         for( int x = curBound; x < image.cols - curBound; x += curStep )
         {
@ -540,13 +207,35 @@ void DenseFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypo
         }
     }

-        curScale = curScale * params.featureScaleMul;
-        if( params.varyXyStepWithScale ) curStep = static_cast<int>( curStep * params.featureScaleMul + 0.5f );
-        if( params.varyImgBoundWithScale ) curBound = static_cast<int>( curBound * params.featureScaleMul + 0.5f );
+        curScale = curScale * featureScaleMul;
+        if( varyXyStepWithScale ) curStep = static_cast<int>( curStep * featureScaleMul + 0.5f );
+        if( varyImgBoundWithScale ) curBound = static_cast<int>( curBound * featureScaleMul + 0.5f );
     }

     KeyPointsFilter::runByPixelsMask( keypoints, mask );
 }

+static Algorithm* createDense() { return new DenseFeatureDetector; }
+
+AlgorithmInfo* DenseFeatureDetector::info() const
+{
+    static AlgorithmInfo info_("Feature2D.Dense", createDense);
+    static volatile bool initialized = false;
+    if( !initialized )
+    {
+        info_.addParam(this, "initFeatureScale", initFeatureScale);
+        info_.addParam(this, "featureScaleLevels", featureScaleLevels);
+        info_.addParam(this, "featureScaleMul", featureScaleMul);
+        info_.addParam(this, "initXyStep", initXyStep);
+        info_.addParam(this, "initImgBound", initImgBound);
+        info_.addParam(this, "varyXyStepWithScale", varyXyStepWithScale);
+        info_.addParam(this, "varyImgBoundWithScale", varyImgBoundWithScale);
+
+        initialized = true;
+    }
+    return &info_;
+}

 /*
  * GridAdaptedFeatureDetector
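Because DenseFeatureDetector now exposes its fields through AlgorithmInfo as well, its parameters can be listed and changed without knowing the concrete class. A hedged sketch, assuming the Algorithm::getParams/set accessors of this API generation (<iostream>, <string> and <vector> includes assumed):

    void tuneDense(cv::Ptr<cv::FeatureDetector>& dense)
    {
        if( dense.empty() )
            return;
        std::vector<std::string> names;
        dense->getParams(names);                    // e.g. initFeatureScale, initXyStep, ...
        for( size_t i = 0; i < names.size(); i++ )
            std::cout << names[i] << std::endl;     // print the registered parameter names
        dense->set("initXyStep", 8);                // sample every 8 pixels at the first level
        dense->set("featureScaleLevels", 3);        // use three scale levels
    }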
@ -169,8 +169,9 @@ SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max

 void SurfAdjuster::detectImpl(const Mat& image, vector<KeyPoint>& keypoints, const cv::Mat& mask) const
 {
-    SurfFeatureDetector detector_tmp(thresh_);
-    detector_tmp.detect(image, keypoints, mask);
+    Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
+    surf->set("hessianThreshold", thresh_);
+    surf->detect(image, keypoints, mask);
 }

 void SurfAdjuster::tooFew(int, int)
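SurfAdjuster now reaches SURF through the detector registry, so it only produces keypoints when the module that registers "SURF" is linked in at runtime. A hedged sketch of plugging the adjuster into DynamicAdaptedFeatureDetector (argument order as in the 2.x adapter API: adjuster, min features, max features, max iterations):

    void adaptiveSurfDetect(const cv::Mat& gray, std::vector<cv::KeyPoint>& keypoints)
    {
        // Ask for roughly 400-500 keypoints, letting the adjuster retune the
        // SURF hessian threshold for at most 5 iterations.
        cv::Ptr<cv::FeatureDetector> detector =
            new cv::DynamicAdaptedFeatureDetector(new cv::SurfAdjuster(400., 2., 1000.), 400, 500, 5);
        detector->detect(gray, keypoints);
    }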
@ -185,11 +185,10 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold)
     return threshold;
 }

-}
-
-void cv::FAST(const Mat& img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
+void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
 {
+    Mat img = _img.getMat();
     const int K = 8, N = 16 + K + 1;
     int i, j, k, pixel[N];
     makeOffsets(pixel, img.step);
@ -280,7 +279,7 @@ void cv::FAST(const Mat& img, std::vector<KeyPoint>& keypoints, int threshold, b
     {
         cornerpos[ncorners++] = j+k;
         if(nonmax_suppression)
-            curr[j+k] = (uchar)cornerScore(ptr+k, pixel, threshold);
+            curr[j+k] = cornerScore(ptr+k, pixel, threshold);
     }
 }
 #endif
@ -318,7 +317,7 @@ void cv::FAST(const Mat& img, std::vector<KeyPoint>& keypoints, int threshold, b
     {
         cornerpos[ncorners++] = j;
         if(nonmax_suppression)
-            curr[j] = (uchar)cornerScore(ptr, pixel, threshold);
+            curr[j] = cornerScore(ptr, pixel, threshold);
         break;
     }
 }
@ -340,7 +339,7 @@ void cv::FAST(const Mat& img, std::vector<KeyPoint>& keypoints, int threshold, b
     {
         cornerpos[ncorners++] = j;
         if(nonmax_suppression)
-            curr[j] = (uchar)cornerScore(ptr, pixel, threshold);
+            curr[j] = cornerScore(ptr, pixel, threshold);
         break;
     }
 }
@ -375,3 +374,38 @@ void cv::FAST(const Mat& img, std::vector<KeyPoint>& keypoints, int threshold, b
     }
 }
 }

+/*
+ * FastFeatureDetector
+ */
+FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression )
+    : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression)
+{}
+
+void FastFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
+{
+    Mat grayImage = image;
+    if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
+    FAST( grayImage, keypoints, threshold, nonmaxSuppression );
+    KeyPointsFilter::runByPixelsMask( keypoints, mask );
+}
+
+static Algorithm* createFAST() { return new FastFeatureDetector; }
+static AlgorithmInfo fast_info("Feature2D.FAST", createFAST);
+
+AlgorithmInfo* FastFeatureDetector::info() const
+{
+    static volatile bool initialized = false;
+    if( !initialized )
+    {
+        fast_info.addParam(this, "threshold", threshold);
+        fast_info.addParam(this, "nonmaxSuppression", nonmaxSuppression);
+
+        initialized = true;
+    }
+    return &fast_info;
+}
+
+}
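cv::FAST now takes an InputArray, and the same parameters are exposed by the FastFeatureDetector class registered above. A minimal sketch ('gray' is an 8-bit single-channel cv::Mat):

    void fastCorners(const cv::Mat& gray)
    {
        std::vector<cv::KeyPoint> corners;
        cv::FAST(gray, corners, 30, true);                // threshold 30, non-max suppression on

        cv::FastFeatureDetector detector(30, true);       // the same thing via the detector class
        detector.detect(gray, corners);
    }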
@ -165,6 +165,52 @@ float KeyPoint::overlap( const KeyPoint& kp1, const KeyPoint& kp2 )

     return ovrl;
 }

+struct KeypointResponseGreaterThanThreshold
+{
+    KeypointResponseGreaterThanThreshold(float _value) :
+    value(_value)
+    {
+    }
+    inline bool operator()(const KeyPoint& kpt) const
+    {
+        return kpt.response >= value;
+    }
+    float value;
+};
+
+struct KeypointResponseGreater
+{
+    inline bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) const
+    {
+        return kp1.response > kp2.response;
+    }
+};
+
+// takes keypoints and culls them by the response
+void KeyPointsFilter::retainBest(vector<KeyPoint>& keypoints, int n_points)
+{
+    //this is only necessary if the keypoints size is greater than the number of desired points.
+    if( n_points > 0 && keypoints.size() > (size_t)n_points )
+    {
+        if (n_points==0)
+        {
+            keypoints.clear();
+            return;
+        }
+        //first use nth element to partition the keypoints into the best and worst.
+        std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
+        //this is the boundary response, and in the case of FAST may be ambigous
+        float ambiguous_response = keypoints[n_points - 1].response;
+        //use std::partition to grab all of the keypoints with the boundary response.
+        vector<KeyPoint>::const_iterator new_end =
+            std::partition(keypoints.begin() + n_points, keypoints.end(),
+                           KeypointResponseGreaterThanThreshold(ambiguous_response));
+        //resize the keypoints, given this new end point. nth_element and partition reordered the points inplace
+        keypoints.resize(new_end - keypoints.begin());
+    }
+}
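retainBest keeps the n strongest keypoints by response, plus any keypoints that tie with the weakest one kept, so the result can be slightly larger than requested. A short usage sketch:

    void bestFastCorners(const cv::Mat& gray, std::vector<cv::KeyPoint>& keypoints)
    {
        cv::FAST(gray, keypoints, 20, true);               // often returns thousands of corners
        cv::KeyPointsFilter::retainBest(keypoints, 500);   // cull to roughly the 500 strongest
    }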
 struct RoiPredicate
 {
@ -314,6 +314,144 @@ bool DescriptorMatcher::isMaskedOut( const vector<Mat>& masks, int queryIdx )
     return !masks.empty() && outCount == masks.size() ;
 }

+///////////////////////////////////////////////////////////////////////////////////////////////////////
+
+BFMatcher::BFMatcher( int _normType, bool _crossCheck )
+{
+    normType = _normType;
+    crossCheck = _crossCheck;
+}
+
+Ptr<DescriptorMatcher> BFMatcher::clone( bool emptyTrainData ) const
+{
+    BFMatcher* matcher = new BFMatcher(normType, crossCheck);
+    if( !emptyTrainData )
+    {
+        matcher->trainDescCollection.resize(trainDescCollection.size());
+        std::transform( trainDescCollection.begin(), trainDescCollection.end(),
+                        matcher->trainDescCollection.begin(), clone_op );
+    }
+    return matcher;
+}
+
+void BFMatcher::knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int knn,
+                              const vector<Mat>& masks, bool compactResult )
+{
+    const int IMGIDX_SHIFT = 18;
+    const int IMGIDX_ONE = (1 << IMGIDX_SHIFT);
+
+    if( queryDescriptors.empty() || trainDescCollection.empty() )
+    {
+        matches.clear();
+        return;
+    }
+    CV_Assert( queryDescriptors.type() == trainDescCollection[0].type() );
+
+    matches.reserve(queryDescriptors.rows);
+
+    Mat dist, nidx;
+
+    int iIdx, imgCount = (int)trainDescCollection.size(), update = 0;
+    int dtype = normType == NORM_HAMMING || normType == NORM_HAMMING2 ||
+        (normType == NORM_L1 && queryDescriptors.type() == CV_8U) ? CV_32S : CV_32F;
+
+    CV_Assert( (int64)imgCount*IMGIDX_ONE < INT_MAX );
+
+    for( iIdx = 0; iIdx < imgCount; iIdx++ )
+    {
+        CV_Assert( trainDescCollection[iIdx].rows < IMGIDX_ONE );
+        batchDistance(queryDescriptors, trainDescCollection[iIdx], dist, dtype, nidx,
+                      normType, knn, masks.empty() ? Mat() : masks[iIdx], update, crossCheck);
+        update += IMGIDX_ONE;
+    }
+
+    if( dtype == CV_32S )
+    {
+        Mat temp;
+        dist.convertTo(temp, CV_32F);
+        dist = temp;
+    }
+
+    for( int qIdx = 0; qIdx < queryDescriptors.rows; qIdx++ )
+    {
+        const float* distptr = dist.ptr<float>(qIdx);
+        const int* nidxptr = nidx.ptr<int>(qIdx);
+
+        matches.push_back( vector<DMatch>() );
+        vector<DMatch>& mq = matches.back();
+        mq.reserve(knn);
+
+        for( int k = 0; k < knn; k++ )
+        {
+            if( nidxptr[k] < 0 )
+                break;
+            mq.push_back( DMatch(qIdx, nidxptr[k] & (IMGIDX_ONE - 1),
+                          nidxptr[k] >> IMGIDX_SHIFT, distptr[k]) );
+        }
+
+        if( mq.empty() && compactResult )
+            matches.pop_back();
+    }
+}
+
+void BFMatcher::radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches,
+                                 float maxDistance, const vector<Mat>& masks, bool compactResult )
+{
+    if( queryDescriptors.empty() || trainDescCollection.empty() )
+    {
+        matches.clear();
+        return;
+    }
+    CV_Assert( queryDescriptors.type() == trainDescCollection[0].type() );
+
+    matches.resize(queryDescriptors.rows);
+    Mat dist, distf;
+
+    int iIdx, imgCount = (int)trainDescCollection.size();
+    int dtype = normType == NORM_HAMMING ||
+        (normType == NORM_L1 && queryDescriptors.type() == CV_8U) ? CV_32S : CV_32F;
+
+    for( iIdx = 0; iIdx < imgCount; iIdx++ )
+    {
+        batchDistance(queryDescriptors, trainDescCollection[iIdx], dist, dtype, noArray(),
+                      normType, 0, masks.empty() ? Mat() : masks[iIdx], 0, false);
+        if( dtype == CV_32S )
+            dist.convertTo(distf, CV_32F);
+        else
+            distf = dist;
+
+        for( int qIdx = 0; qIdx < queryDescriptors.rows; qIdx++ )
+        {
+            const float* distptr = dist.ptr<float>(qIdx);
+
+            vector<DMatch>& mq = matches[qIdx];
+            for( int k = 0; k < dist.cols; k++ )
+            {
+                if( distptr[k] <= maxDistance )
+                    mq.push_back( DMatch(qIdx, k, iIdx, distptr[k]) );
+            }
+        }
+    }
+
+    int qIdx0 = 0;
+    for( int qIdx = 0; qIdx < queryDescriptors.rows; qIdx++ )
+    {
+        if( matches[qIdx].empty() && compactResult )
+            continue;
+
+        if( qIdx0 < qIdx )
+            std::swap(matches[qIdx], matches[qIdx0]);
+
+        std::sort( matches[qIdx0].begin(), matches[qIdx0].end() );
+        qIdx0++;
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
 /*
  * Factory function for DescriptorMatcher creating
  */
@ -326,31 +464,24 @@ Ptr<DescriptorMatcher> DescriptorMatcher::create( const string& descriptorMatche
     }
     else if( !descriptorMatcherType.compare( "BruteForce" ) ) // L2
     {
-        dm = new BruteForceMatcher<L2<float> >();
+        dm = new BFMatcher(NORM_L2);
     }
     else if( !descriptorMatcherType.compare( "BruteForce-SL2" ) ) // Squared L2
     {
-        dm = new BruteForceMatcher<SL2<float> >();
+        dm = new BFMatcher(NORM_L2SQR);
     }
     else if( !descriptorMatcherType.compare( "BruteForce-L1" ) )
     {
-        dm = new BruteForceMatcher<L1<float> >();
+        dm = new BFMatcher(NORM_L1);
     }
-    else if( !descriptorMatcherType.compare("BruteForce-Hamming") )
+    else if( !descriptorMatcherType.compare("BruteForce-Hamming") ||
+             !descriptorMatcherType.compare("BruteForce-HammingLUT") )
     {
-        dm = new BruteForceMatcher<Hamming>();
-    }
-    else if( !descriptorMatcherType.compare("BruteForce-HammingLUT") )
-    {
-        dm = new BruteForceMatcher<Hamming>();
+        dm = new BFMatcher(NORM_HAMMING);
     }
     else if( !descriptorMatcherType.compare("BruteForce-Hamming(2)") )
     {
-        dm = new BruteForceMatcher<HammingMultilevel<2> >();
-    }
-    else if( !descriptorMatcherType.compare("BruteForce-Hamming(4)") )
-    {
-        dm = new BruteForceMatcher<HammingMultilevel<4> >();
+        dm = new BFMatcher(NORM_HAMMING2);
     }
     else
         CV_Error( CV_StsBadArg, "Unknown matcher name" );
@ -358,199 +489,6 @@ Ptr<DescriptorMatcher> DescriptorMatcher::create( const string& descriptorMatche
     return dm;
 }
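The non-template BFMatcher picks its metric from a norm flag, so binary and float descriptors go through one class. A sketch of matching two ORB descriptor matrices with a ratio test (desc1 and desc2 are assumed to be descriptor Mats produced by an ORB extractor):

    void ratioTestMatch(const cv::Mat& desc1, const cv::Mat& desc2, std::vector<cv::DMatch>& good)
    {
        cv::BFMatcher matcher(cv::NORM_HAMMING);            // Hamming distance for binary descriptors
        std::vector<std::vector<cv::DMatch> > knnMatches;
        matcher.knnMatch(desc1, desc2, knnMatches, 2);      // two nearest neighbours per query row

        for( size_t i = 0; i < knnMatches.size(); i++ )
        {
            if( knnMatches[i].size() == 2 &&
                knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance )
                good.push_back(knnMatches[i][0]);           // keep only clearly unambiguous matches
        }
    }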
-/*
- * BruteForce SL2 and L2 specialization
- */
-template<>
-void BruteForceMatcher<SL2<float> >::knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int knn,
-                                                   const vector<Mat>& masks, bool compactResult )
-template<>
-void BruteForceMatcher<SL2<float> >::radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
-                                                      const vector<Mat>& masks, bool compactResult )
-inline void sqrtDistance( vector<vector<DMatch> >& matches )
-template<>
-void BruteForceMatcher<L2<float> >::knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int knn,
-                                                  const vector<Mat>& masks, bool compactResult )
-template<>
-void BruteForceMatcher<L2<float> >::radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
-                                                     const vector<Mat>& masks, bool compactResult )
[removed: the bodies of the template specializations listed above, including the optional
 HAVE_EIGEN code paths that converted query/train descriptors to Eigen matrices, computed
 dot-product based SL2 distances per train image, applied the masks, picked the knn/radius
 matches, and the L2 variants that delegated to the SL2 matcher and took the square root of
 the resulting distances via sqrtDistance; all of this is superseded by the batchDistance-based
 BFMatcher implementation above]
 /*
  * Flann based matcher
@ -1124,16 +1062,9 @@ bool GenericDescriptorMatcher::empty() const
 Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::create( const string& genericDescritptorMatcherType,
                                                                 const string &paramsFilename )
 {
-    Ptr<GenericDescriptorMatcher> descriptorMatcher;
-    if( ! genericDescritptorMatcherType.compare("ONEWAY") )
-    {
-        descriptorMatcher = new OneWayDescriptorMatcher();
-    }
-    else if( ! genericDescritptorMatcherType.compare("FERN") )
-    {
-        descriptorMatcher = new FernDescriptorMatcher();
-    }
+    Ptr<GenericDescriptorMatcher> descriptorMatcher =
+        Algorithm::create<GenericDescriptorMatcher>("DescriptorMatcher." + genericDescritptorMatcherType);

     if( !paramsFilename.empty() && !descriptorMatcher.empty() )
     {
         FileStorage fs = FileStorage( paramsFilename, FileStorage::READ );
@ -1146,331 +1077,6 @@ Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::create( const string& ge
     return descriptorMatcher;
 }
-/****************************************************************************************\
-*                                OneWayDescriptorMatcher                                  *
-\****************************************************************************************/
-/****************************************************************************************\
-*                                 FernDescriptorMatcher                                   *
-\****************************************************************************************/
[removed: the complete OneWayDescriptorMatcher and FernDescriptorMatcher implementations.
 For OneWayDescriptorMatcher this covers its Params struct, the constructor and destructor,
 initialize, clear, train (building an OneWayDescriptorObject from the collected train
 keypoints), isMaskSupported, knnMatchImpl (restricted to knn == 1), radiusMatchImpl, read,
 write, empty and clone. For FernDescriptorMatcher it covers both Params constructors, the
 constructor and destructor, clear, train (building a FernClassifier from the train point
 collection), isMaskSupported, calcBestProbAndMatchIdx, knnMatchImpl, radiusMatchImpl, read,
 write, empty and clone. These classes are no longer instantiated here;
 GenericDescriptorMatcher::create above resolves matcher names through the
 "DescriptorMatcher." Algorithm registry instead.]
/****************************************************************************************\
|
/****************************************************************************************\
|
||||||
* VectorDescriptorMatcher *
|
* VectorDescriptorMatcher *
|
||||||
|
@@ -41,7 +41,10 @@
 
 #include "precomp.hpp"
 
-#define TABLE_SIZE 400
+namespace cv
+{
+
+const int TABLE_SIZE = 400;
 
 static double chitab3[]={0, 0.0150057, 0.0239478, 0.0315227,
     0.0383427, 0.0446605, 0.0506115, 0.0562786,
@@ -144,36 +147,36 @@ static double chitab3[]={0, 0.0150057, 0.0239478, 0.0315227,
     3.37455, 3.48653, 3.61862, 3.77982,
     3.98692, 4.2776, 4.77167, 133.333 };
 
-typedef struct CvLinkedPoint
+typedef struct LinkedPoint
 {
-    struct CvLinkedPoint* prev;
-    struct CvLinkedPoint* next;
-    CvPoint pt;
+    struct LinkedPoint* prev;
+    struct LinkedPoint* next;
+    Point pt;
 }
-CvLinkedPoint;
+LinkedPoint;
 
 // the history of region grown
-typedef struct CvMSERGrowHistory
+typedef struct MSERGrowHistory
 {
-    struct CvMSERGrowHistory* shortcut;
-    struct CvMSERGrowHistory* child;
+    struct MSERGrowHistory* shortcut;
+    struct MSERGrowHistory* child;
     int stable; // when it ever stabled before, record the size
     int val;
    int size;
 }
-CvMSERGrowHistory;
+MSERGrowHistory;
 
-typedef struct CvMSERConnectedComp
+typedef struct MSERConnectedComp
 {
-    CvLinkedPoint* head;
-    CvLinkedPoint* tail;
-    CvMSERGrowHistory* history;
+    LinkedPoint* head;
+    LinkedPoint* tail;
+    MSERGrowHistory* history;
     unsigned long grey_level;
     int size;
     int dvar; // the derivative of last var
     float var; // the current variation (most time is the variation of one-step back)
 }
-CvMSERConnectedComp;
+MSERConnectedComp;
 
 // Linear Time MSER claims by using bsf can get performance gain, here is the implementation
 // however it seems that will not do any good in real world test
@@ -186,24 +189,29 @@ inline void _bitreset(unsigned long * a, unsigned long b)
     *a &= ~(1<<b);
 }
 
-CvMSERParams cvMSERParams( int delta, int minArea, int maxArea, float maxVariation, float minDiversity, int maxEvolution, double areaThreshold, double minMargin, int edgeBlurSize )
+struct MSERParams
 {
-    CvMSERParams params;
-    params.delta = delta;
-    params.minArea = minArea;
-    params.maxArea = maxArea;
-    params.maxVariation = maxVariation;
-    params.minDiversity = minDiversity;
-    params.maxEvolution = maxEvolution;
-    params.areaThreshold = areaThreshold;
-    params.minMargin = minMargin;
-    params.edgeBlurSize = edgeBlurSize;
-    return params;
-}
+    MSERParams( int _delta, int _minArea, int _maxArea, float _maxVariation,
+                float _minDiversity, int _maxEvolution, double _areaThreshold,
+                double _minMargin, int _edgeBlurSize )
+        : delta(_delta), minArea(_minArea), maxArea(_maxArea), maxVariation(_maxVariation),
+          minDiversity(_minDiversity), maxEvolution(_maxEvolution), areaThreshold(_areaThreshold),
+          minMargin(_minMargin), edgeBlurSize(_edgeBlurSize)
+    {}
+    int delta;
+    int minArea;
+    int maxArea;
+    float maxVariation;
+    float minDiversity;
+    int maxEvolution;
+    double areaThreshold;
+    double minMargin;
+    int edgeBlurSize;
+};
 
 // clear the connected component in stack
-CV_INLINE static void
-icvInitMSERComp( CvMSERConnectedComp* comp )
+static void
+initMSERComp( MSERConnectedComp* comp )
 {
     comp->size = 0;
     comp->var = 0;
@@ -212,9 +220,8 @@ icvInitMSERComp( CvMSERConnectedComp* comp )
 }
 
 // add history of size to a connected component
-CV_INLINE static void
-icvMSERNewHistory( CvMSERConnectedComp* comp,
-                   CvMSERGrowHistory* history )
+static void
+MSERNewHistory( MSERConnectedComp* comp, MSERGrowHistory* history )
 {
     history->child = history;
     if ( NULL == comp->history )
@@ -232,14 +239,14 @@ icvMSERNewHistory( CvMSERConnectedComp* comp,
 }
 
 // merging two connected component
-CV_INLINE static void
-icvMSERMergeComp( CvMSERConnectedComp* comp1,
-                  CvMSERConnectedComp* comp2,
-                  CvMSERConnectedComp* comp,
-                  CvMSERGrowHistory* history )
+static void
+MSERMergeComp( MSERConnectedComp* comp1,
+               MSERConnectedComp* comp2,
+               MSERConnectedComp* comp,
+               MSERGrowHistory* history )
 {
-    CvLinkedPoint* head;
-    CvLinkedPoint* tail;
+    LinkedPoint* head;
+    LinkedPoint* tail;
     comp->grey_level = comp2->grey_level;
     history->child = history;
     // select the winner by size
@@ -301,18 +308,17 @@ icvMSERMergeComp( CvMSERConnectedComp* comp1,
     comp->size = comp1->size + comp2->size;
 }
 
-CV_INLINE static float
-icvMSERVariationCalc( CvMSERConnectedComp* comp,
-                      int delta )
+static float
+MSERVariationCalc( MSERConnectedComp* comp, int delta )
 {
-    CvMSERGrowHistory* history = comp->history;
+    MSERGrowHistory* history = comp->history;
     int val = comp->grey_level;
     if ( NULL != history )
     {
-        CvMSERGrowHistory* shortcut = history->shortcut;
+        MSERGrowHistory* shortcut = history->shortcut;
         while ( shortcut != shortcut->shortcut && shortcut->val + delta > val )
             shortcut = shortcut->shortcut;
-        CvMSERGrowHistory* child = shortcut->child;
+        MSERGrowHistory* child = shortcut->child;
         while ( child != child->child && child->val + delta <= val )
         {
             shortcut = child;
@@ -328,15 +334,13 @@ icvMSERVariationCalc( CvMSERConnectedComp* comp,
         return 1.;
 }
 
-CV_INLINE static bool
-icvMSERStableCheck( CvMSERConnectedComp* comp,
-                    CvMSERParams params )
+static bool MSERStableCheck( MSERConnectedComp* comp, MSERParams params )
 {
     // tricky part: it actually check the stablity of one-step back
     if ( comp->history == NULL || comp->history->size <= params.minArea || comp->history->size >= params.maxArea )
         return 0;
     float div = (float)(comp->history->size-comp->history->stable)/(float)comp->history->size;
-    float var = icvMSERVariationCalc( comp, params.delta );
+    float var = MSERVariationCalc( comp, params.delta );
     int dvar = ( comp->var < var || (unsigned long)(comp->history->val + 1) < comp->grey_level );
     int stable = ( dvar && !comp->dvar && comp->var < params.maxVariation && div > params.minDiversity );
     comp->var = var;
@@ -347,9 +351,7 @@ icvMSERStableCheck( CvMSERConnectedComp* comp,
 }
 
 // add a pixel to the pixel list
-CV_INLINE static void
-icvAccumulateMSERComp( CvMSERConnectedComp* comp,
-                       CvLinkedPoint* point )
+static void accumulateMSERComp( MSERConnectedComp* comp, LinkedPoint* point )
 {
     if ( comp->size > 0 )
     {
@@ -366,14 +368,12 @@ icvAccumulateMSERComp( CvMSERConnectedComp* comp,
 }
 
 // convert the point set to CvSeq
-CV_INLINE static CvContour*
-icvMSERToContour( CvMSERConnectedComp* comp,
-                  CvMemStorage* storage )
+static CvContour* MSERToContour( MSERConnectedComp* comp, CvMemStorage* storage )
 {
     CvSeq* _contour = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage );
     CvContour* contour = (CvContour*)_contour;
     cvSeqPushMulti( _contour, 0, comp->history->size );
-    CvLinkedPoint* lpt = comp->head;
+    LinkedPoint* lpt = comp->head;
     for ( int i = 0; i < comp->history->size; i++ )
     {
         CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, _contour, i );
@@ -391,8 +391,7 @@ icvMSERToContour( CvMSERConnectedComp* comp,
 // 17~19 bits is the direction
 // 8~11 bits is the bucket it falls to (for BitScanForward)
 // 0~8 bits is the color
-static int*
-icvPreprocessMSER_8UC1( CvMat* img,
+static int* preprocessMSER_8UC1( CvMat* img,
                         int*** heap_cur,
                         CvMat* src,
                         CvMat* mask )
@@ -476,17 +475,16 @@ icvPreprocessMSER_8UC1( CvMat* img,
     return startptr;
 }
 
-static void
-icvExtractMSER_8UC1_Pass( int* ioptr,
+static void extractMSER_8UC1_Pass( int* ioptr,
                           int* imgptr,
                           int*** heap_cur,
-                          CvLinkedPoint* ptsptr,
-                          CvMSERGrowHistory* histptr,
-                          CvMSERConnectedComp* comptr,
+                          LinkedPoint* ptsptr,
+                          MSERGrowHistory* histptr,
+                          MSERConnectedComp* comptr,
                           int step,
                           int stepmask,
                           int stepgap,
-                          CvMSERParams params,
+                          MSERParams params,
                           int color,
                           CvSeq* contours,
                           CvMemStorage* storage )
@@ -494,7 +492,7 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
     comptr->grey_level = 256;
     comptr++;
     comptr->grey_level = (*imgptr)&0xff;
-    icvInitMSERComp( comptr );
+    initMSERComp( comptr );
     *imgptr |= 0x80000000;
     heap_cur += (*imgptr)&0xff;
     int dir[] = { 1, step, -1, -step };
@@ -527,7 +525,7 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
 #endif
     imgptr = imgptr_nbr;
     comptr++;
-    icvInitMSERComp( comptr );
+    initMSERComp( comptr );
     comptr->grey_level = (*imgptr)&0xff;
     continue;
 } else {
@@ -544,7 +542,7 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
     int i = (int)(imgptr-ioptr);
     ptsptr->pt = cvPoint( i&stepmask, i>>stepgap );
     // get the current location
-    icvAccumulateMSERComp( comptr, ptsptr );
+    accumulateMSERComp( comptr, ptsptr );
     ptsptr++;
     // get the next pixel from boundary heap
     if ( **heap_cur )
@@ -595,13 +593,13 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
     if ( pixel_val < comptr[-1].grey_level )
     {
         // check the stablity and push a new history, increase the grey level
-        if ( icvMSERStableCheck( comptr, params ) )
+        if ( MSERStableCheck( comptr, params ) )
         {
-            CvContour* contour = icvMSERToContour( comptr, storage );
+            CvContour* contour = MSERToContour( comptr, storage );
             contour->color = color;
             cvSeqPush( contours, &contour );
         }
-        icvMSERNewHistory( comptr, histptr );
+        MSERNewHistory( comptr, histptr );
         comptr[0].grey_level = pixel_val;
         histptr++;
     } else {
@@ -609,20 +607,20 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
     for ( ; ; )
     {
        comptr--;
-       icvMSERMergeComp( comptr+1, comptr, comptr, histptr );
+       MSERMergeComp( comptr+1, comptr, comptr, histptr );
       histptr++;
       if ( pixel_val <= comptr[0].grey_level )
          break;
      if ( pixel_val < comptr[-1].grey_level )
     {
         // check the stablity here otherwise it wouldn't be an ER
-        if ( icvMSERStableCheck( comptr, params ) )
+        if ( MSERStableCheck( comptr, params ) )
         {
-            CvContour* contour = icvMSERToContour( comptr, storage );
+            CvContour* contour = MSERToContour( comptr, storage );
             contour->color = color;
             cvSeqPush( contours, &contour );
         }
-        icvMSERNewHistory( comptr, histptr );
+        MSERNewHistory( comptr, histptr );
         comptr[0].grey_level = pixel_val;
         histptr++;
         break;
@@ -635,12 +633,11 @@ icvExtractMSER_8UC1_Pass( int* ioptr,
     }
 }
 
-static void
-icvExtractMSER_8UC1( CvMat* src,
+static void extractMSER_8UC1( CvMat* src,
                      CvMat* mask,
                      CvSeq* contours,
                      CvMemStorage* storage,
-                     CvMSERParams params )
+                     MSERParams params )
 {
     int step = 8;
     int stepgap = 3;
@@ -662,16 +659,16 @@ icvExtractMSER_8UC1( CvMat* src,
     heap_start[0] = heap;
 
     // pre-allocate linked point and grow history
-    CvLinkedPoint* pts = (CvLinkedPoint*)cvAlloc( src->rows*src->cols*sizeof(pts[0]) );
-    CvMSERGrowHistory* history = (CvMSERGrowHistory*)cvAlloc( src->rows*src->cols*sizeof(history[0]) );
-    CvMSERConnectedComp comp[257];
+    LinkedPoint* pts = (LinkedPoint*)cvAlloc( src->rows*src->cols*sizeof(pts[0]) );
+    MSERGrowHistory* history = (MSERGrowHistory*)cvAlloc( src->rows*src->cols*sizeof(history[0]) );
+    MSERConnectedComp comp[257];
 
     // darker to brighter (MSER-)
-    imgptr = icvPreprocessMSER_8UC1( img, heap_start, src, mask );
-    icvExtractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, -1, contours, storage );
+    imgptr = preprocessMSER_8UC1( img, heap_start, src, mask );
+    extractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, -1, contours, storage );
     // brighter to darker (MSER+)
-    imgptr = icvPreprocessMSER_8UC1( img, heap_start, src, mask );
-    icvExtractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, 1, contours, storage );
+    imgptr = preprocessMSER_8UC1( img, heap_start, src, mask );
+    extractMSER_8UC1_Pass( ioptr, imgptr, heap_start, pts, history, comp, step, stepmask, stepgap, params, 1, contours, storage );
 
     // clean up
     cvFree( &history );
@@ -680,26 +677,26 @@ icvExtractMSER_8UC1( CvMat* src,
     cvReleaseMat( &img );
 }
 
-struct CvMSCRNode;
+struct MSCRNode;
 
-typedef struct CvTempMSCR
+struct TempMSCR
 {
-    CvMSCRNode* head;
-    CvMSCRNode* tail;
+    MSCRNode* head;
+    MSCRNode* tail;
     double m; // the margin used to prune area later
     int size;
-} CvTempMSCR;
+};
 
-typedef struct CvMSCRNode
+struct MSCRNode
 {
-    CvMSCRNode* shortcut;
+    MSCRNode* shortcut;
     // to make the finding of root less painful
-    CvMSCRNode* prev;
-    CvMSCRNode* next;
+    MSCRNode* prev;
+    MSCRNode* next;
     // a point double-linked list
-    CvTempMSCR* tmsr;
+    TempMSCR* tmsr;
     // the temporary msr (set to NULL at every re-initialise)
-    CvTempMSCR* gmsr;
+    TempMSCR* gmsr;
     // the global msr (once set, never to NULL)
     int index;
     // the index of the node, at this point, it should be x at the first 16-bits, and y at the last 16-bits.
@@ -708,25 +705,23 @@ typedef struct CvMSCRNode
     int size, sizei;
     double dt, di;
     double s;
-} CvMSCRNode;
+};
 
-typedef struct CvMSCREdge
+struct MSCREdge
 {
     double chi;
-    CvMSCRNode* left;
-    CvMSCRNode* right;
-} CvMSCREdge;
+    MSCRNode* left;
+    MSCRNode* right;
+};
 
-CV_INLINE static double
-icvChisquaredDistance( uchar* x, uchar* y )
+static double ChiSquaredDistance( uchar* x, uchar* y )
 {
     return (double)((x[0]-y[0])*(x[0]-y[0]))/(double)(x[0]+y[0]+1e-10)+
            (double)((x[1]-y[1])*(x[1]-y[1]))/(double)(x[1]+y[1]+1e-10)+
           (double)((x[2]-y[2])*(x[2]-y[2]))/(double)(x[2]+y[2]+1e-10);
 }
 
-CV_INLINE static void
-icvInitMSCRNode( CvMSCRNode* node )
+static void initMSCRNode( MSCRNode* node )
 {
     node->gmsr = node->tmsr = NULL;
     node->reinit = 0xffff;
@@ -736,9 +731,8 @@ icvInitMSCRNode( CvMSCRNode* node )
 }
 
 // the preprocess to get the edge list with proper gaussian blur
-static int
-icvPreprocessMSER_8UC3( CvMSCRNode* node,
-                        CvMSCREdge* edge,
+static int preprocessMSER_8UC3( MSCRNode* node,
+                                MSCREdge* edge,
                         double* total,
                         CvMat* src,
                         CvMat* mask,
@@ -755,7 +749,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     {
         for ( int j = 0; j < src->cols-1; j++ )
         {
-            *dxptr = icvChisquaredDistance( srcptr, lastptr );
+            *dxptr = ChiSquaredDistance( srcptr, lastptr );
             dxptr++;
             srcptr += 3;
             lastptr += 3;
@@ -770,7 +764,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     {
         for ( int j = 0; j < src->cols; j++ )
        {
-            *dyptr = icvChisquaredDistance( srcptr, lastptr );
+            *dyptr = ChiSquaredDistance( srcptr, lastptr );
            dyptr++;
            srcptr += 3;
            lastptr += 3;
@@ -793,8 +787,8 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     Ne = 0;
     int maskcpt = mask->step-mask->cols+1;
     uchar* maskptr = mask->data.ptr;
-    CvMSCRNode* nodeptr = node;
-    icvInitMSCRNode( nodeptr );
+    MSCRNode* nodeptr = node;
+    initMSCRNode( nodeptr );
     nodeptr->index = 0;
     *total += edge->chi = *dxptr;
     if ( maskptr[0] && maskptr[1] )
@@ -809,7 +803,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     maskptr++;
     for ( int i = 1; i < src->cols-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = i;
         if ( maskptr[0] && maskptr[1] )
         {
@@ -823,13 +817,13 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         nodeptr++;
         maskptr++;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = src->cols-1;
     nodeptr++;
     maskptr += maskcpt;
     for ( int i = 1; i < src->rows-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = i<<16;
         if ( maskptr[0] )
         {
@@ -856,7 +850,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         maskptr++;
         for ( int j = 1; j < src->cols-1; j++ )
         {
-            icvInitMSCRNode( nodeptr );
+            initMSCRNode( nodeptr );
             nodeptr->index = (i<<16)|j;
             if ( maskptr[0] )
             {
@@ -882,7 +876,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
             nodeptr++;
             maskptr++;
         }
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = (i<<16)|(src->cols-1);
         if ( maskptr[0] && maskptr[-mask->step] )
         {
@@ -896,7 +890,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         nodeptr++;
         maskptr += maskcpt;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = (src->rows-1)<<16;
     if ( maskptr[0] )
     {
@@ -923,7 +917,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     maskptr++;
     for ( int i = 1; i < src->cols-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = ((src->rows-1)<<16)|i;
         if ( maskptr[0] )
         {
@@ -949,7 +943,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         nodeptr++;
         maskptr++;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = ((src->rows-1)<<16)|(src->cols-1);
     if ( maskptr[0] && maskptr[-mask->step] )
     {
@@ -959,8 +953,8 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         Ne++;
     }
 } else {
-    CvMSCRNode* nodeptr = node;
-    icvInitMSCRNode( nodeptr );
+    MSCRNode* nodeptr = node;
+    initMSCRNode( nodeptr );
     nodeptr->index = 0;
     *total += edge->chi = *dxptr;
     dxptr++;
@@ -970,7 +964,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     nodeptr++;
     for ( int i = 1; i < src->cols-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = i;
         *total += edge->chi = *dxptr;
         dxptr++;
@@ -979,12 +973,12 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         edge++;
         nodeptr++;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = src->cols-1;
     nodeptr++;
     for ( int i = 1; i < src->rows-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = i<<16;
         *total += edge->chi = *dyptr;
         dyptr++;
@@ -999,7 +993,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         nodeptr++;
         for ( int j = 1; j < src->cols-1; j++ )
         {
-            icvInitMSCRNode( nodeptr );
+            initMSCRNode( nodeptr );
             nodeptr->index = (i<<16)|j;
             *total += edge->chi = *dyptr;
             dyptr++;
@@ -1013,7 +1007,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
             edge++;
             nodeptr++;
         }
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = (i<<16)|(src->cols-1);
         *total += edge->chi = *dyptr;
         dyptr++;
@@ -1022,7 +1016,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         edge++;
         nodeptr++;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = (src->rows-1)<<16;
     *total += edge->chi = *dxptr;
     dxptr++;
@@ -1037,7 +1031,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
     nodeptr++;
     for ( int i = 1; i < src->cols-1; i++ )
     {
-        icvInitMSCRNode( nodeptr );
+        initMSCRNode( nodeptr );
         nodeptr->index = ((src->rows-1)<<16)|i;
         *total += edge->chi = *dxptr;
         dxptr++;
@@ -1051,7 +1045,7 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
         edge++;
         nodeptr++;
     }
-    icvInitMSCRNode( nodeptr );
+    initMSCRNode( nodeptr );
     nodeptr->index = ((src->rows-1)<<16)|(src->cols-1);
     *total += edge->chi = *dyptr;
     edge->left = nodeptr-src->cols;
@@ -1063,14 +1057,13 @@ icvPreprocessMSER_8UC3( CvMSCRNode* node,
 #define cmp_mscr_edge(edge1, edge2) \
     ((edge1).chi < (edge2).chi)
 
-static CV_IMPLEMENT_QSORT( icvQuickSortMSCREdge, CvMSCREdge, cmp_mscr_edge )
+static CV_IMPLEMENT_QSORT( QuickSortMSCREdge, MSCREdge, cmp_mscr_edge )
 
 // to find the root of one region
-CV_INLINE static CvMSCRNode*
-icvFindMSCR( CvMSCRNode* x )
+static MSCRNode* findMSCR( MSCRNode* x )
 {
-    CvMSCRNode* prev = x;
-    CvMSCRNode* next;
+    MSCRNode* prev = x;
+    MSCRNode* next;
     for ( ; ; )
     {
         next = x->shortcut;
@@ -1079,7 +1072,7 @@ icvFindMSCR( CvMSCRNode* x )
         prev= x;
         x = next;
     }
-    CvMSCRNode* root = x;
+    MSCRNode* root = x;
     for ( ; ; )
     {
         prev = x->shortcut;
@@ -1093,9 +1086,7 @@ icvFindMSCR( CvMSCRNode* x )
 // the stable mscr should be:
 // bigger than minArea and smaller than maxArea
 // differ from its ancestor more than minDiversity
-CV_INLINE static bool
-icvMSCRStableCheck( CvMSCRNode* x,
-                    CvMSERParams params )
+static bool MSCRStableCheck( MSCRNode* x, MSERParams params )
 {
     if ( x->size <= params.minArea || x->size >= params.maxArea )
         return 0;
@@ -1106,25 +1097,25 @@ icvMSCRStableCheck( CvMSCRNode* x,
 }
 
 static void
-icvExtractMSER_8UC3( CvMat* src,
+extractMSER_8UC3( CvMat* src,
                      CvMat* mask,
                      CvSeq* contours,
                      CvMemStorage* storage,
-                     CvMSERParams params )
+                     MSERParams params )
 {
-    CvMSCRNode* map = (CvMSCRNode*)cvAlloc( src->cols*src->rows*sizeof(map[0]) );
+    MSCRNode* map = (MSCRNode*)cvAlloc( src->cols*src->rows*sizeof(map[0]) );
     int Ne = src->cols*src->rows*2-src->cols-src->rows;
-    CvMSCREdge* edge = (CvMSCREdge*)cvAlloc( Ne*sizeof(edge[0]) );
-    CvTempMSCR* mscr = (CvTempMSCR*)cvAlloc( src->cols*src->rows*sizeof(mscr[0]) );
+    MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) );
+    TempMSCR* mscr = (TempMSCR*)cvAlloc( src->cols*src->rows*sizeof(mscr[0]) );
     double emean = 0;
     CvMat* dx = cvCreateMat( src->rows, src->cols-1, CV_64FC1 );
     CvMat* dy = cvCreateMat( src->rows-1, src->cols, CV_64FC1 );
-    Ne = icvPreprocessMSER_8UC3( map, edge, &emean, src, mask, dx, dy, Ne, params.edgeBlurSize );
+    Ne = preprocessMSER_8UC3( map, edge, &emean, src, mask, dx, dy, Ne, params.edgeBlurSize );
     emean = emean / (double)Ne;
-    icvQuickSortMSCREdge( edge, Ne, 0 );
-    CvMSCREdge* edge_ub = edge+Ne;
-    CvMSCREdge* edgeptr = edge;
-    CvTempMSCR* mscrptr = mscr;
+    QuickSortMSCREdge( edge, Ne, 0 );
+    MSCREdge* edge_ub = edge+Ne;
+    MSCREdge* edgeptr = edge;
+    TempMSCR* mscrptr = mscr;
     // the evolution process
     for ( int i = 0; i < params.maxEvolution; i++ )
     {
@@ -1135,21 +1126,21 @@ icvExtractMSER_8UC3( CvMat* src,
         // to process all the edges in the list that chi < thres
         while ( edgeptr < edge_ub && edgeptr->chi < thres )
         {
-            CvMSCRNode* lr = icvFindMSCR( edgeptr->left );
-            CvMSCRNode* rr = icvFindMSCR( edgeptr->right );
+            MSCRNode* lr = findMSCR( edgeptr->left );
+            MSCRNode* rr = findMSCR( edgeptr->right );
            // get the region root (who is responsible)
            if ( lr != rr )
            {
                // rank idea take from: N-tree Disjoint-Set Forests for Maximally Stable Extremal Regions
                if ( rr->rank > lr->rank )
                {
-                   CvMSCRNode* tmp;
+                   MSCRNode* tmp;
                    CV_SWAP( lr, rr, tmp );
                } else if ( lr->rank == rr->rank ) {
                    // at the same rank, we will compare the size
                    if ( lr->size > rr->size )
                    {
-                       CvMSCRNode* tmp;
+                       MSCRNode* tmp;
                        CV_SWAP( lr, rr, tmp );
                    }
                    lr->rank++;
@@ -1181,7 +1172,7 @@ icvExtractMSER_8UC3( CvMat* src,
            if ( s < lr->s )
            {
                // skip the first one and check stablity
-               if ( i > lr->reinit+1 && icvMSCRStableCheck( lr, params ) )
+               if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) )
                {
                    if ( lr->tmsr == NULL )
                    {
@@ -1202,13 +1193,13 @@ icvExtractMSER_8UC3( CvMat* src,
         if ( edgeptr >= edge_ub )
             break;
     }
-    for ( CvTempMSCR* ptr = mscr; ptr < mscrptr; ptr++ )
+    for ( TempMSCR* ptr = mscr; ptr < mscrptr; ptr++ )
         // to prune area with margin less than minMargin
         if ( ptr->m > params.minMargin )
         {
             CvSeq* _contour = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage );
             cvSeqPushMulti( _contour, 0, ptr->size );
-            CvMSCRNode* lpt = ptr->head;
+            MSCRNode* lpt = ptr->head;
             for ( int i = 0; i < ptr->size; i++ )
             {
                 CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, _contour, i );
@@ -1228,12 +1219,12 @@ icvExtractMSER_8UC3( CvMat* src,
     cvFree( &map );
 }
 
-void
-cvExtractMSER( CvArr* _img,
+static void
+extractMSER( CvArr* _img,
                CvArr* _mask,
                CvSeq** _contours,
                CvMemStorage* storage,
-               CvMSERParams params )
+               MSERParams params )
 {
     CvMat srchdr, *src = cvGetMat( _img, &srchdr );
     CvMat maskhdr, *mask = _mask ? cvGetMat( _mask, &maskhdr ) : 0;
@@ -1252,30 +1243,24 @@ cvExtractMSER( CvArr* _img,
     switch ( CV_MAT_TYPE(src->type) )
     {
     case CV_8UC1:
-        icvExtractMSER_8UC1( src, mask, contours, storage, params );
+        extractMSER_8UC1( src, mask, contours, storage, params );
        break;
    case CV_8UC3:
-        icvExtractMSER_8UC3( src, mask, contours, storage, params );
+        extractMSER_8UC3( src, mask, contours, storage, params );
        break;
    }
 }
 
 
-namespace cv
-{
-
-MSER::MSER()
-{
-    *(CvMSERParams*)this = cvMSERParams();
-}
-
 MSER::MSER( int _delta, int _min_area, int _max_area,
             double _max_variation, double _min_diversity,
             int _max_evolution, double _area_threshold,
             double _min_margin, int _edge_blur_size )
+    : delta(_delta), minArea(_min_area), maxArea(_max_area),
+      maxVariation(_max_variation), minDiversity(_min_diversity),
+      maxEvolution(_max_evolution), areaThreshold(_area_threshold),
+      minMargin(_min_margin), edgeBlurSize(_edge_blur_size)
 {
-    *(CvMSERParams*)this = cvMSERParams(_delta, _min_area, _max_area, (float)_max_variation,
-        (float)_min_diversity, _max_evolution, _area_threshold, _min_margin, _edge_blur_size);
 }
 
 void MSER::operator()( const Mat& image, vector<vector<Point> >& dstcontours, const Mat& mask ) const
@@ -1285,12 +1270,56 @@ void MSER::operator()( const Mat& image, vector<vector<Point> >& dstcontours, co
         pmask = &(_mask = mask);
     MemStorage storage(cvCreateMemStorage(0));
     Seq<CvSeq*> contours;
-    cvExtractMSER( &_image, pmask, &contours.seq, storage, *(const CvMSERParams*)this );
+    extractMSER( &_image, pmask, &contours.seq, storage,
+                 MSERParams(delta, minArea, maxArea, maxVariation, minDiversity,
+                            maxEvolution, areaThreshold, minMargin, edgeBlurSize));
     SeqIterator<CvSeq*> it = contours.begin();
     size_t i, ncontours = contours.size();
     dstcontours.resize(ncontours);
     for( i = 0; i < ncontours; i++, ++it )
         Seq<Point>(*it).copyTo(dstcontours[i]);
 }
 
+
+void MserFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
+{
+    vector<vector<Point> > msers;
+
+    (*this)(image, msers, mask);
+
+    vector<vector<Point> >::const_iterator contour_it = msers.begin();
+    for( ; contour_it != msers.end(); ++contour_it )
+    {
+        // TODO check transformation from MSER region to KeyPoint
+        RotatedRect rect = fitEllipse(Mat(*contour_it));
+        float diam = sqrt(rect.size.height*rect.size.width);
+
+        if( diam > std::numeric_limits<float>::epsilon() )
+            keypoints.push_back( KeyPoint( rect.center, diam, rect.angle) );
+    }
+}
+
+static Algorithm* createMSER() { return new MSER; }
+static AlgorithmInfo mser_info("Feature2D.MSER", createMSER);
+
+AlgorithmInfo* MSER::info() const
+{
+    static volatile bool initialized = false;
+    if( !initialized )
+    {
+        mser_info.addParam(this, "delta", delta);
+        mser_info.addParam(this, "minArea", minArea);
+        mser_info.addParam(this, "maxArea", maxArea);
+        mser_info.addParam(this, "maxVariation", maxVariation);
+        mser_info.addParam(this, "minDiversity", minDiversity);
+        mser_info.addParam(this, "maxEvolution", maxEvolution);
+        mser_info.addParam(this, "areaThreshold", areaThreshold);
+        mser_info.addParam(this, "minMargin", minMargin);
+        mser_info.addParam(this, "edgeBlurSize", edgeBlurSize);
+
+        initialized = true;
+    }
+    return &mser_info;
+}
+
 }
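For orientation, a minimal usage sketch (not part of this change) of the refactored MSER interface: it assumes the nine-argument constructor and the operator()( image, contours, mask ) overload shown above; the parameter values and the image path are illustrative, since the real defaults live in the header rather than in this diff. Because MSER::info() now registers every field with AlgorithmInfo, the same parameters can also be read or changed by name through the generic Algorithm get/set interface.

#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"

int main()
{
    cv::Mat gray = cv::imread("image.png", 0);           // illustrative input path

    // Illustrative parameter values: delta, minArea, maxArea, maxVariation, minDiversity,
    // maxEvolution, areaThreshold, minMargin, edgeBlurSize.
    cv::MSER mser(5, 60, 14400, 0.25, 0.2, 200, 1.01, 0.003, 5);

    std::vector<std::vector<cv::Point> > regions;
    mser(gray, regions, cv::Mat());                       // operator()( image, contours, mask )
    return 0;
}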
|
@@ -94,27 +94,6 @@ HarrisResponses(const Mat& img, vector<KeyPoint>& pts, int blockSize, float harr
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-struct KeypointResponseGreaterThanThreshold
-{
-    KeypointResponseGreaterThanThreshold(float _value) :
-        value(_value)
-    {
-    }
-    inline bool operator()(const KeyPoint& kpt) const
-    {
-        return kpt.response >= value;
-    }
-    float value;
-};
-
-struct KeypointResponseGreater
-{
-    inline bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) const
-    {
-        return kp1.response > kp2.response;
-    }
-};
-
 static float IC_Angle(const Mat& image, const int half_k, Point2f pt,
                       const vector<int> & u_max)
 {
@@ -224,37 +203,37 @@ static void computeOrbDescriptor(const KeyPoint& kpt,
     {
         for (int i = 0; i < dsize; ++i, pattern += 16)
         {
-            int t0, t1, t2, t3, a, b, k, val;
+            int t0, t1, t2, t3, u, v, k, val;
             t0 = GET_VALUE(0); t1 = GET_VALUE(1);
             t2 = GET_VALUE(2); t3 = GET_VALUE(3);
-            a = 0, b = 2;
-            if( t1 > t0 ) t0 = t1, a = 1;
-            if( t3 > t2 ) t2 = t3, b = 3;
-            k = t0 > t2 ? a : b;
+            u = 0, v = 2;
+            if( t1 > t0 ) t0 = t1, u = 1;
+            if( t3 > t2 ) t2 = t3, v = 3;
+            k = t0 > t2 ? u : v;
            val = k;
 
            t0 = GET_VALUE(4); t1 = GET_VALUE(5);
           t2 = GET_VALUE(6); t3 = GET_VALUE(7);
-           a = 0, b = 2;
-           if( t1 > t0 ) t0 = t1, a = 1;
-           if( t3 > t2 ) t2 = t3, b = 3;
-           k = t0 > t2 ? a : b;
+           u = 0, v = 2;
+           if( t1 > t0 ) t0 = t1, u = 1;
+           if( t3 > t2 ) t2 = t3, v = 3;
+           k = t0 > t2 ? u : v;
           val |= k << 2;
 
           t0 = GET_VALUE(8); t1 = GET_VALUE(9);
          t2 = GET_VALUE(10); t3 = GET_VALUE(11);
-           a = 0, b = 2;
-           if( t1 > t0 ) t0 = t1, a = 1;
-           if( t3 > t2 ) t2 = t3, b = 3;
-           k = t0 > t2 ? a : b;
+           u = 0, v = 2;
+           if( t1 > t0 ) t0 = t1, u = 1;
+           if( t3 > t2 ) t2 = t3, v = 3;
+           k = t0 > t2 ? u : v;
          val |= k << 4;
 
         t0 = GET_VALUE(12); t1 = GET_VALUE(13);
        t2 = GET_VALUE(14); t3 = GET_VALUE(15);
-           a = 0, b = 2;
-           if( t1 > t0 ) t0 = t1, a = 1;
-           if( t3 > t2 ) t2 = t3, b = 3;
-           k = t0 > t2 ? a : b;
+           u = 0, v = 2;
+           if( t1 > t0 ) t0 = t1, u = 1;
+           if( t3 > t2 ) t2 = t3, v = 3;
+           k = t0 > t2 ? u : v;
       val |= k << 6;
 
       desc[i] = (uchar)val;
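The u/v renaming above does not change the WTA_K descriptor logic: each group of four pattern samples contributes the 2-bit index of its winning value, and four such groups are packed into one descriptor byte. A minimal standalone sketch of that packing (the helper name packWTA4 is hypothetical and does not exist in the file):

#include <cstdio>

// Mirrors the t0..t3 / u / v comparisons of computeOrbDescriptor for one output byte.
static unsigned char packWTA4(const int t[16])
{
    unsigned char val = 0;
    for (int g = 0; g < 4; ++g)
    {
        int t0 = t[4*g], t1 = t[4*g+1], t2 = t[4*g+2], t3 = t[4*g+3];
        int u = 0, v = 2;
        if (t1 > t0) { t0 = t1; u = 1; }
        if (t3 > t2) { t2 = t3; v = 3; }
        int k = t0 > t2 ? u : v;             // 2-bit index of the winning sample
        val |= (unsigned char)(k << (2*g));  // 4 groups of 2 bits per byte
    }
    return val;
}

int main()
{
    const int samples[16] = { 3,9,1,4, 7,2,8,5, 0,1,2,6, 4,4,4,4 };
    std::printf("0x%02x\n", packWTA4(samples));
    return 0;
}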
@ -567,168 +546,195 @@ static void makeRandomPattern(int patchSize, Point* pattern, int npoints)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
void ORB::CommonParams::read(const FileNode& fn)
|
static Algorithm* createORB() { return new ORB; }
|
||||||
|
static AlgorithmInfo orb_info("Feature2D.ORB", createORB);
|
||||||
|
|
||||||
|
AlgorithmInfo* ORB::info() const
|
||||||
{
|
{
|
||||||
scale_factor_ = fn["scaleFactor"];
|
static volatile bool initialized = false;
|
||||||
n_levels_ = int(fn["nLevels"]);
|
if( !initialized )
|
||||||
first_level_ = int(fn["firstLevel"]);
|
{
|
||||||
edge_threshold_ = fn["edgeThreshold"];
|
orb_info.addParam(this, "nFeatures", nfeatures);
|
||||||
patch_size_ = fn["patchSize"];
|
orb_info.addParam(this, "scaleFactor", scaleFactor);
|
||||||
WTA_K_ = fn["WTA_K"];
|
orb_info.addParam(this, "nLevels", nlevels);
|
||||||
if( WTA_K_ == 0 ) WTA_K_ = 2;
|
orb_info.addParam(this, "firstLevel", firstLevel);
|
||||||
score_type_ = fn["scoreType"];
|
orb_info.addParam(this, "edgeThreshold", edgeThreshold);
|
||||||
|
orb_info.addParam(this, "patchSize", patchSize);
|
||||||
|
orb_info.addParam(this, "WTA_K", WTA_K);
|
||||||
|
orb_info.addParam(this, "scoreType", scoreType);
|
||||||
|
|
||||||
|
initialized = true;
|
||||||
|
}
|
||||||
|
return &orb_info;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ORB::CommonParams::write(FileStorage& fs) const
|
static inline float getScale(int level, int firstLevel, double scaleFactor)
|
||||||
{
|
{
|
||||||
fs << "scaleFactor" << scale_factor_;
|
return (float)std::pow(scaleFactor, (double)(level - firstLevel));
|
||||||
fs << "nLevels" << int(n_levels_);
|
|
||||||
fs << "firstLevel" << int(first_level_);
|
|
||||||
fs << "edgeThreshold" << int(edge_threshold_);
|
|
||||||
fs << "patchSize" << int(patch_size_);
|
|
||||||
fs << "WTA_K" << WTA_K_;
|
|
||||||
fs << "scoreType" << score_type_;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ORB::read(const FileNode& fn)
|
|
||||||
{
|
|
||||||
CommonParams params;
|
|
||||||
params.read(fn);
|
|
||||||
int n_features = int(fn["nFeatures"]);
|
|
||||||
*this = ORB(n_features, params);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ORB::write(FileStorage& fs) const
|
|
||||||
{
|
|
||||||
params_.write(fs);
|
|
||||||
fs << "nFeatures" << int(n_features_);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline float get_scale(const ORB::CommonParams& params, int level)
|
|
||||||
{
|
|
||||||
return std::pow(params.scale_factor_, float(level) - float(params.first_level_));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Constructor
|
/** Constructor
|
||||||
* @param detector_params parameters to use
|
* @param detector_params parameters to use
|
||||||
*/
|
*/
|
||||||
ORB::ORB(size_t n_features, const CommonParams & detector_params) :
|
ORB::ORB(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold,
|
||||||
params_(detector_params), n_features_(n_features)
|
int _firstLevel, int WTA_K, int _scoreType, int _patchSize) :
|
||||||
{
|
nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
|
||||||
// fill the extractors and descriptors for the corresponding scales
|
edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(WTA_K),
|
||||||
int n_levels = (int)params_.n_levels_;
|
scoreType(_scoreType), patchSize(_patchSize)
|
||||||
float factor = (float)(1.0 / params_.scale_factor_);
|
{}
|
||||||
float n_desired_features_per_scale = n_features_*(1 - factor)/(1 - (float)pow((double)factor, (double)n_levels));
|
|
||||||
|
|
||||||
n_features_per_level_.resize(n_levels);
|
|
||||||
int sum_n_features = 0;
|
|
||||||
for( int level = 0; level < n_levels-1; level++ )
|
|
||||||
{
|
|
||||||
n_features_per_level_[level] = cvRound(n_desired_features_per_scale);
|
|
||||||
sum_n_features += n_features_per_level_[level];
|
|
||||||
n_desired_features_per_scale *= factor;
|
|
||||||
}
|
|
||||||
n_features_per_level_[n_levels-1] = n_features - sum_n_features;
|
|
||||||
|
|
||||||
// Make sure we forget about what is too close to the boundary
|
|
||||||
//params_.edge_threshold_ = std::max(params_.edge_threshold_, params_.patch_size_/2 + kKernelWidth / 2 + 2);
|
|
||||||
|
|
||||||
// pre-compute the end of a row in a circular patch
|
|
||||||
int half_patch_size = params_.patch_size_ / 2;
|
|
||||||
u_max_.resize(half_patch_size + 1);
|
|
||||||
for (int v = 0; v <= half_patch_size * sqrt(2.f) / 2 + 1; ++v)
|
|
||||||
u_max_[v] = cvRound(sqrt(float(half_patch_size * half_patch_size - v * v)));
|
|
||||||
|
|
||||||
// Make sure we are symmetric
|
|
||||||
for (int v = half_patch_size, v_0 = 0; v >= half_patch_size * sqrt(2.f) / 2; --v)
|
|
||||||
{
|
|
||||||
while (u_max_[v_0] == u_max_[v_0 + 1])
|
|
||||||
++v_0;
|
|
||||||
u_max_[v] = v_0;
|
|
||||||
++v_0;
|
|
||||||
}
|
|
||||||
|
|
||||||
const int npoints = 512;
|
|
||||||
Point pattern_buf[npoints];
|
|
||||||
const Point* pattern0 = (const Point*)bit_pattern_31_;
|
|
||||||
if( params_.patch_size_ != 31 )
|
|
||||||
{
|
|
||||||
pattern0 = pattern_buf;
|
|
||||||
makeRandomPattern(params_.patch_size_, pattern_buf, npoints);
|
|
||||||
}
|
|
||||||
|
|
||||||
CV_Assert( params_.WTA_K_ == 2 || params_.WTA_K_ == 3 || params_.WTA_K_ == 4 );
|
|
||||||
|
|
||||||
if( params_.WTA_K_ == 2 )
|
|
||||||
std::copy(pattern0, pattern0 + npoints, back_inserter(pattern));
|
|
||||||
else
|
|
||||||
{
|
|
||||||
int ntuples = descriptorSize()*4;
|
|
||||||
initializeOrbPattern(pattern0, pattern, ntuples, params_.WTA_K_, npoints);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/** destructor to empty the patterns */
|
|
||||||
ORB::~ORB()
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
/** returns the descriptor size in bytes */
|
|
||||||
int ORB::descriptorSize() const
|
int ORB::descriptorSize() const
|
||||||
{
|
{
|
||||||
return kBytes;
|
return kBytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int ORB::descriptorType() const
|
||||||
|
{
|
||||||
|
return CV_8U;
|
||||||
|
}
|
||||||
|
|
||||||
/** Compute the ORB features and descriptors on an image
|
/** Compute the ORB features and descriptors on an image
|
||||||
* @param img the image to compute the features and descriptors on
|
* @param img the image to compute the features and descriptors on
|
||||||
* @param mask the mask to apply
|
* @param mask the mask to apply
|
||||||
* @param keypoints the resulting keypoints
|
* @param keypoints the resulting keypoints
|
||||||
*/
|
*/
|
||||||
void ORB::operator()(const Mat &image, const Mat &mask, vector<KeyPoint> & keypoints)
|
void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const
|
||||||
{
|
{
|
||||||
Mat empty_descriptors;
|
(*this)(image, mask, keypoints, noArray(), false);
|
||||||
this->operator ()(image, mask, keypoints, empty_descriptors, true, false);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Compute the ORB features and descriptors on an image
|
|
||||||
* @param img the image to compute the features and descriptors on
|
/** Compute the ORB keypoint orientations
|
||||||
* @param mask the mask to apply
|
* @param image the image to compute the features and descriptors on
|
||||||
|
* @param integral_image the integral image of the iamge (can be empty, but the computation will be slower)
|
||||||
|
* @param scale the scale at which we compute the orientation
|
||||||
* @param keypoints the resulting keypoints
|
* @param keypoints the resulting keypoints
|
||||||
* @param descriptors the resulting descriptors
|
|
||||||
* @param useProvidedKeypoints if true, the keypoints are used as an input
|
|
||||||
*/
|
*/
|
||||||
void ORB::operator()(const Mat &image, const Mat &mask, vector<KeyPoint> & keypoints,
|
static void computeOrientation(const Mat& image, vector<KeyPoint>& keypoints,
|
||||||
Mat & descriptors, bool useProvidedKeypoints)
|
int halfPatchSize, const vector<int>& umax)
|
||||||
{
|
{
|
||||||
this->operator ()(image, mask, keypoints, descriptors, !useProvidedKeypoints, true);
|
// Process each keypoint
|
||||||
}
|
for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
|
||||||
|
keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
|
||||||
//takes keypoints and culls them by the response
|
|
||||||
static void cull(vector<KeyPoint>& keypoints, size_t n_points)
|
|
||||||
{
|
|
||||||
//this is only necessary if the keypoints size is greater than the number of desired points.
|
|
||||||
if (keypoints.size() > n_points)
|
|
||||||
{
|
{
|
||||||
if (n_points==0) {
|
keypoint->angle = IC_Angle(image, halfPatchSize, keypoint->pt, umax);
|
||||||
keypoints.clear();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
//first use nth element to partition the keypoints into the best and worst.
|
|
||||||
std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
|
|
||||||
//this is the boundary response, and in the case of FAST may be ambigous
|
|
||||||
float ambiguous_response = keypoints[n_points - 1].response;
|
|
||||||
//use std::partition to grab all of the keypoints with the boundary response.
|
|
||||||
vector<KeyPoint>::const_iterator new_end =
|
|
||||||
std::partition(keypoints.begin() + n_points, keypoints.end(),
|
|
||||||
KeypointResponseGreaterThanThreshold(ambiguous_response));
|
|
||||||
//resize the keypoints, given this new end point. nth_element and partition reordered the points inplace
|
|
||||||
keypoints.resize(new_end - keypoints.begin());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** Compute the ORB keypoints on an image
|
||||||
|
 * @param imagePyramid the image pyramid to compute the features and descriptors on
 * @param maskPyramid the masks to apply at every level
 * @param allKeypoints the resulting keypoints, clustered per level
 */
static void computeKeyPoints(const vector<Mat>& imagePyramid,
                             const vector<Mat>& maskPyramid,
                             vector<vector<KeyPoint> >& allKeypoints,
                             int nfeatures, int firstLevel, double scaleFactor,
                             int edgeThreshold, int patchSize, int scoreType )
{
    int nlevels = (int)imagePyramid.size();
    vector<int> nfeaturesPerLevel(nlevels);

    // fill the extractors and descriptors for the corresponding scales
    float factor = (float)(1.0 / scaleFactor);
    float ndesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels));

    int sumFeatures = 0;
    for( int level = 0; level < nlevels-1; level++ )
    {
        nfeaturesPerLevel[level] = cvRound(ndesiredFeaturesPerScale);
        sumFeatures += nfeaturesPerLevel[level];
        ndesiredFeaturesPerScale *= factor;
    }
    nfeaturesPerLevel[nlevels-1] = std::max(nfeatures - sumFeatures, 0);

    // Make sure we forget about what is too close to the boundary
    //edge_threshold_ = std::max(edge_threshold_, patch_size_/2 + kKernelWidth / 2 + 2);

    // pre-compute the end of a row in a circular patch
    int halfPatchSize = patchSize / 2;
    vector<int> umax(halfPatchSize + 1);

    int v, v0, vmax = cvFloor(halfPatchSize * sqrt(2.f) / 2 + 1);
    int vmin = cvCeil(halfPatchSize * sqrt(2.f) / 2);
    for (v = 0; v <= vmax; ++v)
        umax[v] = cvRound(sqrt(halfPatchSize * halfPatchSize - v * v));

    // Make sure we are symmetric
    for (v = halfPatchSize, v0 = 0; v >= vmin; --v)
    {
        while (umax[v0] == umax[v0 + 1])
            ++v0;
        umax[v] = v0;
        ++v0;
    }

    allKeypoints.resize(nlevels);

    for (int level = 0; level < nlevels; ++level)
    {
        int nfeatures = nfeaturesPerLevel[level];
        allKeypoints[level].reserve(nfeatures*2);

        vector<KeyPoint> & keypoints = allKeypoints[level];

        // Detect FAST features, 20 is a good threshold
        FastFeatureDetector fd(20, true);
        fd.detect(imagePyramid[level], keypoints, maskPyramid[level]);

        // Remove keypoints very close to the border
        KeyPointsFilter::runByImageBorder(keypoints, imagePyramid[level].size(), edgeThreshold);

        if( scoreType == ORB::HARRIS_SCORE )
        {
            // Keep more points than necessary as FAST does not give amazing corners
            KeyPointsFilter::retainBest(keypoints, 2 * nfeatures);

            // Compute the Harris cornerness (better scoring than FAST)
            HarrisResponses(imagePyramid[level], keypoints, 7, HARRIS_K);
        }

        //cull to the final desired level, using the new Harris scores or the original FAST scores.
        KeyPointsFilter::retainBest(keypoints, nfeatures);

        float sf = getScale(level, firstLevel, scaleFactor);

        // Set the level of the coordinates
        for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
             keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
        {
            keypoint->octave = level;
            keypoint->size = patchSize*sf;
        }

        computeOrientation(imagePyramid[level], keypoints, halfPatchSize, umax);
    }
}


/** Compute the ORB descriptors
 * @param image the image to compute the descriptors on
 * @param keypoints the keypoints to use
 * @param descriptors the resulting descriptors
 */
static void computeDescriptors(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors,
                               const vector<Point>& pattern, int dsize, int WTA_K)
{
    //convert to grayscale if more than one color
    CV_Assert(image.type() == CV_8UC1);
    //create the descriptor mat, keypoints.size() rows, BYTES cols
    descriptors = Mat::zeros((int)keypoints.size(), dsize, CV_8UC1);

    for (size_t i = 0; i < keypoints.size(); i++)
        computeOrbDescriptor(keypoints[i], image, &pattern[0], descriptors.ptr((int)i), dsize, WTA_K);
}

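As a side note, the geometric per-level feature budget used above can be reproduced outside OpenCV. The following is a minimal standalone sketch; the values of nfeatures, scaleFactor and nlevels are only examples and are not taken from this commit.

// Standalone sketch (no OpenCV needed) of how the per-level budget is derived.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    int nfeatures = 500, nlevels = 8;      // example values
    double scaleFactor = 1.2;

    double factor = 1.0 / scaleFactor;
    double ndesired = nfeatures * (1 - factor) / (1 - std::pow(factor, (double)nlevels));

    std::vector<int> nfeaturesPerLevel(nlevels);
    int sumFeatures = 0;
    for (int level = 0; level < nlevels - 1; level++)
    {
        nfeaturesPerLevel[level] = (int)std::floor(ndesired + 0.5); // round to nearest (cvRound in the original)
        sumFeatures += nfeaturesPerLevel[level];
        ndesired *= factor;                                          // geometric decay per level
    }
    nfeaturesPerLevel[nlevels - 1] = std::max(nfeatures - sumFeatures, 0); // remainder goes to the last level

    for (int level = 0; level < nlevels; level++)
        std::printf("level %d: %d features\n", level, nfeaturesPerLevel[level]);
    return 0;
}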
/** Compute the ORB features and descriptors on an image
 * @param img the image to compute the features and descriptors on
@ -738,24 +744,25 @@ static void cull(vector<KeyPoint>& keypoints, size_t n_points)
 * @param do_keypoints if true, the keypoints are computed, otherwise used as an input
 * @param do_descriptors if true, also computes the descriptors
 */
void ORB::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
                      OutputArray _descriptors, bool useProvidedKeypoints) const
{
    bool do_keypoints = !useProvidedKeypoints;
    bool do_descriptors = _descriptors.needed();

    if( (!do_keypoints && !do_descriptors) || _image.empty() )
        return;

    //ROI handling
    const int HARRIS_BLOCK_SIZE = 9;
    int halfPatchSize = patchSize / 2;
    int border = std::max(edgeThreshold, std::max(halfPatchSize, HARRIS_BLOCK_SIZE/2))+1;

    Mat image = _image.getMat(), mask = _mask.getMat();
    if( image.type() != CV_8UC1 )
        cvtColor(_image, image, CV_BGR2GRAY);

    int nlevels = this->nlevels;

    if( !do_keypoints )
    {
@ -768,46 +775,46 @@ void ORB::operator()(const Mat &image_in, const Mat &mask, vector<KeyPoint> & ke
        //
        // In short, ultimately the descriptor should
        // ignore octave parameter and deal only with the keypoint size.
        nlevels = 0;
        for( size_t i = 0; i < _keypoints.size(); i++ )
            nlevels = std::max(nlevels, std::max(_keypoints[i].octave, 0));
        nlevels++;
    }

    // Pre-compute the scale pyramids
    vector<Mat> imagePyramid(nlevels), maskPyramid(nlevels);
    for (int level = 0; level < nlevels; ++level)
    {
        float scale = 1/getScale(level, firstLevel, scaleFactor);
        Size sz(cvRound(image.cols*scale), cvRound(image.rows*scale));
        Size wholeSize(sz.width + border*2, sz.height + border*2);
        Mat temp(wholeSize, image.type()), masktemp;
        imagePyramid[level] = temp(Rect(border, border, sz.width, sz.height));

        if( !mask.empty() )
        {
            masktemp = Mat(wholeSize, mask.type());
            maskPyramid[level] = masktemp(Rect(border, border, sz.width, sz.height));
        }

        // Compute the resized image
        if( level != firstLevel )
        {
            if( level < firstLevel )
            {
                resize(image, imagePyramid[level], sz, scale, scale, INTER_LINEAR);
                if (!mask.empty())
                    resize(mask, maskPyramid[level], sz, scale, scale, INTER_LINEAR);
                copyMakeBorder(imagePyramid[level], temp, border, border, border, border,
                               BORDER_REFLECT_101+BORDER_ISOLATED);
            }
            else
            {
                float sf = scaleFactor;
                resize(imagePyramid[level-1], imagePyramid[level], sz, 1./sf, 1./sf, INTER_LINEAR);
                if (!mask.empty())
                    resize(maskPyramid[level-1], maskPyramid[level], sz, 1./sf, 1./sf, INTER_LINEAR);
                copyMakeBorder(imagePyramid[level], temp, border, border, border, border,
                               BORDER_REFLECT_101+BORDER_ISOLATED);
            }
        }
@ -815,22 +822,24 @@ void ORB::operator()(const Mat &image_in, const Mat &mask, vector<KeyPoint> & ke
        {
            copyMakeBorder(image, temp, border, border, border, border,
                           BORDER_REFLECT_101);
            image.copyTo(imagePyramid[level]);
            if( !mask.empty() )
                mask.copyTo(maskPyramid[level]);
        }

        if( !mask.empty() )
            copyMakeBorder(maskPyramid[level], masktemp, border, border, border, border,
                           BORDER_CONSTANT+BORDER_ISOLATED);
    }

    // Pre-compute the keypoints (we keep the best over all scales, so this has to be done beforehand)
    vector < vector<KeyPoint> > allKeypoints;
    if( do_keypoints )
    {
        // Get keypoints, those will be far enough from the border that no check will be required for the descriptor
        computeKeyPoints(imagePyramid, maskPyramid, allKeypoints,
                         nfeatures, firstLevel, scaleFactor,
                         edgeThreshold, patchSize, scoreType);

        // make sure we have the right number of keypoints
        /*vector<KeyPoint> temp;
@ -842,7 +851,7 @@ void ORB::operator()(const Mat &image_in, const Mat &mask, vector<KeyPoint> & ke
            keypoints.clear();
        }

        KeyPoint::retainBest(temp, n_features_);

        for (vector<KeyPoint>::iterator keypoint = temp.begin(),
             keypoint_end = temp.end(); keypoint != keypoint_end; ++keypoint)
@ -851,48 +860,72 @@ void ORB::operator()(const Mat &image_in, const Mat &mask, vector<KeyPoint> & ke
    else
    {
        // Remove keypoints very close to the border
        KeyPointsFilter::runByImageBorder(_keypoints, image.size(), edgeThreshold);

        // Cluster the input keypoints depending on the level they were computed at
        allKeypoints.resize(nlevels);
        for (vector<KeyPoint>::iterator keypoint = _keypoints.begin(),
             keypointEnd = _keypoints.end(); keypoint != keypointEnd; ++keypoint)
            allKeypoints[keypoint->octave].push_back(*keypoint);

        // Make sure we rescale the coordinates
        for (int level = 0; level < nlevels; ++level)
        {
            if (level == firstLevel)
                continue;

            vector<KeyPoint> & keypoints = allKeypoints[level];
            float scale = 1/getScale(level, firstLevel, scaleFactor);
            for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
                 keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
                keypoint->pt *= scale;
        }
    }

    Mat descriptors;
    vector<Point> pattern;

    if( do_descriptors )
    {
        int nkeypoints = 0;
        for (int level = 0; level < nlevels; ++level)
            nkeypoints += (int)allKeypoints[level].size();
        if( nkeypoints == 0 )
            _descriptors.release();
        else
        {
            _descriptors.create(nkeypoints, descriptorSize(), CV_8U);
            descriptors = _descriptors.getMat();
        }

        const int npoints = 512;
        Point patternbuf[npoints];
        const Point* pattern0 = (const Point*)bit_pattern_31_;

        if( patchSize != 31 )
        {
            pattern0 = patternbuf;
            makeRandomPattern(patchSize, patternbuf, npoints);
        }

        CV_Assert( WTA_K == 2 || WTA_K == 3 || WTA_K == 4 );

        if( WTA_K == 2 )
            std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern));
        else
        {
            int ntuples = descriptorSize()*4;
            initializeOrbPattern(pattern0, pattern, ntuples, WTA_K, npoints);
        }
    }

    _keypoints.clear();
    int offset = 0;
    for (int level = 0; level < nlevels; ++level)
    {
        // Get the features and compute their orientation
        vector<KeyPoint>& keypoints = allKeypoints[level];
        int nkeypoints = (int)keypoints.size();

        // Compute the descriptors
        if (do_descriptors)
@ -900,122 +933,33 @@ void ORB::operator()(const Mat &image_in, const Mat &mask, vector<KeyPoint> & ke
        {
            Mat desc = descriptors.rowRange(offset, offset + nkeypoints);
            offset += nkeypoints;
            // preprocess the resized image
            Mat& workingMat = imagePyramid[level];
            //boxFilter(working_mat, working_mat, working_mat.depth(), Size(5,5), Point(-1,-1), true, BORDER_REFLECT_101);
            GaussianBlur(workingMat, workingMat, Size(7, 7), 2, 2, BORDER_REFLECT_101);
            computeDescriptors(workingMat, keypoints, desc, pattern, descriptorSize(), WTA_K);
        }

        // Copy to the output data
        if (level != firstLevel)
        {
            float scale = getScale(level, firstLevel, scaleFactor);
            for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
                 keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
                keypoint->pt *= scale;
        }
        // And add the keypoints to the output
        _keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
    }
}

void ORB::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
    (*this)(image, mask, keypoints, noArray(), false);
}

void ORB::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
    (*this)(image, Mat(), keypoints, descriptors, true);
}

}
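For reference, a minimal usage sketch of the refactored interface follows. It assumes the cv::ORB class declared in features2d.hpp at this stage of the refactoring; the image file name and the feature count are hypothetical example values, not part of this commit.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", 0);   // hypothetical input, loaded as grayscale
    if( img.empty() )
        return 1;

    cv::ORB orb(500);                           // request up to 500 features (example value)
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;

    // detect and compute in one call; passing useProvidedKeypoints=true instead
    // would reuse keypoints computed elsewhere
    orb(img, cv::Mat(), keypoints, descriptors, false);
    return 0;
}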
File diff suppressed because it is too large
@ -10,7 +10,7 @@
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -41,20 +41,23 @@

#include "precomp.hpp"

namespace cv
{

static void
computeIntegralImages( const Mat& matI, Mat& matS, Mat& matT, Mat& _FT )
{
    CV_Assert( matI.type() == CV_8U );

    int x, y, rows = matI.rows, cols = matI.cols;

    matS.create(rows + 1, cols + 1, CV_32S);
    matT.create(rows + 1, cols + 1, CV_32S);
    _FT.create(rows + 1, cols + 1, CV_32S);

    const uchar* I = matI.ptr<uchar>();
    int *S = matS.ptr<int>(), *T = matT.ptr<int>(), *FT = _FT.ptr<int>();
    int istep = matI.step, step = matS.step/sizeof(S[0]);

    for( x = 0; x <= cols; x++ )
        S[x] = T[x] = FT[x] = 0;
@ -92,16 +95,14 @@ icvComputeIntegralImages( const CvMat* matI, CvMat* matS, CvMat* matT, CvMat* _F
    }
}

struct StarFeature
{
    int area;
    int* p[8];
};

static int
StarDetectorComputeResponses( const Mat& img, Mat& responses, Mat& sizes, int maxSize )
{
    const int MAX_PATTERN = 17;
    static const int sizes0[] = {1, 2, 3, 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128, -1};
@ -117,22 +118,19 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size
    absmask.i = 0x7fffffff;
    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
#endif
    StarFeature f[MAX_PATTERN];

    Mat sum, tilted, flatTilted;
    int y, i=0, rows = img.rows, cols = img.cols;
    int border, npatterns=0, maxIdx=0;

    CV_Assert( img.type() == CV_8UC1 );

    responses.create( img.size(), CV_32F );
    sizes.create( img.size(), CV_16S );

    while( pairs[i][0] >= 0 && !
          ( sizes0[pairs[i][0]] >= maxSize
           || sizes0[pairs[i+1][0]] + sizes0[pairs[i+1][0]]/2 >= std::min(rows, cols) ) )
    {
        ++i;
@ -141,13 +139,9 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size
    npatterns = i;
    npatterns += (pairs[npatterns-1][0] >= 0);
    maxIdx = pairs[npatterns-1][0];

    computeIntegralImages( img, sum, tilted, flatTilted );
    int step = (int)(sum.step/sum.elemSize());

    for( i = 0; i <= maxIdx; i++ )
    {
@ -155,15 +149,15 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size
        int ur_area = (2*ur_size + 1)*(2*ur_size + 1);
        int t_area = t_size*t_size + (t_size + 1)*(t_size + 1);

        f[i].p[0] = sum.ptr<int>() + (ur_size + 1)*step + ur_size + 1;
        f[i].p[1] = sum.ptr<int>() - ur_size*step + ur_size + 1;
        f[i].p[2] = sum.ptr<int>() + (ur_size + 1)*step - ur_size;
        f[i].p[3] = sum.ptr<int>() - ur_size*step - ur_size;

        f[i].p[4] = tilted.ptr<int>() + (t_size + 1)*step + 1;
        f[i].p[5] = flatTilted.ptr<int>() - t_size;
        f[i].p[6] = flatTilted.ptr<int>() + t_size + 1;
        f[i].p[7] = tilted.ptr<int>() - t_size*step + 1;

        f[i].area = ur_area + t_area;
        sizes1[i] = sizes0[i];
@ -199,10 +193,10 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size

    for( y = 0; y < border; y++ )
    {
        float* r_ptr = responses.ptr<float>(y);
        float* r_ptr2 = responses.ptr<float>(rows - 1 - y);
        short* s_ptr = sizes.ptr<short>(y);
        short* s_ptr2 = sizes.ptr<short>(rows - 1 - y);

        memset( r_ptr, 0, cols*sizeof(r_ptr[0]));
        memset( r_ptr2, 0, cols*sizeof(r_ptr2[0]));
@ -210,14 +204,11 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size
        memset( s_ptr2, 0, cols*sizeof(s_ptr2[0]));
    }

    for( y = border; y < rows - border; y++ )
    {
        int x = border, i;
        float* r_ptr = responses.ptr<float>(y);
        short* s_ptr = sizes.ptr<short>(y);

        memset( r_ptr, 0, border*sizeof(r_ptr[0]));
        memset( s_ptr, 0, border*sizeof(s_ptr[0]));
@ -300,22 +291,17 @@ icvStarDetectorComputeResponses( const CvMat* img, CvMat* responses, CvMat* size
        }
    }

    return border;
}


static bool StarDetectorSuppressLines( const Mat& responses, const Mat& sizes, Point pt,
                                       int lineThresholdProjected, int lineThresholdBinarized )
{
    const float* r_ptr = responses.ptr<float>();
    int rstep = (int)(responses.step/sizeof(r_ptr[0]));
    const short* s_ptr = sizes.ptr<short>();
    int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
    int sz = s_ptr[pt.y*sstep + pt.x];
    int x, y, delta = sz/4, radius = delta*4;
    float Lxx = 0, Lyy = 0, Lxy = 0;
@ -329,7 +315,7 @@ icvStarDetectorSuppressLines( const CvMat* responses, const CvMat* sizes, CvPoin
            Lxx += Lx*Lx; Lyy += Ly*Ly; Lxy += Lx*Ly;
        }

    if( (Lxx + Lyy)*(Lxx + Lyy) >= lineThresholdProjected*(Lxx*Lyy - Lxy*Lxy) )
        return true;

    for( y = pt.y - radius; y <= pt.y + radius; y += delta )
@ -340,7 +326,7 @@ icvStarDetectorSuppressLines( const CvMat* responses, const CvMat* sizes, CvPoin
            Lxxb += Lxb * Lxb; Lyyb += Lyb * Lyb; Lxyb += Lxb * Lyb;
        }

    if( (Lxxb + Lyyb)*(Lxxb + Lyyb) >= lineThresholdBinarized*(Lxxb*Lyyb - Lxyb*Lxyb) )
        return true;

    return false;
@ -348,24 +334,27 @@ icvStarDetectorSuppressLines( const CvMat* responses, const CvMat* sizes, CvPoin


static void
StarDetectorSuppressNonmax( const Mat& responses, const Mat& sizes,
                            vector<KeyPoint>& keypoints, int border,
                            int responseThreshold,
                            int lineThresholdProjected,
                            int lineThresholdBinarized,
                            int suppressNonmaxSize )
{
    int x, y, x1, y1, delta = suppressNonmaxSize/2;
    int rows = responses.rows, cols = responses.cols;
    const float* r_ptr = responses.ptr<float>();
    int rstep = (int)(responses.step/sizeof(r_ptr[0]));
    const short* s_ptr = sizes.ptr<short>();
    int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
    short featureSize = 0;

    for( y = border; y < rows - border; y += delta+1 )
        for( x = border; x < cols - border; x += delta+1 )
        {
            float maxResponse = (float)responseThreshold;
            float minResponse = (float)-responseThreshold;
            Point maxPt(-1, -1), minPt(-1, -1);
            int tileEndY = MIN(y + delta, rows - border - 1);
            int tileEndX = MIN(x + delta, cols - border - 1);

@ -376,12 +365,12 @@ icvStarDetectorSuppressNonmax( const CvMat* responses, const CvMat* sizes,
                    if( maxResponse < val )
                    {
                        maxResponse = val;
                        maxPt = Point(x1, y1);
                    }
                    else if( minResponse > val )
                    {
                        minResponse = val;
                        minPt = Point(x1, y1);
                    }
                }

@ -396,10 +385,11 @@ icvStarDetectorSuppressNonmax( const CvMat* responses, const CvMat* sizes,
                }

                if( (featureSize = s_ptr[maxPt.y*sstep + maxPt.x]) >= 4 &&
                    !StarDetectorSuppressLines( responses, sizes, maxPt, lineThresholdProjected,
                                                lineThresholdBinarized ))
                {
                    KeyPoint kpt((float)maxPt.x, (float)maxPt.y, featureSize, -1, maxResponse);
                    keypoints.push_back(kpt);
                }
            }
            skip_max:
@ -414,66 +404,67 @@ icvStarDetectorSuppressNonmax( const CvMat* responses, const CvMat* sizes,
                }

                if( (featureSize = s_ptr[minPt.y*sstep + minPt.x]) >= 4 &&
                    !StarDetectorSuppressLines( responses, sizes, minPt,
                                                lineThresholdProjected, lineThresholdBinarized))
                {
                    KeyPoint kpt((float)minPt.x, (float)minPt.y, featureSize, -1, maxResponse);
                    keypoints.push_back(kpt);
                }
            }
            skip_min:
            ;
        }
}

StarDetector::StarDetector(int _maxSize, int _responseThreshold,
                           int _lineThresholdProjected,
                           int _lineThresholdBinarized,
                           int _suppressNonmaxSize)
: maxSize(_maxSize), responseThreshold(_responseThreshold),
  lineThresholdProjected(_lineThresholdProjected),
  lineThresholdBinarized(_lineThresholdBinarized),
  suppressNonmaxSize(_suppressNonmaxSize)
{}

void StarDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
{
    Mat grayImage = image;
    if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );

    (*this)(grayImage, keypoints);
    KeyPointsFilter::runByPixelsMask( keypoints, mask );
}

void StarDetector::operator()(const Mat& img, vector<KeyPoint>& keypoints) const
{
    Mat responses, sizes;
    int border = StarDetectorComputeResponses( img, responses, sizes, maxSize );
    keypoints.clear();
    if( border >= 0 )
        StarDetectorSuppressNonmax( responses, sizes, keypoints, border,
                                    responseThreshold, lineThresholdProjected,
                                    lineThresholdBinarized, suppressNonmaxSize );
}


static Algorithm* createStarDetector() { return new StarDetector; }
static AlgorithmInfo star_info("Feature2D.STAR", createStarDetector);

AlgorithmInfo* StarDetector::info() const
{
    static volatile bool initialized = false;
    if( !initialized )
    {
        star_info.addParam(this, "maxSize", maxSize);
        star_info.addParam(this, "responseThreshold", responseThreshold);
        star_info.addParam(this, "lineThresholdProjected", lineThresholdProjected);
        star_info.addParam(this, "lineThresholdBinarized", lineThresholdBinarized);
        star_info.addParam(this, "suppressNonmaxSize", suppressNonmaxSize);

        initialized = true;
    }
    return &star_info;
}

}
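The Algorithm registration above ("Feature2D.STAR") makes the detector creatable by name with its parameters exposed through the generic set() interface. A hedged sketch follows; it assumes FeatureDetector::create("STAR") is still available after the refactoring, and the parameter values are examples only (the parameter names mirror the addParam calls above).

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

// Create the STAR detector through the factory and tune it via set().
void detectStar(const cv::Mat& gray, std::vector<cv::KeyPoint>& keypoints)
{
    cv::Ptr<cv::FeatureDetector> star = cv::FeatureDetector::create("STAR");
    if( star.empty() )
        return;                          // registration not available in this build
    star->set("maxSize", 45);            // example values, not defaults from this commit
    star->set("responseThreshold", 30);
    star->detect(gray, keypoints);
}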
@ -1,513 +0,0 @@
|
|||||||
# x1 y1 x2 y2
|
|
||||||
-1 -2 -1 7
|
|
||||||
-1 -14 3 -3
|
|
||||||
-2 1 2 11
|
|
||||||
6 1 -7 -10
|
|
||||||
2 13 0 -1
|
|
||||||
5 -14 -3 5
|
|
||||||
8 -2 4 2
|
|
||||||
8 -11 5 -15
|
|
||||||
-23 -6 -9 8
|
|
||||||
6 -12 8 -10
|
|
||||||
-1 -3 1 8
|
|
||||||
6 3 6 5
|
|
||||||
-6 -7 -5 5
|
|
||||||
-2 22 -8 -11
|
|
||||||
7 14 5 8
|
|
||||||
14 -1 -14 -5
|
|
||||||
9 -14 0 2
|
|
||||||
-3 7 6 22
|
|
||||||
6 -6 -5 -8
|
|
||||||
9 -5 -1 7
|
|
||||||
-7 -3 -18 -10
|
|
||||||
-5 4 11 0
|
|
||||||
3 2 10 9
|
|
||||||
3 -10 9 4
|
|
||||||
12 0 19 -3
|
|
||||||
15 1 -5 -11
|
|
||||||
-1 14 8 7
|
|
||||||
-23 7 5 -5
|
|
||||||
-6 0 17 -10
|
|
||||||
-4 13 -4 -3
|
|
||||||
1 -12 2 -12
|
|
||||||
8 0 22 3
|
|
||||||
13 -13 -1 3
|
|
||||||
17 -16 10 6
|
|
||||||
15 7 0 -5
|
|
||||||
-12 2 -2 19
|
|
||||||
-6 3 -15 -4
|
|
||||||
3 8 14 0
|
|
||||||
-11 4 5 5
|
|
||||||
-7 11 1 7
|
|
||||||
12 6 3 21
|
|
||||||
2 -3 1 14
|
|
||||||
1 5 11 -5
|
|
||||||
-17 3 2 -6
|
|
||||||
8 6 -10 5
|
|
||||||
-2 -14 4 0
|
|
||||||
-7 5 5 -6
|
|
||||||
4 10 -7 4
|
|
||||||
0 22 -18 7
|
|
||||||
-3 -1 18 0
|
|
||||||
22 -4 3 -5
|
|
||||||
-7 1 -3 2
|
|
||||||
-20 19 -2 17
|
|
||||||
-10 3 24 -8
|
|
||||||
-14 -5 5 7
|
|
||||||
12 -2 -15 -4
|
|
||||||
12 4 -19 0
|
|
||||||
13 20 5 3
|
|
||||||
-12 -8 0 5
|
|
||||||
6 -5 -11 -7
|
|
||||||
-11 6 -22 -3
|
|
||||||
4 15 1 10
|
|
||||||
-4 -7 -6 15
|
|
||||||
10 5 24 0
|
|
||||||
6 3 -2 22
|
|
||||||
14 -13 -4 4
|
|
||||||
8 -13 -22 -18
|
|
||||||
-1 -1 3 -7
|
|
||||||
-12 -19 3 4
|
|
||||||
10 8 -2 13
|
|
||||||
-1 -6 -5 -6
|
|
||||||
-21 2 2 -3
|
|
||||||
-7 4 16 0
|
|
||||||
-5 -6 -1 -12
|
|
||||||
-1 1 18 9
|
|
||||||
10 -7 6 -11
|
|
||||||
3 4 -7 19
|
|
||||||
5 -18 5 -4
|
|
||||||
0 4 4 -20
|
|
||||||
-11 7 12 18
|
|
||||||
17 -20 7 -18
|
|
||||||
15 2 -11 19
|
|
||||||
6 -18 3 -7
|
|
||||||
1 -4 13 -14
|
|
||||||
3 17 -8 2
|
|
||||||
2 -7 6 1
|
|
||||||
-9 17 8 -2
|
|
||||||
-6 -8 12 -1
|
|
||||||
4 -2 6 -1
|
|
||||||
7 -2 8 6
|
|
||||||
-1 -8 -9 -7
|
|
||||||
-9 8 0 15
|
|
||||||
22 0 -15 -4
|
|
||||||
-1 -14 -2 3
|
|
||||||
-4 -7 -7 17
|
|
||||||
-2 -8 -4 9
|
|
||||||
-7 5 7 7
|
|
||||||
13 -5 11 -8
|
|
||||||
-4 11 8 0
|
|
||||||
-11 5 -6 -9
|
|
||||||
-6 2 -20 3
|
|
||||||
2 -6 10 6
|
|
||||||
-6 -6 7 -15
|
|
||||||
-3 -6 1 2
|
|
||||||
0 11 2 -3
|
|
||||||
-12 7 5 14
|
|
||||||
-7 0 -1 -1
|
|
||||||
0 -16 8 6
|
|
||||||
11 22 -3 0
|
|
||||||
0 19 -17 5
|
|
||||||
-14 -23 -19 -13
|
|
||||||
10 -8 -2 -11
|
|
||||||
6 -11 13 -10
|
|
||||||
-7 1 0 14
|
|
||||||
1 -12 -5 -5
|
|
||||||
7 4 -1 8
|
|
||||||
-5 -1 2 15
|
|
||||||
-1 -3 -10 7
|
|
||||||
-6 3 -18 10
|
|
||||||
-13 -7 10 -13
|
|
||||||
-1 1 -10 13
|
|
||||||
14 -19 -14 8
|
|
||||||
-13 -4 1 7
|
|
||||||
-2 1 -7 12
|
|
||||||
-5 3 -5 1
|
|
||||||
-2 -2 -10 8
|
|
||||||
14 2 7 8
|
|
||||||
9 3 2 8
|
|
||||||
1 -9 0 -18
|
|
||||||
0 4 12 1
|
|
||||||
9 0 -10 -14
|
|
||||||
-9 -13 6 -2
|
|
||||||
5 1 10 10
|
|
||||||
-6 -3 -5 -16
|
|
||||||
6 11 0 -5
|
|
||||||
10 -23 2 1
|
|
||||||
-5 13 9 -3
|
|
||||||
-1 -4 -5 -13
|
|
||||||
13 10 8 -11
|
|
||||||
20 19 2 -9
|
|
||||||
-8 4 -9 0
|
|
||||||
10 -14 19 15
|
|
||||||
-12 -14 -3 -10
|
|
||||||
-3 -23 -2 17
|
|
||||||
-11 -3 -14 6
|
|
||||||
-2 19 2 -4
|
|
||||||
5 -5 -13 3
|
|
||||||
-2 2 4 -5
|
|
||||||
4 17 -11 17
|
|
||||||
-2 -7 23 1
|
|
||||||
13 8 -16 1
|
|
||||||
-5 -13 -17 1
|
|
||||||
6 4 -3 -8
|
|
||||||
-9 -5 -10 -2
|
|
||||||
0 -9 -2 -7
|
|
||||||
0 5 2 5
|
|
||||||
-16 -4 3 6
|
|
||||||
-15 2 12 -2
|
|
||||||
-1 4 2 6
|
|
||||||
1 1 -8 -2
|
|
||||||
12 -2 -2 -5
|
|
||||||
8 -8 9 -9
|
|
||||||
-10 2 1 3
|
|
||||||
10 -4 4 -9
|
|
||||||
12 6 5 2
|
|
||||||
-8 -3 5 0
|
|
||||||
1 -13 2 -7
|
|
||||||
-10 -1 -18 7
|
|
||||||
8 -1 -10 -9
|
|
||||||
-1 -23 2 6
|
|
||||||
-3 -5 2 3
|
|
||||||
11 0 -7 -4
|
|
||||||
2 15 -3 -10
|
|
||||||
-8 -20 3 -13
|
|
||||||
-12 -19 -11 5
|
|
||||||
-13 -17 2 -3
|
|
||||||
4 7 0 -12
|
|
||||||
-1 5 -6 -14
|
|
||||||
11 -4 -4 0
|
|
||||||
10 3 -3 7
|
|
||||||
21 13 6 -11
|
|
||||||
24 -12 -4 -7
|
|
||||||
16 4 -14 3
|
|
||||||
5 -3 -12 -7
|
|
||||||
-4 0 -5 7
|
|
||||||
-9 -17 -7 13
|
|
||||||
-6 22 5 -11
|
|
||||||
-8 2 -11 23
|
|
||||||
-10 7 14 -1
|
|
||||||
-10 -3 3 8
|
|
||||||
1 -13 0 -6
|
|
||||||
-21 -7 -14 6
|
|
||||||
19 18 -6 -4
|
|
||||||
7 10 -4 -1
|
|
||||||
21 -1 -5 1
|
|
||||||
6 -10 -2 -11
|
|
||||||
-3 18 7 -1
|
|
||||||
-9 -3 10 -5
|
|
||||||
14 -13 -3 17
|
|
||||||
-19 11 -18 -1
|
|
||||||
-2 8 -23 -18
|
|
||||||
-5 0 -9 -2
|
|
||||||
-11 -4 -8 2
|
|
||||||
6 14 -6 -3
|
|
||||||
0 -3 0 -15
|
|
||||||
4 -9 -9 -15
|
|
||||||
11 -1 11 3
|
|
||||||
-16 -10 7 -7
|
|
||||||
-10 -2 -2 -10
|
|
||||||
-3 -5 -23 5
|
|
||||||
-8 13 -11 -15
|
|
||||||
11 -15 -6 6
|
|
||||||
-3 -16 2 -2
|
|
||||||
12 6 24 -16
|
|
||||||
0 -10 11 8
|
|
||||||
7 -7 -7 -19
|
|
||||||
16 5 -3 9
|
|
||||||
7 9 -16 -7
|
|
||||||
2 3 9 -10
|
|
||||||
1 21 7 8
|
|
||||||
0 7 17 1
|
|
||||||
12 -8 6 9
|
|
||||||
-7 11 -6 -8
|
|
||||||
0 19 3 9
|
|
||||||
-7 1 -11 -5
|
|
||||||
8 0 14 -2
|
|
||||||
-2 12 -6 -15
|
|
||||||
12 4 -21 0
|
|
||||||
-4 17 -7 -6
|
|
||||||
-9 -10 -7 -14
|
|
||||||
-10 -15 -14 -15
|
|
||||||
-5 -7 -12 5
|
|
||||||
0 -4 -4 15
|
|
||||||
2 5 -23 -6
|
|
||||||
-21 -4 4 -6
|
|
||||||
5 -10 6 -15
|
|
||||||
-3 4 5 -1
|
|
||||||
19 -4 -4 -23
|
|
||||||
17 -4 -11 13
|
|
||||||
12 1 -14 4
|
|
||||||
-6 -11 10 -20
|
|
||||||
5 4 20 3
|
|
||||||
-20 -8 1 3
|
|
||||||
9 -19 -3 9
|
|
||||||
15 18 -4 11
|
|
||||||
16 12 7 8
|
|
||||||
-8 -14 9 -3
|
|
||||||
0 -6 -4 2
|
|
||||||
-10 1 2 -1
|
|
||||||
-7 8 18 -6
|
|
||||||
12 9 -23 -7
|
|
||||||
-6 8 2 5
|
|
||||||
6 -9 -7 -12
|
|
||||||
-2 -1 2 -7
|
|
||||||
9 9 15 7
|
|
||||||
2 6 6 -6
|
|
||||||
12 16 19 0
|
|
||||||
3 4 0 6
|
|
||||||
-1 -2 17 2
|
|
||||||
1 8 1 3
|
|
||||||
-1 -12 0 -11
|
|
||||||
2 -11 9 7
|
|
||||||
3 -1 4 -19
|
|
||||||
-11 -1 3 -1
|
|
||||||
-10 1 -4 -10
|
|
||||||
3 -2 11 6
|
|
||||||
7 3 -8 -9
|
|
||||||
-14 24 -10 -2
|
|
||||||
-3 -3 -6 -18
|
|
||||||
-10 -13 -1 -7
|
|
||||||
-7 2 -6 9
|
|
||||||
-4 2 -13 6
|
|
||||||
-4 4 3 -2
|
|
||||||
2 -4 13 9
|
|
||||||
5 -11 -11 -6
|
|
||||||
-2 4 -9 11
|
|
||||||
0 -19 -5 -23
|
|
||||||
-7 -5 -6 -3
|
|
||||||
-4 -6 14 12
|
|
||||||
-11 12 -16 -8
|
|
||||||
15 -21 6 -12
|
|
||||||
-1 -2 16 -8
|
|
||||||
-1 6 -2 -8
|
|
||||||
-1 1 8 -9
|
|
||||||
-4 3 -2 -2
|
|
||||||
0 -7 -8 4
|
|
||||||
-11 11 2 -12
|
|
||||||
3 2 7 11
|
|
||||||
-4 -7 -6 -9
|
|
||||||
-7 3 0 -5
|
|
||||||
-7 3 -5 -10
|
|
||||||
-1 -3 -10 8
|
|
||||||
8 0 1 5
|
|
||||||
0 9 16 1
|
|
||||||
4 8 -3 -11
|
|
||||||
9 -15 17 8
|
|
||||||
2 0 17 -9
|
|
||||||
-11 -6 -3 -10
|
|
||||||
1 1 -8 15
|
|
||||||
-13 -12 4 -2
|
|
||||||
4 -6 -10 -6
|
|
||||||
-7 5 -5 7
|
|
||||||
6 10 9 8
|
|
||||||
7 -5 -3 -18
|
|
||||||
3 -6 4 5
|
|
||||||
-13 -10 -3 -5
|
|
||||||
2 -11 0 -16
|
|
||||||
-21 7 -13 -5
|
|
||||||
-14 -14 -4 -4
|
|
||||||
9 4 -3 7
|
|
||||||
11 4 -4 10
|
|
||||||
17 6 17 9
|
|
||||||
8 -10 -11 0
|
|
||||||
-16 -6 8 -6
|
|
||||||
5 -13 -5 10
|
|
||||||
2 3 16 12
|
|
||||||
-8 13 -6 0
|
|
||||||
0 10 -11 4
|
|
||||||
5 8 -2 10
|
|
||||||
-7 11 3 -13
|
|
||||||
4 2 -3 -7
|
|
||||||
-2 -14 16 -11
|
|
||||||
-6 11 6 7
|
|
||||||
15 -3 -10 8
|
|
||||||
8 -3 -12 12
|
|
||||||
6 -13 7 -14
|
|
||||||
-5 -11 -6 -8
|
|
||||||
-6 7 3 6
|
|
||||||
10 -4 1 5
|
|
||||||
16 9 13 10
|
|
||||||
10 -17 8 2
|
|
||||||
1 -5 -4 4
|
|
||||||
8 -14 2 -5
|
|
||||||
-9 4 -3 -6
|
|
||||||
-7 3 0 -10
|
|
||||||
-8 -2 4 -10
|
|
||||||
5 -8 24 -9
|
|
||||||
-8 2 -9 8
|
|
||||||
17 -4 2 -5
|
|
||||||
0 14 9 -9
|
|
||||||
15 11 5 -6
|
|
||||||
1 -8 4 -3
|
|
||||||
-21 9 2 10
|
|
||||||
-1 2 11 4
|
|
||||||
3 24 -2 2
|
|
||||||
17 -8 -10 -14
|
|
||||||
5 6 7 -13
|
|
||||||
10 11 -1 0
|
|
||||||
6 4 6 -10
|
|
||||||
-2 -12 6 5
|
|
||||||
-1 3 -15 8
|
|
||||||
-4 1 11 -7
|
|
||||||
11 1 0 5
|
|
||||||
-12 6 1 10
|
|
||||||
-2 -3 4 -1
|
|
||||||
-11 -2 12 -1
|
|
||||||
-8 7 -18 -20
|
|
||||||
0 2 2 -9
|
|
||||||
-1 -13 2 -16
|
|
||||||
-1 3 -17 -5
|
|
||||||
8 15 -14 3
|
|
||||||
-12 -13 15 6
|
|
||||||
-8 2 6 2
|
|
||||||
22 6 -23 -3
|
|
||||||
-7 -2 0 -6
|
|
||||||
-10 13 6 -6
|
|
||||||
7 6 12 -10
|
|
||||||
7 -6 11 -2
|
|
||||||
-22 0 -17 -2
|
|
||||||
-1 -4 -14 -11
|
|
||||||
-8 -2 12 7
|
|
||||||
-5 12 -13 7
|
|
||||||
-2 2 6 -7
|
|
||||||
8 0 23 -3
|
|
||||||
12 6 -11 13
|
|
||||||
-10 -21 8 10
|
|
||||||
0 -3 15 7
|
|
||||||
-6 7 -12 -5
|
|
||||||
-10 -21 -11 12
|
|
||||||
-11 -5 -11 8
|
|
||||||
0 5 -1 -11
|
|
||||||
-9 8 -1 7
|
|
||||||
-23 11 -5 21
|
|
||||||
-5 0 6 -8
|
|
||||||
8 -6 12 8
|
|
||||||
5 -7 -2 3
|
|
||||||
-20 -5 9 -12
|
|
||||||
12 -6 3 -11
|
|
||||||
5 4 11 13
|
|
||||||
12 2 -12 13
|
|
||||||
-13 -4 7 4
|
|
||||||
15 0 -16 -3
|
|
||||||
2 -3 14 -2
|
|
||||||
-14 4 -11 16
|
|
||||||
3 -13 10 23
|
|
||||||
-19 9 5 2
|
|
||||||
3 5 -7 14
|
|
||||||
-13 19 15 -11
|
|
||||||
0 14 -5 -2
|
|
||||||
-4 11 -6 0
|
|
||||||
5 -2 -8 -13
|
|
||||||
-15 -11 -17 -7
|
|
||||||
3 1 -8 -10
|
|
||||||
-10 -13 -12 7
|
|
||||||
-13 0 -6 23
|
|
||||||
-17 2 -3 -7
|
|
||||||
3 1 -10 4
|
|
||||||
4 13 -6 14
|
|
||||||
-2 -19 5 -1
|
|
||||||
-8 9 -5 10
|
|
||||||
-1 7 7 5
|
|
||||||
-10 9 0 19
|
|
||||||
5 7 -7 -4
|
|
||||||
1 -11 -11 -1
|
|
||||||
-1 2 11 -4
|
|
||||||
7 -1 -2 2
|
|
||||||
-20 1 -6 -9
|
|
||||||
-18 -4 -18 8
|
|
||||||
-2 -16 -6 7
|
|
||||||
-6 -3 -4 -1
|
|
||||||
-16 0 -5 24
|
|
||||||
-2 -4 9 -1
|
|
||||||
2 -8 15 -6
|
|
||||||
4 11 -3 0
|
|
||||||
6 7 -10 2
|
|
||||||
-9 -7 -6 12
|
|
||||||
15 24 -1 -8
|
|
||||||
-9 15 -15 -3
|
|
||||||
-5 17 -10 11
|
|
||||||
13 -2 4 -15
|
|
||||||
-1 -2 -23 4
|
|
||||||
3 -16 -14 -7
|
|
||||||
-5 -3 -9 -10
|
|
||||||
3 -5 -1 -2
|
|
||||||
4 -1 8 1
|
|
||||||
9 12 -14 9
|
|
||||||
17 -9 0 -3
|
|
||||||
4 5 -6 13
|
|
||||||
-8 -1 10 19
|
|
||||||
-5 8 2 -15
|
|
||||||
-9 -12 -5 -4
|
|
||||||
0 12 4 24
|
|
||||||
-2 8 4 14
|
|
||||||
-4 8 16 -7
|
|
||||||
-1 5 -4 -8
|
|
||||||
18 -2 17 -5
|
|
||||||
-2 8 -2 -9
|
|
||||||
-7 3 -6 1
|
|
||||||
-22 -5 -2 -5
|
|
||||||
-10 -8 1 14
|
|
||||||
-13 -3 9 3
|
|
||||||
-1 -4 0 -1
|
|
||||||
-21 -7 -19 12
|
|
||||||
8 -8 8 24
|
|
||||||
-6 12 3 -2
|
|
||||||
-11 -5 -4 -22
|
|
||||||
5 -3 4 -4
|
|
||||||
24 -16 -9 7
|
|
||||||
23 -10 18 -9
|
|
||||||
12 1 21 17
|
|
||||||
-6 24 -11 -3
|
|
||||||
17 -7 -6 1
|
|
||||||
4 4 -7 2
|
|
||||||
6 14 3 -12
|
|
||||||
0 -6 13 -16
|
|
||||||
5 -10 12 7
|
|
||||||
2 5 -3 6
|
|
||||||
0 7 1 -23
|
|
||||||
-5 15 14 1
|
|
||||||
-1 -3 6 6
|
|
||||||
-9 6 12 -9
|
|
||||||
-2 4 7 -4
|
|
||||||
-5 -4 4 4
|
|
||||||
0 -13 -10 6
|
|
||||||
-12 2 -3 -6
|
|
||||||
0 16 3 -3
|
|
||||||
-14 5 11 6
|
|
||||||
11 5 -13 0
|
|
||||||
5 7 -5 -1
|
|
||||||
4 12 10 6
|
|
||||||
4 -10 -11 -1
|
|
||||||
10 4 5 -14
|
|
||||||
-14 11 0 -13
|
|
||||||
8 2 24 12
|
|
||||||
3 -1 2 -1
|
|
||||||
-14 9 3 -23
|
|
||||||
-6 -8 9 0
|
|
||||||
14 -15 -10 10
|
|
||||||
-6 -10 -5 -7
|
|
||||||
5 11 -15 -3
|
|
||||||
0 1 8 1
|
|
||||||
-6 -11 -18 -4
|
|
||||||
0 9 -4 22
|
|
||||||
-1 -5 4 -9
|
|
||||||
2 -20 6 1
|
|
||||||
2 1 -12 -9
|
|
||||||
15 5 -6 4
|
|
||||||
4 19 11 4
|
|
||||||
-4 17 -1 -8
|
|
||||||
-12 -8 -3 7
|
|
||||||
9 11 1 8
|
|
||||||
22 9 15 -15
|
|
||||||
-7 -7 -23 1
|
|
||||||
13 -5 2 -8
|
|
||||||
-5 3 -11 11
|
|
||||||
-18 3 -5 14
|
|
||||||
7 -20 -23 -10
|
|
||||||
-5 -2 0 6
|
|
||||||
-13 -17 2 -3
|
|
||||||
-1 -6 -2 14
|
|
||||||
-16 -12 6 15
|
|
||||||
-2 -12 -19 3
|
|
@ -440,7 +440,7 @@ protected:
|
|||||||
fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE );
|
fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE );
|
||||||
if( fs.isOpened() )
|
if( fs.isOpened() )
|
||||||
{
|
{
|
||||||
SurfFeatureDetector fd;
|
ORB fd;
|
||||||
fd.detect(img, keypoints);
|
fd.detect(img, keypoints);
|
||||||
write( fs, "keypoints", keypoints );
|
write( fs, "keypoints", keypoints );
|
||||||
}
|
}
|
||||||
@ -491,7 +491,7 @@ private:
|
|||||||
CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
|
CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
|
||||||
};
|
};
|
||||||
|
|
||||||
template<typename T, typename Distance>
|
/*template<typename T, typename Distance>
|
||||||
class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
|
class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -506,7 +506,7 @@ protected:
|
|||||||
new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
|
new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
|
||||||
FEATURES2D_DIR + "/calonder_classifier.rtc");
|
FEATURES2D_DIR + "/calonder_classifier.rtc");
|
||||||
}
|
}
|
||||||
};
|
};*/
|
||||||
|
|
||||||
/****************************************************************************************\
|
/****************************************************************************************\
|
||||||
* Algorithmic tests for descriptor matchers *
|
* Algorithmic tests for descriptor matchers *
|
||||||
@ -991,24 +991,12 @@ TEST( Features2d_Detector_MSER, regression )
|
|||||||
test.safe_run();
|
test.safe_run();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST( Features2d_Detector_SIFT, regression )
|
|
||||||
{
|
|
||||||
CV_FeatureDetectorTest test( "detector-sift", FeatureDetector::create("SIFT") );
|
|
||||||
test.safe_run();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST( Features2d_Detector_STAR, regression )
|
TEST( Features2d_Detector_STAR, regression )
|
||||||
{
|
{
|
||||||
CV_FeatureDetectorTest test( "detector-star", FeatureDetector::create("STAR") );
|
CV_FeatureDetectorTest test( "detector-star", FeatureDetector::create("STAR") );
|
||||||
test.safe_run();
|
test.safe_run();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST( Features2d_Detector_SURF, regression )
|
|
||||||
{
|
|
||||||
CV_FeatureDetectorTest test( "detector-surf", FeatureDetector::create("SURF") );
|
|
||||||
test.safe_run();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST( Features2d_Detector_ORB, regression )
|
TEST( Features2d_Detector_ORB, regression )
|
||||||
{
|
{
|
||||||
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
|
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
|
||||||
@ -1027,23 +1015,6 @@ TEST( Features2d_Detector_PyramidFAST, regression )
|
|||||||
test.safe_run();
|
test.safe_run();
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Descriptors
|
|
||||||
*/
|
|
||||||
TEST( Features2d_DescriptorExtractor_SIFT, regression )
|
|
||||||
{
|
|
||||||
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-sift", 0.03f,
|
|
||||||
DescriptorExtractor::create("SIFT"), 8.06652f );
|
|
||||||
test.safe_run();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST( Features2d_DescriptorExtractor_SURF, regression )
|
|
||||||
{
|
|
||||||
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf", 0.035f,
|
|
||||||
DescriptorExtractor::create("SURF"), 0.147372f );
|
|
||||||
test.safe_run();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST( Features2d_DescriptorExtractor_ORB, regression )
|
TEST( Features2d_DescriptorExtractor_ORB, regression )
|
||||||
{
|
{
|
||||||
// TODO adjust the parameters below
|
// TODO adjust the parameters below
|
||||||
@ -1066,13 +1037,6 @@ TEST( Features2d_DescriptorExtractor_BRIEF, regression )
|
|||||||
test.safe_run();
|
test.safe_run();
|
||||||
}*/
|
}*/
|
||||||
|
|
||||||
TEST( Features2d_DescriptorExtractor_OpponentSURF, regression )
|
|
||||||
{
|
|
||||||
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-surf", 0.18f,
|
|
||||||
DescriptorExtractor::create("OpponentSURF"), 0.147372f );
|
|
||||||
test.safe_run();
|
|
||||||
}
|
|
||||||
|
|
||||||
#if CV_SSE2
|
#if CV_SSE2
|
||||||
TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
|
TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
|
||||||
{
|
{
|
||||||
@ -1096,7 +1060,7 @@ TEST( Features2d_DescriptorExtractor_Calonder_float, regression )
|
|||||||
*/
|
*/
|
||||||
TEST( Features2d_DescriptorMatcher_BruteForce, regression )
|
TEST( Features2d_DescriptorMatcher_BruteForce, regression )
|
||||||
{
|
{
|
||||||
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", new BruteForceMatcher<L2<float> >, 0.01f );
|
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", new BFMatcher(NORM_L2), 0.01f );
|
||||||
test.safe_run();
|
test.safe_run();
|
||||||
}
|
}
|
||||||
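The non-template matcher shown in this test is used the same way in application code. A minimal sketch, assuming ``query`` and ``train`` are CV_32F descriptor matrices (for example produced by a SIFT or SURF extractor); the helper name is hypothetical:

    #include "opencv2/features2d/features2d.hpp"
    #include <vector>

    std::vector<cv::DMatch> matchDescriptors(const cv::Mat& query, const cv::Mat& train)
    {
        cv::BFMatcher matcher(cv::NORM_L2);    // replaces BruteForceMatcher<L2<float> >
        std::vector<cv::DMatch> matches;
        matcher.match(query, train, matches);  // one best match per query descriptor
        return matches;
    }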
|
|
||||||
|
@ -155,27 +155,24 @@ void CV_MserTest::run(int)
|
|||||||
{
|
{
|
||||||
string image_path = string(ts->get_data_path()) + "mser/puzzle.png";
|
string image_path = string(ts->get_data_path()) + "mser/puzzle.png";
|
||||||
|
|
||||||
IplImage* img = cvLoadImage( image_path.c_str());
|
Mat img = imread( image_path );
|
||||||
if (!img)
|
if (img.empty())
|
||||||
{
|
{
|
||||||
ts->printf( cvtest::TS::LOG, "Unable to open image mser/puzzle.png\n");
|
ts->printf( cvtest::TS::LOG, "Unable to open image mser/puzzle.png\n");
|
||||||
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
|
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
CvSeq* contours;
|
Mat yuv;
|
||||||
CvMemStorage* storage= cvCreateMemStorage();
|
cvtColor(img, yuv, COLOR_BGR2YCrCb);
|
||||||
IplImage* hsv = cvCreateImage( cvGetSize( img ), IPL_DEPTH_8U, 3 );
|
vector<vector<Point> > msers;
|
||||||
cvCvtColor( img, hsv, CV_BGR2YCrCb );
|
MSER()(yuv, msers);
|
||||||
CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
|
|
||||||
cvExtractMSER( hsv, NULL, &contours, storage, params );
|
|
||||||
|
|
||||||
vector<CvBox2D> boxes;
|
vector<CvBox2D> boxes;
|
||||||
vector<CvBox2D> boxes_orig;
|
vector<CvBox2D> boxes_orig;
|
||||||
for ( int i = 0; i < contours->total; i++ )
|
for ( size_t i = 0; i < msers.size(); i++ )
|
||||||
{
|
{
|
||||||
CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
|
RotatedRect box = fitEllipse(msers[i]);
|
||||||
CvBox2D box = cvFitEllipse2( r );
|
|
||||||
box.angle=(float)CV_PI/2-box.angle;
|
box.angle=(float)CV_PI/2-box.angle;
|
||||||
boxes.push_back(box);
|
boxes.push_back(box);
|
||||||
}
|
}
|
||||||
@ -203,10 +200,6 @@ void CV_MserTest::run(int)
|
|||||||
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
|
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
|
||||||
ts->printf( cvtest::TS::LOG, "Incorrect correspondence in box %d\n",n_box);
|
ts->printf( cvtest::TS::LOG, "Incorrect correspondence in box %d\n",n_box);
|
||||||
}
|
}
|
||||||
|
|
||||||
cvReleaseMemStorage(&storage);
|
|
||||||
cvReleaseImage(&hsv);
|
|
||||||
cvReleaseImage(&img);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(Features2d_MSER, regression) { CV_MserTest test; test.safe_run(); }
|
TEST(Features2d_MSER, regression) { CV_MserTest test; test.safe_run(); }
|
||||||
|
@ -1511,7 +1511,7 @@ private:
|
|||||||
|
|
||||||
////////////////////////////////// SURF //////////////////////////////////////////
|
////////////////////////////////// SURF //////////////////////////////////////////
|
||||||
|
|
||||||
class CV_EXPORTS SURF_GPU : public CvSURFParams
|
class CV_EXPORTS SURF_GPU
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
enum KeypointLayout
|
enum KeypointLayout
|
||||||
@ -1566,6 +1566,14 @@ public:
|
|||||||
|
|
||||||
void releaseMemory();
|
void releaseMemory();
|
||||||
|
|
||||||
|
// SURF parameters;
|
||||||
|
int extended;
|
||||||
|
int upright;
|
||||||
|
double hessianThreshold;
|
||||||
|
|
||||||
|
int nOctaves;
|
||||||
|
int nOctaveLayers;
|
||||||
|
|
||||||
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
|
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
|
||||||
float keypointsRatio;
|
float keypointsRatio;
|
||||||
|
|
||||||
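Since the CvSURFParams base class is gone, the GPU detector is configured through these new public fields. A hedged sketch, with purely illustrative values:

    #include "opencv2/gpu/gpu.hpp"

    // Sketch: configure a GPU SURF detector via its new public parameters.
    void configureSurfGpu(cv::gpu::SURF_GPU& surf)
    {
        surf.hessianThreshold = 400;
        surf.nOctaves         = 4;
        surf.nOctaveLayers    = 2;
        surf.extended         = 0;   // 64-element descriptors
        surf.upright          = 0;   // compute the dominant orientation
    }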
@ -1656,7 +1664,7 @@ public:
|
|||||||
//! Constructor
|
//! Constructor
|
||||||
//! n_features - the number of desired features
|
//! n_features - the number of desired features
|
||||||
//! detector_params - parameters to use
|
//! detector_params - parameters to use
|
||||||
explicit ORB_GPU(size_t n_features = 500, const ORB::CommonParams& detector_params = ORB::CommonParams());
|
explicit ORB_GPU(size_t n_features = 500, float scaleFactor = 1.2f, int nlevels = 3);
|
||||||
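With the new signature, the GPU detector is constructed directly from the pyramid parameters; for example (values are illustrative only):

    // 1000 features, scale factor 1.2, 3 pyramid levels
    cv::gpu::ORB_GPU orb(1000, 1.2f, 3);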
|
|
||||||
//! Compute the ORB features on an image
|
//! Compute the ORB features on an image
|
||||||
//! image - the image to compute the features (supports only CV_8UC1 images)
|
//! image - the image to compute the features (supports only CV_8UC1 images)
|
||||||
@ -1682,7 +1690,7 @@ public:
|
|||||||
//! returns the descriptor size in bytes
|
//! returns the descriptor size in bytes
|
||||||
inline int descriptorSize() const { return kBytes; }
|
inline int descriptorSize() const { return kBytes; }
|
||||||
|
|
||||||
void setParams(size_t n_features, const ORB::CommonParams& detector_params);
|
void setParams(size_t n_features, float scaleFactor, int nlevels);
|
||||||
inline void setFastParams(int threshold, bool nonmaxSupression = true)
|
inline void setFastParams(int threshold, bool nonmaxSupression = true)
|
||||||
{
|
{
|
||||||
fastDetector_.threshold = threshold;
|
fastDetector_.threshold = threshold;
|
||||||
@ -1706,7 +1714,7 @@ private:
|
|||||||
|
|
||||||
void mergeKeyPoints(GpuMat& keypoints);
|
void mergeKeyPoints(GpuMat& keypoints);
|
||||||
|
|
||||||
ORB::CommonParams params_;
|
ORB params_;
|
||||||
|
|
||||||
// The number of desired features per scale
|
// The number of desired features per scale
|
||||||
std::vector<size_t> n_features_per_level_;
|
std::vector<size_t> n_features_per_level_;
|
||||||
|
@ -48,14 +48,14 @@ using namespace cv::gpu;
|
|||||||
|
|
||||||
#if !defined (HAVE_CUDA)
|
#if !defined (HAVE_CUDA)
|
||||||
|
|
||||||
cv::gpu::ORB_GPU::ORB_GPU(size_t, const ORB::CommonParams&) : fastDetector_(0) { throw_nogpu(); }
|
cv::gpu::ORB_GPU::ORB_GPU(size_t, float, int) : fastDetector_(0) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::downloadKeyPoints(GpuMat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::downloadKeyPoints(GpuMat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::convertKeyPoints(Mat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::convertKeyPoints(Mat&, std::vector<KeyPoint>&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::setParams(size_t, const ORB::CommonParams&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::setParams(size_t, float, int) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::release() { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::release() { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::buildScalePyramids(const GpuMat&, const GpuMat&) { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::buildScalePyramids(const GpuMat&, const GpuMat&) { throw_nogpu(); }
|
||||||
void cv::gpu::ORB_GPU::computeKeyPointsPyramid() { throw_nogpu(); }
|
void cv::gpu::ORB_GPU::computeKeyPointsPyramid() { throw_nogpu(); }
|
||||||
@ -83,10 +83,10 @@ namespace cv { namespace gpu { namespace device
|
|||||||
}
|
}
|
||||||
}}}
|
}}}
|
||||||
|
|
||||||
cv::gpu::ORB_GPU::ORB_GPU(size_t n_features, const ORB::CommonParams& detector_params) :
|
cv::gpu::ORB_GPU::ORB_GPU(size_t n_features, float scaleFactor, int nlevels) :
|
||||||
fastDetector_(DEFAULT_FAST_THRESHOLD)
|
fastDetector_(DEFAULT_FAST_THRESHOLD)
|
||||||
{
|
{
|
||||||
setParams(n_features, detector_params);
|
setParams(n_features, scaleFactor, nlevels);
|
||||||
|
|
||||||
blurFilter = createGaussianFilter_GPU(CV_8UC1, Size(7, 7), 2, 2, BORDER_REFLECT_101);
|
blurFilter = createGaussianFilter_GPU(CV_8UC1, Size(7, 7), 2, 2, BORDER_REFLECT_101);
|
||||||
|
|
||||||
@ -407,9 +407,9 @@ namespace
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void cv::gpu::ORB_GPU::setParams(size_t n_features, const ORB::CommonParams& detector_params)
|
void cv::gpu::ORB_GPU::setParams(size_t n_features, float scaleFactor, int nlevels)
|
||||||
{
|
{
|
||||||
params_ = detector_params;
|
params_ = ORB((int)n_features, scaleFactor, nlevels);
|
||||||
|
|
||||||
// fill the extractors and descriptors for the corresponding scales
|
// fill the extractors and descriptors for the corresponding scales
|
||||||
int n_levels = static_cast<int>(params_.n_levels_);
|
int n_levels = static_cast<int>(params_.n_levels_);
|
||||||
|
@ -586,37 +586,6 @@ Use these functions to either mark a connected component with the specified colo
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
inpaint
|
|
||||||
-----------
|
|
||||||
Restores the selected region in an image using the region neighborhood.
|
|
||||||
|
|
||||||
.. ocv:function:: void inpaint( InputArray src, InputArray inpaintMask, OutputArray dst, double inpaintRadius, int flags )
|
|
||||||
|
|
||||||
.. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRange, flags[, dst]) -> dst
|
|
||||||
|
|
||||||
.. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* mask, CvArr* dst, double inpaintRadius, int flags)
|
|
||||||
.. ocv:pyoldfunction:: cv.Inpaint(src, mask, dst, inpaintRadius, flags) -> None
|
|
||||||
|
|
||||||
:param src: Input 8-bit 1-channel or 3-channel image.
|
|
||||||
|
|
||||||
:param inpaintMask: Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.
|
|
||||||
|
|
||||||
:param dst: Output image with the same size and type as ``src`` .
|
|
||||||
|
|
||||||
:param inpaintRadius: Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.
|
|
||||||
|
|
||||||
:param flags: Inpainting method that could be one of the following:
|
|
||||||
|
|
||||||
* **INPAINT_NS** Navier-Stokes based method.
|
|
||||||
|
|
||||||
* **INPAINT_TELEA** Method by Alexandru Telea [Telea04]_.
|
|
||||||
|
|
||||||
The function reconstructs the selected image area from the pixels near the area boundary. It may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video. See
|
|
||||||
http://en.wikipedia.org/wiki/Inpainting
|
|
||||||
for more details.
|
|
||||||
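For reference, a minimal C++ sketch of the call described above (the function now lives in the photo module; the file names are purely illustrative)::

    #include "opencv2/photo/photo.hpp"
    #include "opencv2/highgui/highgui.hpp"

    int main()
    {
        cv::Mat damaged = cv::imread("damaged.png");          // hypothetical input image
        cv::Mat mask    = cv::imread("scratch_mask.png", 0);  // 8-bit, 1-channel mask
        cv::Mat restored;
        // non-zero mask pixels mark the area to restore; 3-pixel inpainting radius
        cv::inpaint(damaged, mask, restored, 3, cv::INPAINT_TELEA);
        cv::imwrite("restored.png", restored);
        return 0;
    }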
|
|
||||||
|
|
||||||
|
|
||||||
integral
|
integral
|
||||||
------------
|
------------
|
||||||
Calculates the integral of an image.
|
Calculates the integral of an image.
|
||||||
|
@ -751,17 +751,6 @@ CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
|
|||||||
InputOutputArray bgdModel, InputOutputArray fgdModel,
|
InputOutputArray bgdModel, InputOutputArray fgdModel,
|
||||||
int iterCount, int mode = GC_EVAL );
|
int iterCount, int mode = GC_EVAL );
|
||||||
|
|
||||||
//! the inpainting algorithm
|
|
||||||
enum
|
|
||||||
{
|
|
||||||
INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm
|
|
||||||
INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm
|
|
||||||
};
|
|
||||||
|
|
||||||
//! restores the damaged image areas using one of the available inpainting algorithms
|
|
||||||
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
|
|
||||||
OutputArray dst, double inpaintRange, int flags );
|
|
||||||
|
|
||||||
//! builds the discrete Voronoi diagram
|
//! builds the discrete Voronoi diagram
|
||||||
CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
|
CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
|
||||||
OutputArray labels, int distanceType, int maskSize );
|
OutputArray labels, int distanceType, int maskSize );
|
||||||
|
@ -345,7 +345,7 @@ void prefix##remove_at_##type(_CVLIST* l, CVPOS pos)\
|
|||||||
void prefix##set_##type(CVPOS pos, type* data)\
|
void prefix##set_##type(CVPOS pos, type* data)\
|
||||||
{\
|
{\
|
||||||
ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\
|
ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\
|
||||||
memcpy(&(element->m_data), data, sizeof(data));\
|
memcpy(&(element->m_data), data, sizeof(*data));\
|
||||||
}\
|
}\
|
||||||
type* prefix##get_##type(CVPOS pos)\
|
type* prefix##get_##type(CVPOS pos)\
|
||||||
{\
|
{\
|
||||||
|
10
modules/legacy/doc/legacy.rst
Normal file
10
modules/legacy/doc/legacy.rst
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
********************************
|
||||||
|
photo. Computational Photography
|
||||||
|
********************************
|
||||||
|
|
||||||
|
.. highlight:: cpp
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 2
|
||||||
|
|
||||||
|
inpainting
|
@ -600,6 +600,138 @@ CV_EXPORTS void cvProjectPointsSimple( int point_count, CvPoint3D64f* _object_p
|
|||||||
|
|
||||||
#define cvConvertPointsHomogenious cvConvertPointsHomogeneous
|
#define cvConvertPointsHomogenious cvConvertPointsHomogeneous
|
||||||
|
|
||||||
|
|
||||||
|
//////////////////////////////////// feature extractors: obsolete API //////////////////////////////////
|
||||||
|
|
||||||
|
typedef struct CvSURFPoint
|
||||||
|
{
|
||||||
|
CvPoint2D32f pt;
|
||||||
|
|
||||||
|
int laplacian;
|
||||||
|
int size;
|
||||||
|
float dir;
|
||||||
|
float hessian;
|
||||||
|
|
||||||
|
} CvSURFPoint;
|
||||||
|
|
||||||
|
CV_INLINE CvSURFPoint cvSURFPoint( CvPoint2D32f pt, int laplacian,
|
||||||
|
int size, float dir CV_DEFAULT(0),
|
||||||
|
float hessian CV_DEFAULT(0))
|
||||||
|
{
|
||||||
|
CvSURFPoint kp;
|
||||||
|
|
||||||
|
kp.pt = pt;
|
||||||
|
kp.laplacian = laplacian;
|
||||||
|
kp.size = size;
|
||||||
|
kp.dir = dir;
|
||||||
|
kp.hessian = hessian;
|
||||||
|
|
||||||
|
return kp;
|
||||||
|
}
|
||||||
|
|
||||||
|
typedef struct CvSURFParams
|
||||||
|
{
|
||||||
|
int extended;
|
||||||
|
int upright;
|
||||||
|
double hessianThreshold;
|
||||||
|
|
||||||
|
int nOctaves;
|
||||||
|
int nOctaveLayers;
|
||||||
|
|
||||||
|
} CvSURFParams;
|
||||||
|
|
||||||
|
CVAPI(CvSURFParams) cvSURFParams( double hessianThreshold, int extended CV_DEFAULT(0) );
|
||||||
|
|
||||||
|
// If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed
|
||||||
|
// at the locations provided in keypoints (a CvSeq of CvSURFPoint).
|
||||||
|
CVAPI(void) cvExtractSURF( const CvArr* img, const CvArr* mask,
|
||||||
|
CvSeq** keypoints, CvSeq** descriptors,
|
||||||
|
CvMemStorage* storage, CvSURFParams params,
|
||||||
|
int useProvidedKeyPts CV_DEFAULT(0) );
|
||||||
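A hedged sketch of calling the legacy C interface declared above (image loading and the file name are illustrative only; requires highgui):

    IplImage* img = cvLoadImage("scene.png", CV_LOAD_IMAGE_GRAYSCALE);
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq *keypoints = 0, *descriptors = 0;
    // detect keypoints and compute descriptors in one call
    cvExtractSURF(img, 0, &keypoints, &descriptors, storage, cvSURFParams(400, 0), 0);
    // keypoints: CvSeq of CvSURFPoint; descriptors: CvSeq of 64-float (or 128-float) blocks
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&img);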
|
|
||||||
|
/*!
|
||||||
|
Maximally Stable Extremal Regions parameters
|
||||||
|
*/
|
||||||
|
typedef struct CvMSERParams
|
||||||
|
{
|
||||||
|
//! delta, in the code, it compares (size_{i}-size_{i-delta})/size_{i-delta}
|
||||||
|
int delta;
|
||||||
|
//! prune the area which is bigger than maxArea
|
||||||
|
int maxArea;
|
||||||
|
//! prune the area which is smaller than minArea
|
||||||
|
int minArea;
|
||||||
|
//! prune the area that has a similar size to its children
|
||||||
|
float maxVariation;
|
||||||
|
//! trace back to cut off mser with diversity < min_diversity
|
||||||
|
float minDiversity;
|
||||||
|
|
||||||
|
/////// the next few params for MSER of color image
|
||||||
|
|
||||||
|
//! for color image, the evolution steps
|
||||||
|
int maxEvolution;
|
||||||
|
//! the area threshold that causes re-initialization
|
||||||
|
double areaThreshold;
|
||||||
|
//! ignore too small margin
|
||||||
|
double minMargin;
|
||||||
|
//! the aperture size for edge blur
|
||||||
|
int edgeBlurSize;
|
||||||
|
} CvMSERParams;
|
||||||
|
|
||||||
|
CVAPI(CvMSERParams) cvMSERParams( int delta CV_DEFAULT(5), int min_area CV_DEFAULT(60),
|
||||||
|
int max_area CV_DEFAULT(14400), float max_variation CV_DEFAULT(.25f),
|
||||||
|
float min_diversity CV_DEFAULT(.2f), int max_evolution CV_DEFAULT(200),
|
||||||
|
double area_threshold CV_DEFAULT(1.01),
|
||||||
|
double min_margin CV_DEFAULT(.003),
|
||||||
|
int edge_blur_size CV_DEFAULT(5) );
|
||||||
|
|
||||||
|
// Extracts the contours of Maximally Stable Extremal Regions
|
||||||
|
CVAPI(void) cvExtractMSER( CvArr* _img, CvArr* _mask, CvSeq** contours, CvMemStorage* storage, CvMSERParams params );
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct CvStarKeypoint
|
||||||
|
{
|
||||||
|
CvPoint pt;
|
||||||
|
int size;
|
||||||
|
float response;
|
||||||
|
} CvStarKeypoint;
|
||||||
|
|
||||||
|
CV_INLINE CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response)
|
||||||
|
{
|
||||||
|
CvStarKeypoint kpt;
|
||||||
|
kpt.pt = pt;
|
||||||
|
kpt.size = size;
|
||||||
|
kpt.response = response;
|
||||||
|
return kpt;
|
||||||
|
}
|
||||||
|
|
||||||
|
typedef struct CvStarDetectorParams
|
||||||
|
{
|
||||||
|
int maxSize;
|
||||||
|
int responseThreshold;
|
||||||
|
int lineThresholdProjected;
|
||||||
|
int lineThresholdBinarized;
|
||||||
|
int suppressNonmaxSize;
|
||||||
|
} CvStarDetectorParams;
|
||||||
|
|
||||||
|
CV_INLINE CvStarDetectorParams cvStarDetectorParams(
|
||||||
|
int maxSize CV_DEFAULT(45),
|
||||||
|
int responseThreshold CV_DEFAULT(30),
|
||||||
|
int lineThresholdProjected CV_DEFAULT(10),
|
||||||
|
int lineThresholdBinarized CV_DEFAULT(8),
|
||||||
|
int suppressNonmaxSize CV_DEFAULT(5))
|
||||||
|
{
|
||||||
|
CvStarDetectorParams params;
|
||||||
|
params.maxSize = maxSize;
|
||||||
|
params.responseThreshold = responseThreshold;
|
||||||
|
params.lineThresholdProjected = lineThresholdProjected;
|
||||||
|
params.lineThresholdBinarized = lineThresholdBinarized;
|
||||||
|
params.suppressNonmaxSize = suppressNonmaxSize;
|
||||||
|
|
||||||
|
return params;
|
||||||
|
}
|
||||||
|
|
||||||
|
CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage,
|
||||||
|
CvStarDetectorParams params CV_DEFAULT(cvStarDetectorParams()));
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
126
modules/legacy/src/features2d.cpp
Normal file
126
modules/legacy/src/features2d.cpp
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
/* Original code has been submitted by Liu Liu. Here is the copyright.
|
||||||
|
----------------------------------------------------------------------------------
|
||||||
|
* An OpenCV Implementation of SURF
|
||||||
|
* Further Information Refer to "SURF: Speed-Up Robust Feature"
|
||||||
|
* Author: Liu Liu
|
||||||
|
* liuliu.1987+opencv@gmail.com
|
||||||
|
*
|
||||||
|
* There are still several shortcomings in this experimental implementation:
|
||||||
|
* 1. The sub-pixel interpolation mentioned in the article has not been implemented yet;
|
||||||
|
* 2. A comparison with the original libSurf.so shows that the hessian detector is not a 100% match to their implementation;
|
||||||
|
* 3. Due to the above reasons, I recommend the original one for study and reuse;
|
||||||
|
*
|
||||||
|
* However, the speed of this implementation is something comparable to original one.
|
||||||
|
*
|
||||||
|
* Copyright© 2008, Liu Liu All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or
|
||||||
|
* without modification, are permitted provided that the following
|
||||||
|
* conditions are met:
|
||||||
|
* Redistributions of source code must retain the above
|
||||||
|
* copyright notice, this list of conditions and the following
|
||||||
|
* disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following
|
||||||
|
* disclaimer in the documentation and/or other materials
|
||||||
|
* provided with the distribution.
|
||||||
|
* The name of Contributor may not be used to endorse or
|
||||||
|
* promote products derived from this software without
|
||||||
|
* specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
|
||||||
|
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||||
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
|
||||||
|
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||||||
|
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
|
||||||
|
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||||
|
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
|
||||||
|
* OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precomp.hpp"
|
||||||
|
|
||||||
|
using namespace cv;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
CV_IMPL CvSURFParams cvSURFParams(double threshold, int extended)
|
||||||
|
{
|
||||||
|
CvSURFParams params;
|
||||||
|
params.hessianThreshold = threshold;
|
||||||
|
params.extended = extended;
|
||||||
|
params.upright = 0;
|
||||||
|
params.nOctaves = 4;
|
||||||
|
params.nOctaveLayers = 2;
|
||||||
|
return params;
|
||||||
|
}
|
||||||
|
|
||||||
|
CV_IMPL void
|
||||||
|
cvExtractSURF( const CvArr* _img, const CvArr* _mask,
|
||||||
|
CvSeq** _keypoints, CvSeq** _descriptors,
|
||||||
|
CvMemStorage* storage, CvSURFParams params,
|
||||||
|
int useProvidedKeyPts)
|
||||||
|
{
|
||||||
|
Mat img = cvarrToMat(_img), mask;
|
||||||
|
if(_mask)
|
||||||
|
mask = cvarrToMat(_mask);
|
||||||
|
vector<KeyPoint> kpt;
|
||||||
|
Mat descr;
|
||||||
|
|
||||||
|
Ptr<Feature2D> surf = Algorithm::create<Feature2D>("Feature2D.SURF");
|
||||||
|
if( surf.empty() )
|
||||||
|
CV_Error(CV_StsNotImplemented, "OpenCV was built without SURF support");
|
||||||
|
|
||||||
|
surf->set("hessianThreshold", params.hessianThreshold);
|
||||||
|
surf->set("nOctaves", params.nOctaves);
|
||||||
|
surf->set("nOctaveLayers", params.nOctaveLayers);
|
||||||
|
surf->set("upright", params.upright != 0);
|
||||||
|
surf->set("extended", params.extended != 0);
|
||||||
|
|
||||||
|
surf->operator()(img, mask, kpt, _descriptors ? _OutputArray(descr) : noArray(),
|
||||||
|
useProvidedKeyPts != 0);
|
||||||
|
|
||||||
|
if( _keypoints )
|
||||||
|
*_keypoints = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSURFPoint), storage);
|
||||||
|
|
||||||
|
if( _descriptors )
|
||||||
|
*_descriptors = cvCreateSeq( 0, sizeof(CvSeq), descr.cols*descr.elemSize(), storage );
|
||||||
|
|
||||||
|
for( size_t i = 0; i < kpt.size(); i++ )
|
||||||
|
{
|
||||||
|
if( _keypoints )
|
||||||
|
{
|
||||||
|
CvSURFPoint pt = cvSURFPoint(kpt[i].pt, kpt[i].class_id, cvRound(kpt[i].size));
|
||||||
|
cvSeqPush(*_keypoints, &pt);
|
||||||
|
}
|
||||||
|
if( _descriptors )
|
||||||
|
cvSeqPush(*_descriptors, descr.ptr(i));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
CV_IMPL CvSeq*
|
||||||
|
cvGetStarKeypoints( const CvArr* _img, CvMemStorage* storage,
|
||||||
|
CvStarDetectorParams params )
|
||||||
|
{
|
||||||
|
Ptr<StarDetector> star = new StarDetector(params.maxSize, params.responseThreshold,
|
||||||
|
params.lineThresholdProjected,
|
||||||
|
params.lineThresholdBinarized,
|
||||||
|
params.suppressNonmaxSize);
|
||||||
|
vector<KeyPoint> kpts;
|
||||||
|
star->detect(cvarrToMat(_img), kpts, Mat());
|
||||||
|
|
||||||
|
CvSeq* seq = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvStarKeypoint), storage);
|
||||||
|
for( size_t i = 0; i < kpts.size(); i++ )
|
||||||
|
{
|
||||||
|
CvStarKeypoint kpt = cvStarKeypoint(kpts[i].pt, cvRound(kpts[i].size), kpts[i].response);
|
||||||
|
cvSeqPush(seq, &kpt);
|
||||||
|
}
|
||||||
|
return seq;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@ -8,13 +8,9 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "precomp.hpp"
|
#include "precomp.hpp"
|
||||||
#include "opencv2/opencv_modules.hpp"
|
#include "opencv2/highgui/highgui.hpp"
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
#ifdef HAVE_OPENCV_HIGHGUI
|
|
||||||
# include "opencv2/highgui/highgui.hpp"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
namespace cv{
|
namespace cv{
|
||||||
|
|
||||||
inline int round(float value)
|
inline int round(float value)
|
||||||
@ -659,7 +655,6 @@ namespace cv{
|
|||||||
|
|
||||||
void OneWayDescriptor::Save(const char* path)
|
void OneWayDescriptor::Save(const char* path)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_OPENCV_HIGHGUI
|
|
||||||
for(int i = 0; i < m_pose_count; i++)
|
for(int i = 0; i < m_pose_count; i++)
|
||||||
{
|
{
|
||||||
char buf[1024];
|
char buf[1024];
|
||||||
@ -674,9 +669,6 @@ namespace cv{
|
|||||||
|
|
||||||
cvReleaseImage(&patch);
|
cvReleaseImage(&patch);
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
CV_Error( CV_StsNotImplemented, "This method required opencv_highgui disabled in current build" );
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void OneWayDescriptor::Write(CvFileStorage* fs, const char* name)
|
void OneWayDescriptor::Write(CvFileStorage* fs, const char* name)
|
||||||
@ -1737,9 +1729,12 @@ namespace cv{
|
|||||||
void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
|
void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
|
||||||
{
|
{
|
||||||
vector<KeyPoint> features;
|
vector<KeyPoint> features;
|
||||||
SURF surf_extractor(1.0f);
|
Ptr<FeatureDetector> surf_extractor = FeatureDetector::create("SURF");
|
||||||
|
if( surf_extractor.empty() )
|
||||||
|
CV_Error(CV_StsNotImplemented, "OpenCV was built without SURF support");
|
||||||
|
surf_extractor->set("hessianThreshold", 1.0);
|
||||||
//printf("Extracting SURF features...");
|
//printf("Extracting SURF features...");
|
||||||
surf_extractor(img, Mat(), features);
|
surf_extractor->detect(Mat(img), features);
|
||||||
//printf("done\n");
|
//printf("done\n");
|
||||||
|
|
||||||
for (int j = 0; j < (int)features.size(); j++)
|
for (int j = 0; j < (int)features.size(); j++)
|
||||||
@ -1780,7 +1775,6 @@ namespace cv{
|
|||||||
|
|
||||||
void loadPCAFeatures(const char* path, const char* images_list, vector<IplImage*>& patches, CvSize patch_size)
|
void loadPCAFeatures(const char* path, const char* images_list, vector<IplImage*>& patches, CvSize patch_size)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_OPENCV_HIGHGUI
|
|
||||||
char images_filename[1024];
|
char images_filename[1024];
|
||||||
sprintf(images_filename, "%s/%s", path, images_list);
|
sprintf(images_filename, "%s/%s", path, images_list);
|
||||||
FILE *pFile = fopen(images_filename, "r");
|
FILE *pFile = fopen(images_filename, "r");
|
||||||
@ -1809,9 +1803,6 @@ namespace cv{
|
|||||||
cvReleaseImage(&img);
|
cvReleaseImage(&img);
|
||||||
}
|
}
|
||||||
fclose(pFile);
|
fclose(pFile);
|
||||||
#else
|
|
||||||
CV_Error( CV_StsNotImplemented, "This method required opencv_highgui disabled in current build" );
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void generatePCAFeatures(const char* path, const char* img_filename, FileStorage& fs, const char* postfix,
|
void generatePCAFeatures(const char* path, const char* img_filename, FileStorage& fs, const char* postfix,
|
||||||
@ -2159,4 +2150,145 @@ namespace cv{
|
|||||||
cvReleaseMat(&_eigenvectors);
|
cvReleaseMat(&_eigenvectors);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/****************************************************************************************\
|
||||||
|
* OneWayDescriptorMatcher *
|
||||||
|
\****************************************************************************************/
|
||||||
|
|
||||||
|
OneWayDescriptorMatcher::Params::Params( int _poseCount, Size _patchSize, string _pcaFilename,
|
||||||
|
string _trainPath, string _trainImagesList,
|
||||||
|
float _minScale, float _maxScale, float _stepScale ) :
|
||||||
|
poseCount(_poseCount), patchSize(_patchSize), pcaFilename(_pcaFilename),
|
||||||
|
trainPath(_trainPath), trainImagesList(_trainImagesList),
|
||||||
|
minScale(_minScale), maxScale(_maxScale), stepScale(_stepScale)
|
||||||
|
{}
|
||||||
|
|
||||||
|
|
||||||
|
OneWayDescriptorMatcher::OneWayDescriptorMatcher( const Params& _params)
|
||||||
|
{
|
||||||
|
initialize(_params);
|
||||||
|
}
|
||||||
|
|
||||||
|
OneWayDescriptorMatcher::~OneWayDescriptorMatcher()
|
||||||
|
{}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::initialize( const Params& _params, const Ptr<OneWayDescriptorBase>& _base )
|
||||||
|
{
|
||||||
|
clear();
|
||||||
|
|
||||||
|
if( _base.empty() )
|
||||||
|
base = _base;
|
||||||
|
|
||||||
|
params = _params;
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::clear()
|
||||||
|
{
|
||||||
|
GenericDescriptorMatcher::clear();
|
||||||
|
|
||||||
|
prevTrainCount = 0;
|
||||||
|
if( !base.empty() )
|
||||||
|
base->clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::train()
|
||||||
|
{
|
||||||
|
if( base.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
|
||||||
|
{
|
||||||
|
base = new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
|
||||||
|
params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale );
|
||||||
|
|
||||||
|
base->Allocate( (int)trainPointCollection.keypointCount() );
|
||||||
|
prevTrainCount = (int)trainPointCollection.keypointCount();
|
||||||
|
|
||||||
|
const vector<vector<KeyPoint> >& points = trainPointCollection.getKeypoints();
|
||||||
|
int count = 0;
|
||||||
|
for( size_t i = 0; i < points.size(); i++ )
|
||||||
|
{
|
||||||
|
IplImage _image = trainPointCollection.getImage((int)i);
|
||||||
|
for( size_t j = 0; j < points[i].size(); j++ )
|
||||||
|
base->InitializeDescriptor( count++, &_image, points[i][j], "" );
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(_KDTREE)
|
||||||
|
base->ConvertDescriptorsArrayToTree();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool OneWayDescriptorMatcher::isMaskSupported()
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
|
||||||
|
vector<vector<DMatch> >& matches, int knn,
|
||||||
|
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
|
||||||
|
{
|
||||||
|
train();
|
||||||
|
|
||||||
|
CV_Assert( knn == 1 ); // knn > 1 unsupported because of bug in OneWayDescriptorBase for this case
|
||||||
|
|
||||||
|
matches.resize( queryKeypoints.size() );
|
||||||
|
IplImage _qimage = queryImage;
|
||||||
|
for( size_t i = 0; i < queryKeypoints.size(); i++ )
|
||||||
|
{
|
||||||
|
int descIdx = -1, poseIdx = -1;
|
||||||
|
float distance;
|
||||||
|
base->FindDescriptor( &_qimage, queryKeypoints[i].pt, descIdx, poseIdx, distance );
|
||||||
|
matches[i].push_back( DMatch((int)i, descIdx, distance) );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
|
||||||
|
vector<vector<DMatch> >& matches, float maxDistance,
|
||||||
|
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
|
||||||
|
{
|
||||||
|
train();
|
||||||
|
|
||||||
|
matches.resize( queryKeypoints.size() );
|
||||||
|
IplImage _qimage = queryImage;
|
||||||
|
for( size_t i = 0; i < queryKeypoints.size(); i++ )
|
||||||
|
{
|
||||||
|
int descIdx = -1, poseIdx = -1;
|
||||||
|
float distance;
|
||||||
|
base->FindDescriptor( &_qimage, queryKeypoints[i].pt, descIdx, poseIdx, distance );
|
||||||
|
if( distance < maxDistance )
|
||||||
|
matches[i].push_back( DMatch((int)i, descIdx, distance) );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::read( const FileNode &fn )
|
||||||
|
{
|
||||||
|
base = new OneWayDescriptorObject( params.patchSize, params.poseCount, string (), string (), string (),
|
||||||
|
params.minScale, params.maxScale, params.stepScale );
|
||||||
|
base->Read (fn);
|
||||||
|
}
|
||||||
|
|
||||||
|
void OneWayDescriptorMatcher::write( FileStorage& fs ) const
|
||||||
|
{
|
||||||
|
base->Write (fs);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool OneWayDescriptorMatcher::empty() const
|
||||||
|
{
|
||||||
|
return base.empty() || base->empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
Ptr<GenericDescriptorMatcher> OneWayDescriptorMatcher::clone( bool emptyTrainData ) const
|
||||||
|
{
|
||||||
|
OneWayDescriptorMatcher* matcher = new OneWayDescriptorMatcher( params );
|
||||||
|
|
||||||
|
if( !emptyTrainData )
|
||||||
|
{
|
||||||
|
CV_Error( CV_StsNotImplemented, "deep clone functionality is not implemented, because "
|
||||||
|
"OneWayDescriptorBase has not copy constructor or clone method ");
|
||||||
|
|
||||||
|
//matcher->base;
|
||||||
|
matcher->params = params;
|
||||||
|
matcher->prevTrainCount = prevTrainCount;
|
||||||
|
matcher->trainPointCollection = trainPointCollection;
|
||||||
|
}
|
||||||
|
return matcher;
|
||||||
|
}
|
||||||
}
|
}
|
@ -1215,5 +1215,363 @@ void FernClassifier::setVerbose(bool _verbose)
|
|||||||
{
|
{
|
||||||
verbose = _verbose;
|
verbose = _verbose;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/****************************************************************************************\
|
||||||
|
* FernDescriptorMatcher *
|
||||||
|
\****************************************************************************************/
|
||||||
|
FernDescriptorMatcher::Params::Params( int _nclasses, int _patchSize, int _signatureSize,
|
||||||
|
int _nstructs, int _structSize, int _nviews, int _compressionMethod,
|
||||||
|
const PatchGenerator& _patchGenerator ) :
|
||||||
|
nclasses(_nclasses), patchSize(_patchSize), signatureSize(_signatureSize),
|
||||||
|
nstructs(_nstructs), structSize(_structSize), nviews(_nviews),
|
||||||
|
compressionMethod(_compressionMethod), patchGenerator(_patchGenerator)
|
||||||
|
{}
|
||||||
|
|
||||||
|
FernDescriptorMatcher::Params::Params( const string& _filename )
|
||||||
|
{
|
||||||
|
filename = _filename;
|
||||||
|
}
|
||||||
|
|
||||||
|
FernDescriptorMatcher::FernDescriptorMatcher( const Params& _params )
|
||||||
|
{
|
||||||
|
prevTrainCount = 0;
|
||||||
|
params = _params;
|
||||||
|
if( !params.filename.empty() )
|
||||||
|
{
|
||||||
|
classifier = new FernClassifier;
|
||||||
|
FileStorage fs(params.filename, FileStorage::READ);
|
||||||
|
if( fs.isOpened() )
|
||||||
|
classifier->read( fs.getFirstTopLevelNode() );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
FernDescriptorMatcher::~FernDescriptorMatcher()
|
||||||
|
{}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::clear()
|
||||||
|
{
|
||||||
|
GenericDescriptorMatcher::clear();
|
||||||
|
|
||||||
|
classifier.release();
|
||||||
|
prevTrainCount = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::train()
|
||||||
|
{
|
||||||
|
if( classifier.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
|
||||||
|
{
|
||||||
|
assert( params.filename.empty() );
|
||||||
|
|
||||||
|
vector<vector<Point2f> > points( trainPointCollection.imageCount() );
|
||||||
|
for( size_t imgIdx = 0; imgIdx < trainPointCollection.imageCount(); imgIdx++ )
|
||||||
|
KeyPoint::convert( trainPointCollection.getKeypoints((int)imgIdx), points[imgIdx] );
|
||||||
|
|
||||||
|
classifier = new FernClassifier( points, trainPointCollection.getImages(), vector<vector<int> >(), 0, // each point is a separate class
|
||||||
|
params.patchSize, params.signatureSize, params.nstructs, params.structSize,
|
||||||
|
params.nviews, params.compressionMethod, params.patchGenerator );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool FernDescriptorMatcher::isMaskSupported()
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt,
|
||||||
|
float& bestProb, int& bestMatchIdx, vector<float>& signature )
|
||||||
|
{
|
||||||
|
(*classifier)( image, pt, signature);
|
||||||
|
|
||||||
|
bestProb = -FLT_MAX;
|
||||||
|
bestMatchIdx = -1;
|
||||||
|
for( int ci = 0; ci < classifier->getClassCount(); ci++ )
|
||||||
|
{
|
||||||
|
if( signature[ci] > bestProb )
|
||||||
|
{
|
||||||
|
bestProb = signature[ci];
|
||||||
|
bestMatchIdx = ci;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
|
||||||
|
vector<vector<DMatch> >& matches, int knn,
|
||||||
|
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
|
||||||
|
{
|
||||||
|
train();
|
||||||
|
|
||||||
|
matches.resize( queryKeypoints.size() );
|
||||||
|
vector<float> signature( (size_t)classifier->getClassCount() );
|
||||||
|
|
||||||
|
for( size_t queryIdx = 0; queryIdx < queryKeypoints.size(); queryIdx++ )
|
||||||
|
{
|
||||||
|
(*classifier)( queryImage, queryKeypoints[queryIdx].pt, signature);
|
||||||
|
|
||||||
|
for( int k = 0; k < knn; k++ )
|
||||||
|
{
|
||||||
|
DMatch bestMatch;
|
||||||
|
size_t best_ci = 0;
|
||||||
|
for( size_t ci = 0; ci < signature.size(); ci++ )
|
||||||
|
{
|
||||||
|
if( -signature[ci] < bestMatch.distance )
|
||||||
|
{
|
||||||
|
int imgIdx = -1, trainIdx = -1;
|
||||||
|
trainPointCollection.getLocalIdx( (int)ci , imgIdx, trainIdx );
|
||||||
|
bestMatch = DMatch( (int)queryIdx, trainIdx, imgIdx, -signature[ci] );
|
||||||
|
best_ci = ci;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if( bestMatch.trainIdx == -1 )
|
||||||
|
break;
|
||||||
|
signature[best_ci] = -std::numeric_limits<float>::max();
|
||||||
|
matches[queryIdx].push_back( bestMatch );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
|
||||||
|
vector<vector<DMatch> >& matches, float maxDistance,
|
||||||
|
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
|
||||||
|
{
|
||||||
|
train();
|
||||||
|
matches.resize( queryKeypoints.size() );
|
||||||
|
vector<float> signature( (size_t)classifier->getClassCount() );
|
||||||
|
|
||||||
|
for( size_t i = 0; i < queryKeypoints.size(); i++ )
|
||||||
|
{
|
||||||
|
(*classifier)( queryImage, queryKeypoints[i].pt, signature);
|
||||||
|
|
||||||
|
for( int ci = 0; ci < classifier->getClassCount(); ci++ )
|
||||||
|
{
|
||||||
|
if( -signature[ci] < maxDistance )
|
||||||
|
{
|
||||||
|
int imgIdx = -1, trainIdx = -1;
|
||||||
|
trainPointCollection.getLocalIdx( ci , imgIdx, trainIdx );
|
||||||
|
matches[i].push_back( DMatch( (int)i, trainIdx, imgIdx, -signature[ci] ) );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::read( const FileNode &fn )
|
||||||
|
{
|
||||||
|
params.nclasses = fn["nclasses"];
|
||||||
|
params.patchSize = fn["patchSize"];
|
||||||
|
params.signatureSize = fn["signatureSize"];
|
||||||
|
params.nstructs = fn["nstructs"];
|
||||||
|
params.structSize = fn["structSize"];
|
||||||
|
params.nviews = fn["nviews"];
|
||||||
|
params.compressionMethod = fn["compressionMethod"];
|
||||||
|
|
||||||
|
//classifier->read(fn);
|
||||||
|
}
|
||||||
|
|
||||||
|
void FernDescriptorMatcher::write( FileStorage& fs ) const
|
||||||
|
{
|
||||||
|
fs << "nclasses" << params.nclasses;
|
||||||
|
fs << "patchSize" << params.patchSize;
|
||||||
|
fs << "signatureSize" << params.signatureSize;
|
||||||
|
fs << "nstructs" << params.nstructs;
|
||||||
|
fs << "structSize" << params.structSize;
|
||||||
|
fs << "nviews" << params.nviews;
|
||||||
|
fs << "compressionMethod" << params.compressionMethod;
|
||||||
|
|
||||||
|
// classifier->write(fs);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool FernDescriptorMatcher::empty() const
|
||||||
|
{
|
||||||
|
return classifier.empty() || classifier->empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
Ptr<GenericDescriptorMatcher> FernDescriptorMatcher::clone( bool emptyTrainData ) const
|
||||||
|
{
|
||||||
|
FernDescriptorMatcher* matcher = new FernDescriptorMatcher( params );
|
||||||
|
if( !emptyTrainData )
|
||||||
|
{
|
||||||
|
CV_Error( CV_StsNotImplemented, "deep clone dunctionality is not implemented, because "
|
||||||
|
"FernClassifier has not copy constructor or clone method ");
|
||||||
|
|
||||||
|
//matcher->classifier;
|
||||||
|
matcher->params = params;
|
||||||
|
matcher->prevTrainCount = prevTrainCount;
|
||||||
|
matcher->trainPointCollection = trainPointCollection;
|
||||||
|
}
|
||||||
|
return matcher;
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////// Planar Object Detector ////////////////////////////////////
|
||||||
|
|
||||||
|
PlanarObjectDetector::PlanarObjectDetector()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
PlanarObjectDetector::PlanarObjectDetector(const FileNode& node)
|
||||||
|
{
|
||||||
|
read(node);
|
||||||
|
}
|
||||||
|
|
||||||
|
PlanarObjectDetector::PlanarObjectDetector(const vector<Mat>& pyr, int npoints,
|
||||||
|
int patchSize, int nstructs, int structSize,
|
||||||
|
int nviews, const LDetector& detector,
|
||||||
|
const PatchGenerator& patchGenerator)
|
||||||
|
{
|
||||||
|
train(pyr, npoints, patchSize, nstructs,
|
||||||
|
structSize, nviews, detector, patchGenerator);
|
||||||
|
}
|
||||||
|
|
||||||
|
PlanarObjectDetector::~PlanarObjectDetector()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
vector<KeyPoint> PlanarObjectDetector::getModelPoints() const
|
||||||
|
{
|
||||||
|
return modelPoints;
|
||||||
|
}
|
||||||
|
|
||||||
|
void PlanarObjectDetector::train(const vector<Mat>& pyr, int npoints,
|
||||||
|
int patchSize, int nstructs, int structSize,
|
||||||
|
int nviews, const LDetector& detector,
|
||||||
|
const PatchGenerator& patchGenerator)
|
||||||
|
{
|
||||||
|
modelROI = Rect(0, 0, pyr[0].cols, pyr[0].rows);
|
||||||
|
ldetector = detector;
|
||||||
|
ldetector.setVerbose(verbose);
|
||||||
|
ldetector.getMostStable2D(pyr[0], modelPoints, npoints, patchGenerator);
|
||||||
|
|
||||||
|
npoints = (int)modelPoints.size();
|
||||||
|
fernClassifier.setVerbose(verbose);
|
||||||
|
fernClassifier.trainFromSingleView(pyr[0], modelPoints,
|
||||||
|
patchSize, (int)modelPoints.size(), nstructs, structSize, nviews,
|
||||||
|
FernClassifier::COMPRESSION_NONE, patchGenerator);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PlanarObjectDetector::train(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
|
||||||
|
int patchSize, int nstructs, int structSize,
|
||||||
|
int nviews, const LDetector& detector,
|
||||||
|
const PatchGenerator& patchGenerator)
|
||||||
|
{
|
||||||
|
modelROI = Rect(0, 0, pyr[0].cols, pyr[0].rows);
|
||||||
|
ldetector = detector;
|
||||||
|
ldetector.setVerbose(verbose);
|
||||||
|
modelPoints.resize(keypoints.size());
|
||||||
|
std::copy(keypoints.begin(), keypoints.end(), modelPoints.begin());
|
||||||
|
|
||||||
|
fernClassifier.setVerbose(verbose);
|
||||||
|
fernClassifier.trainFromSingleView(pyr[0], modelPoints,
|
||||||
|
patchSize, (int)modelPoints.size(), nstructs, structSize, nviews,
|
||||||
|
FernClassifier::COMPRESSION_NONE, patchGenerator);
|
||||||
|
}
|
||||||
|
|
||||||
|
void PlanarObjectDetector::read(const FileNode& node)
|
||||||
|
{
|
||||||
|
FileNodeIterator it = node["model-roi"].begin(), it_end;
|
||||||
|
it >> modelROI.x >> modelROI.y >> modelROI.width >> modelROI.height;
|
||||||
|
ldetector.read(node["detector"]);
|
||||||
|
fernClassifier.read(node["fern-classifier"]);
|
||||||
|
cv::read(node["model-points"], modelPoints);
|
||||||
|
CV_Assert(modelPoints.size() == (size_t)fernClassifier.getClassCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void PlanarObjectDetector::write(FileStorage& fs, const String& objname) const
|
||||||
|
{
|
||||||
|
WriteStructContext ws(fs, objname, CV_NODE_MAP);
|
||||||
|
|
||||||
|
{
|
||||||
|
WriteStructContext wsroi(fs, "model-roi", CV_NODE_SEQ + CV_NODE_FLOW);
|
||||||
|
cv::write(fs, modelROI.x);
|
||||||
|
cv::write(fs, modelROI.y);
|
||||||
|
cv::write(fs, modelROI.width);
|
||||||
|
cv::write(fs, modelROI.height);
|
||||||
|
}
|
||||||
|
ldetector.write(fs, "detector");
|
||||||
|
cv::write(fs, "model-points", modelPoints);
|
||||||
|
fernClassifier.write(fs, "fern-classifier");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool PlanarObjectDetector::operator()(const Mat& image, Mat& H, vector<Point2f>& corners) const
|
||||||
|
{
|
||||||
|
vector<Mat> pyr;
|
||||||
|
buildPyramid(image, pyr, ldetector.nOctaves - 1);
|
||||||
|
vector<KeyPoint> keypoints;
|
||||||
|
ldetector(pyr, keypoints);
|
||||||
|
|
||||||
|
return (*this)(pyr, keypoints, H, corners);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool PlanarObjectDetector::operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
|
||||||
|
Mat& matH, vector<Point2f>& corners, vector<int>* pairs) const
|
||||||
|
{
|
||||||
|
int i, j, m = (int)modelPoints.size(), n = (int)keypoints.size();
|
||||||
|
vector<int> bestMatches(m, -1);
|
||||||
|
vector<float> maxLogProb(m, -FLT_MAX);
|
||||||
|
vector<float> signature;
|
||||||
|
vector<Point2f> fromPt, toPt;
|
||||||
|
|
||||||
|
for( i = 0; i < n; i++ )
|
||||||
|
{
|
||||||
|
KeyPoint kpt = keypoints[i];
|
||||||
|
CV_Assert(0 <= kpt.octave && kpt.octave < (int)pyr.size());
|
||||||
|
kpt.pt.x /= (float)(1 << kpt.octave);
|
||||||
|
kpt.pt.y /= (float)(1 << kpt.octave);
|
||||||
|
int k = fernClassifier(pyr[kpt.octave], kpt.pt, signature);
|
||||||
|
if( k >= 0 && (bestMatches[k] < 0 || signature[k] > maxLogProb[k]) )
|
||||||
|
{
|
||||||
|
maxLogProb[k] = signature[k];
|
||||||
|
bestMatches[k] = i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if(pairs)
|
||||||
|
pairs->resize(0);
|
||||||
|
|
||||||
|
for( i = 0; i < m; i++ )
|
||||||
|
if( bestMatches[i] >= 0 )
|
||||||
|
{
|
||||||
|
fromPt.push_back(modelPoints[i].pt);
|
||||||
|
toPt.push_back(keypoints[bestMatches[i]].pt);
|
||||||
|
}
|
||||||
|
|
||||||
|
if( fromPt.size() < 4 )
|
||||||
|
return false;
|
||||||
|
|
||||||
|
vector<uchar> mask;
|
||||||
|
matH = findHomography(fromPt, toPt, RANSAC, 10, mask);
|
||||||
|
if( matH.data )
|
||||||
|
{
|
||||||
|
const Mat_<double>& H = matH;
|
||||||
|
corners.resize(4);
|
||||||
|
for( i = 0; i < 4; i++ )
|
||||||
|
{
|
||||||
|
Point2f pt((float)(modelROI.x + (i == 0 || i == 3 ? 0 : modelROI.width)),
|
||||||
|
(float)(modelROI.y + (i <= 1 ? 0 : modelROI.height)));
|
||||||
|
double w = 1./(H(2,0)*pt.x + H(2,1)*pt.y + H(2,2));
|
||||||
|
corners[i] = Point2f((float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w),
|
||||||
|
(float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if( pairs )
|
||||||
|
{
|
||||||
|
for( i = j = 0; i < m; i++ )
|
||||||
|
if( bestMatches[i] >= 0 && mask[j++] )
|
||||||
|
{
|
||||||
|
pairs->push_back(i);
|
||||||
|
pairs->push_back(bestMatches[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return matH.data != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void PlanarObjectDetector::setVerbose(bool _verbose)
|
||||||
|
{
|
||||||
|
verbose = _verbose;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
3
modules/legacy/test/test_main.cpp
Normal file
3
modules/legacy/test/test_main.cpp
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
#include "test_precomp.hpp"
|
||||||
|
|
||||||
|
CV_TEST_MAIN("cv")
|
1
modules/legacy/test/test_precomp.cpp
Normal file
1
modules/legacy/test/test_precomp.cpp
Normal file
@ -0,0 +1 @@
|
|||||||
|
#include "test_precomp.hpp"
|
11
modules/legacy/test/test_precomp.hpp
Normal file
11
modules/legacy/test/test_precomp.hpp
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
#ifndef __OPENCV_TEST_PRECOMP_HPP__
|
||||||
|
#define __OPENCV_TEST_PRECOMP_HPP__
|
||||||
|
|
||||||
|
#include "opencv2/ts/ts.hpp"
|
||||||
|
#include "opencv2/imgproc/imgproc.hpp"
|
||||||
|
#include "opencv2/imgproc/imgproc_c.h"
|
||||||
|
#include "opencv2/highgui/highgui.hpp"
|
||||||
|
#include "opencv2/highgui/highgui_c.h"
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
#endif
|
2
modules/nonfree/CMakeLists.txt
Normal file
2
modules/nonfree/CMakeLists.txt
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
set(the_description "Functionality with possible limitations on the use")
|
||||||
|
ocv_define_module(nonfree opencv_imgproc opencv_features2d)
|
172
modules/nonfree/doc/feature_detection.rst
Normal file
172
modules/nonfree/doc/feature_detection.rst
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
Feature Detection and Description
|
||||||
|
=================================
|
||||||
|
|
||||||
|
SIFT
|
||||||
|
----
|
||||||
|
.. ocv:class:: SIFT
|
||||||
|
|
||||||
|
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) approach. ::
|
||||||
|
|
||||||
|
class CV_EXPORTS SIFT
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
struct CommonParams
|
||||||
|
{
|
||||||
|
static const int DEFAULT_NOCTAVES = 4;
|
||||||
|
static const int DEFAULT_NOCTAVE_LAYERS = 3;
|
||||||
|
static const int DEFAULT_FIRST_OCTAVE = -1;
|
||||||
|
enum{ FIRST_ANGLE = 0, AVERAGE_ANGLE = 1 };
|
||||||
|
|
||||||
|
CommonParams();
|
||||||
|
CommonParams( int _nOctaves, int _nOctaveLayers, int _firstOctave,
|
||||||
|
int _angleMode );
|
||||||
|
int nOctaves, nOctaveLayers, firstOctave;
|
||||||
|
int angleMode;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct DetectorParams
|
||||||
|
{
|
||||||
|
static double GET_DEFAULT_THRESHOLD()
|
||||||
|
{ return 0.04 / SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS / 2.0; }
|
||||||
|
static double GET_DEFAULT_EDGE_THRESHOLD() { return 10.0; }
|
||||||
|
|
||||||
|
DetectorParams();
|
||||||
|
DetectorParams( double _threshold, double _edgeThreshold );
|
||||||
|
double threshold, edgeThreshold;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct DescriptorParams
|
||||||
|
{
|
||||||
|
static double GET_DEFAULT_MAGNIFICATION() { return 3.0; }
|
||||||
|
static const bool DEFAULT_IS_NORMALIZE = true;
|
||||||
|
static const int DESCRIPTOR_SIZE = 128;
|
||||||
|
|
||||||
|
DescriptorParams();
|
||||||
|
DescriptorParams( double _magnification, bool _isNormalize,
|
||||||
|
bool _recalculateAngles );
|
||||||
|
double magnification;
|
||||||
|
bool isNormalize;
|
||||||
|
bool recalculateAngles;
|
||||||
|
};
|
||||||
|
|
||||||
|
SIFT();
|
||||||
|
//! sift-detector constructor
|
||||||
|
SIFT( double _threshold, double _edgeThreshold,
|
||||||
|
int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
|
||||||
|
int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
|
||||||
|
int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
|
||||||
|
int _angleMode=CommonParams::FIRST_ANGLE );
|
||||||
|
//! sift-descriptor constructor
|
||||||
|
SIFT( double _magnification, bool _isNormalize=true,
|
||||||
|
bool _recalculateAngles = true,
|
||||||
|
int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
|
||||||
|
int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
|
||||||
|
int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
|
||||||
|
int _angleMode=CommonParams::FIRST_ANGLE );
|
||||||
|
SIFT( const CommonParams& _commParams,
|
||||||
|
const DetectorParams& _detectorParams = DetectorParams(),
|
||||||
|
const DescriptorParams& _descriptorParams = DescriptorParams() );
|
||||||
|
|
||||||
|
//! returns the descriptor size in floats (128)
|
||||||
|
int descriptorSize() const { return DescriptorParams::DESCRIPTOR_SIZE; }
|
||||||
|
//! finds the keypoints using the SIFT algorithm
|
||||||
|
void operator()(const Mat& img, const Mat& mask,
|
||||||
|
vector<KeyPoint>& keypoints) const;
|
||||||
|
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
|
||||||
|
//! Optionally it can compute descriptors for the user-provided keypoints
|
||||||
|
void operator()(const Mat& img, const Mat& mask,
|
||||||
|
vector<KeyPoint>& keypoints,
|
||||||
|
Mat& descriptors,
|
||||||
|
bool useProvidedKeypoints=false) const;
|
||||||
|
|
||||||
|
CommonParams getCommonParams () const { return commParams; }
|
||||||
|
DetectorParams getDetectorParams () const { return detectorParams; }
|
||||||
|
DescriptorParams getDescriptorParams () const { return descriptorParams; }
|
||||||
|
protected:
|
||||||
|
...
|
||||||
|
};
|
||||||
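A minimal usage sketch following the interface listed above (the input file name is hypothetical)::

    #include "opencv2/nonfree/features2d.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include <vector>

    int main()
    {
        cv::Mat img = cv::imread("book.png", 0);      // SIFT operates on grayscale images
        cv::SIFT sift;                                // default detector/descriptor parameters
        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        sift(img, cv::Mat(), keypoints, descriptors); // detect keypoints and compute descriptors
        return 0;
    }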
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
SURF
|
||||||
|
----
|
||||||
|
.. ocv:class:: SURF
|
||||||
|
|
||||||
|
Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from the ``CvSURFParams`` structure, which specifies the algorithm parameters:
|
||||||
|
|
||||||
|
.. ocv:member:: int extended
|
||||||
|
|
||||||
|
* 0 means that the basic descriptors (64 elements each) shall be computed
|
||||||
|
* 1 means that the extended descriptors (128 elements each) shall be computed
|
||||||
|
|
||||||
|
.. ocv:member:: int upright
|
||||||
|
|
||||||
|
* 0 means that the detector computes the orientation of each feature.
|
||||||
|
* 1 means that the orientation is not computed (which is much, much faster). For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting ``upright=1``.
|
||||||
|
|
||||||
|
.. ocv:member:: double hessianThreshold
|
||||||
|
|
||||||
|
Threshold for the keypoint detector. Only features whose hessian is larger than ``hessianThreshold`` are retained by the detector. Therefore, the larger the value, the fewer keypoints you will get. A good default value could be from 300 to 500, depending on the image contrast.
|
||||||
|
|
||||||
|
.. ocv:member:: int nOctaves
|
||||||
|
|
||||||
|
The number of Gaussian pyramid octaves that the detector uses. It is set to 4 by default. If you want to detect very large features, use a larger value. If you only need small features, decrease it.
|
||||||
|
|
||||||
|
.. ocv:member:: int nOctaveLayers
|
||||||
|
|
||||||
|
The number of images within each octave of the Gaussian pyramid. It is set to 2 by default.
|
||||||
|
|
||||||
|
|
||||||
|
.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
|
||||||
|
|
||||||
|
|
||||||
|
SURF::SURF
|
||||||
|
----------
|
||||||
|
The SURF extractor constructors.
|
||||||
|
|
||||||
|
.. ocv:function:: SURF::SURF()
|
||||||
|
|
||||||
|
.. ocv:function:: SURF::SURF(double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=false, bool upright=false)
|
||||||
|
|
||||||
|
.. ocv:pyfunction:: cv2.SURF(_hessianThreshold[, _nOctaves[, _nOctaveLayers[, _extended[, _upright]]]]) -> <SURF object>
|
||||||
|
|
||||||
|
:param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
|
||||||
|
|
||||||
|
:param nOctaves: Number of pyramid octaves the keypoint detector will use.
|
||||||
|
|
||||||
|
:param nOctaveLayers: Number of octave layers within each octave.
|
||||||
|
|
||||||
|
:param extended: Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).
|
||||||
|
|
||||||
|
:param upright: Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation).
|
||||||
|
|
||||||
|
|
||||||
|
SURF::operator()
|
||||||
|
----------------
|
||||||
|
Detects keypoints and computes SURF descriptors for them.
|
||||||
|
|
||||||
|
.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints)
|
||||||
|
.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints, vector<float>& descriptors, bool useProvidedKeypoints=false)
|
||||||
|
|
||||||
|
.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
|
||||||
|
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
|
||||||
|
|
||||||
|
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
|
||||||
|
|
||||||
|
.. ocv:pyoldfunction:: cv.ExtractSURF(image, mask, storage, params)-> (keypoints, descriptors)
|
||||||
|
|
||||||
|
:param image: Input 8-bit grayscale image
|
||||||
|
|
||||||
|
:param mask: Optional input mask that marks the regions where we should detect features.
|
||||||
|
|
||||||
|
:param keypoints: The input/output vector of keypoints
|
||||||
|
|
||||||
|
:param descriptors: The output concatenated vectors of descriptors. Each descriptor is a 64- or 128-element vector, as returned by ``SURF::descriptorSize()``. So the total size of ``descriptors`` will be ``keypoints.size()*descriptorSize()``.
|
||||||
|
|
||||||
|
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
|
||||||
|
|
||||||
|
:param storage: Memory storage for the output keypoints and descriptors in OpenCV 1.x API.
|
||||||
|
|
||||||
|
:param params: SURF algorithm parameters in OpenCV 1.x API.
|
||||||
|
|
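A minimal usage sketch of the C++ interface documented above (not part of the original ``.rst`` file in this diff); it assumes the ``nonfree`` module is built and linked and that the two sample images exist. Note that the constructor declared in the new ``nonfree`` header takes the parameters in a different order (``SURF(double hessianThreshold, bool extended=true, bool upright=false, int nOctaves=4, int nOctaveLayers=2)``), so the sketch only passes the threshold::

    #include <vector>
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/nonfree/features2d.hpp"   // SURF now lives in the nonfree module

    using namespace cv;

    int main()
    {
        // hypothetical input images; any pair of overlapping views works
        Mat img1 = imread("box.png", 0), img2 = imread("box_in_scene.png", 0);

        SURF surf(400.);                    // hessianThreshold = 400, other parameters default
        std::vector<KeyPoint> kp1, kp2;
        Mat desc1, desc2;
        surf(img1, Mat(), kp1, desc1);      // detect keypoints and compute descriptors
        surf(img2, Mat(), kp2, desc2);

        BFMatcher matcher(NORM_L2);         // L2 norm suits the floating-point SURF descriptors
        std::vector<DMatch> matches;
        matcher.match(desc1, desc2, matches);
        return 0;
    }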
10
modules/nonfree/doc/nonfree.rst
Normal file
@ -0,0 +1,10 @@
********************************
nonfree. Non-free functionality
********************************

The module contains algorithms that may be patented in some countries or have some other limitations on their use.

.. toctree::
    :maxdepth: 2

    feature_detection
155
modules/nonfree/include/opencv2/nonfree/features2d.hpp
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||||
|
//
|
||||||
|
// By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
// If you do not agree to this license, do not download, install,
|
||||||
|
// copy or use the software.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// License Agreement
|
||||||
|
// For Open Source Computer Vision Library
|
||||||
|
//
|
||||||
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||||
|
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||||
|
// Third party copyrights are property of their respective owners.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
// are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// * Redistribution's of source code must retain the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer.
|
||||||
|
//
|
||||||
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// * The name of the copyright holders may not be used to endorse or promote products
|
||||||
|
// derived from this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
// any express or implied warranties, including, but not limited to, the implied
|
||||||
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||||
|
// indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
// (including, but not limited to, procurement of substitute goods or services;
|
||||||
|
// loss of use, data, or profits; or business interruption) however caused
|
||||||
|
// and on any theory of liability, whether in contract, strict liability,
|
||||||
|
// or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
// the use of this software, even if advised of the possibility of such damage.
|
||||||
|
//
|
||||||
|
//M*/

#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__
#define __OPENCV_NONFREE_FEATURES_2D_HPP__

#include "opencv2/features2d/features2d.hpp"

#ifdef __cplusplus

namespace cv
{

/*!
 SIFT implementation.

 The class implements SIFT algorithm by D. Lowe.
*/
class CV_EXPORTS_W SIFT : public Feature2D
{
public:
    explicit SIFT( int _nfeatures=0, int _nOctaveLayers=3,
                   double _contrastThreshold=0.04, double _edgeThreshold=10,
                   double _sigma=1.6);

    //! returns the descriptor size in floats (128)
    int descriptorSize() const;

    //! returns the descriptor type
    int descriptorType() const;

    //! finds the keypoints using SIFT algorithm
    void operator()(InputArray img, InputArray mask,
                    vector<KeyPoint>& keypoints) const;
    //! finds the keypoints and computes descriptors for them using SIFT algorithm.
    //! Optionally it can compute descriptors for the user-provided keypoints
    void operator()(InputArray img, InputArray mask,
                    vector<KeyPoint>& keypoints,
                    OutputArray descriptors,
                    bool useProvidedKeypoints=false) const;

    AlgorithmInfo* info() const;

    void buildGaussianPyramid( const Mat& base, vector<Mat>& pyr, int nOctaves ) const;
    void buildDoGPyramid( const vector<Mat>& pyr, vector<Mat>& dogpyr ) const;
    void findScaleSpaceExtrema( const vector<Mat>& gauss_pyr, const vector<Mat>& dog_pyr,
                                vector<KeyPoint>& keypoints ) const;

protected:
    void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
    void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;

    CV_PROP_RW int nfeatures;
    CV_PROP_RW int nOctaveLayers;
    CV_PROP_RW double contrastThreshold;
    CV_PROP_RW double edgeThreshold;
    CV_PROP_RW double sigma;
};

typedef SIFT SiftFeatureDetector;
typedef SIFT SiftDescriptorExtractor;

/*!
 SURF implementation.

 The class implements SURF algorithm by H. Bay et al.
*/
class CV_EXPORTS_W SURF : public Feature2D
{
public:
    //! the default constructor
    SURF();
    //! the full constructor taking all the necessary parameters
    explicit SURF(double _hessianThreshold,
                  bool _extended=true, bool _upright=false,
                  int _nOctaves=4, int _nOctaveLayers=2);

    //! returns the descriptor size in float's (64 or 128)
    int descriptorSize() const;

    //! returns the descriptor type
    int descriptorType() const;

    //! finds the keypoints using fast hessian detector used in SURF
    void operator()(InputArray img, InputArray mask,
                    CV_OUT vector<KeyPoint>& keypoints) const;
    //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
    void operator()(InputArray img, InputArray mask,
                    CV_OUT vector<KeyPoint>& keypoints,
                    OutputArray descriptors,
                    bool useProvidedKeypoints=false) const;

    AlgorithmInfo* info() const;

    CV_PROP_RW double hessianThreshold;
    CV_PROP_RW int nOctaves;
    CV_PROP_RW int nOctaveLayers;
    CV_PROP_RW bool extended;
    CV_PROP_RW bool upright;

protected:

    void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
    void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
};

typedef SURF SurfFeatureDetector;
typedef SURF SurfDescriptorExtractor;

} /* namespace cv */

#endif /* __cplusplus */

#endif

/* End of file. */
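A brief usage sketch (not part of the header above; the parameter values shown are just the declared defaults) of calling the new SIFT class directly; the SiftFeatureDetector/SiftDescriptorExtractor typedefs keep older detector/extractor code compiling.

#include <vector>
#include "opencv2/nonfree/features2d.hpp"

void describeImage(const cv::Mat& img)               // img: 8-bit grayscale input
{
    cv::SIFT sift;                                    // nfeatures=0, nOctaveLayers=3, 0.04, 10, 1.6
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;                              // becomes keypoints.size() x 128, CV_32F
    sift(img, cv::Mat(), keypoints, descriptors);     // detect and compute in one call
}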
57
modules/nonfree/include/opencv2/nonfree/nonfree.hpp
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||||
|
//
|
||||||
|
// By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
// If you do not agree to this license, do not download, install,
|
||||||
|
// copy or use the software.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// License Agreement
|
||||||
|
// For Open Source Computer Vision Library
|
||||||
|
//
|
||||||
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||||
|
// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.
|
||||||
|
// Third party copyrights are property of their respective owners.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
// are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// * Redistribution's of source code must retain the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer.
|
||||||
|
//
|
||||||
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// * The name of the copyright holders may not be used to endorse or promote products
|
||||||
|
// derived from this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
// any express or implied warranties, including, but not limited to, the implied
|
||||||
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||||
|
// indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
// (including, but not limited to, procurement of substitute goods or services;
|
||||||
|
// loss of use, data, or profits; or business interruption) however caused
|
||||||
|
// and on any theory of liability, whether in contract, strict liability,
|
||||||
|
// or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
// the use of this software, even if advised of the possibility of such damage.
|
||||||
|
//
|
||||||
|
//M*/

#ifndef __OPENCV_NONFREE_HPP__
#define __OPENCV_NONFREE_HPP__

#include "opencv2/nonfree/features2d.hpp"

namespace cv
{

CV_EXPORTS bool initModule_nonfree(void);

}

#endif

/* End of file. */
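A short sketch of the factory-based route that initModule_nonfree() above enables; the algorithm name and parameter name match the ones registered in sift.cpp later in this diff. Calling initModule_nonfree() first is the typical pattern to make sure the nonfree algorithms are registered; whether it is strictly required can depend on how the library is built and linked.

#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/features2d/features2d.hpp"

cv::Ptr<cv::Feature2D> makeSift()
{
    cv::initModule_nonfree();
    cv::Ptr<cv::Feature2D> f = cv::Algorithm::create<cv::Feature2D>("Feature2D.SIFT");
    if( !f.empty() )
        f->set("contrastThreshold", 0.03);   // parameters are settable by name
    return f;
}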
3
modules/nonfree/perf/perf_main.cpp
Normal file
@ -0,0 +1,3 @@
#include "perf_precomp.hpp"

CV_PERF_TEST_MAIN(nonfree)
1
modules/nonfree/perf/perf_precomp.cpp
Normal file
@ -0,0 +1 @@
#include "perf_precomp.hpp"
12
modules/nonfree/perf/perf_precomp.hpp
Normal file
@ -0,0 +1,12 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/highgui/highgui.hpp"

#if GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

#endif
44
modules/nonfree/src/precomp.cpp
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||||
|
//
|
||||||
|
// By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
// If you do not agree to this license, do not download, install,
|
||||||
|
// copy or use the software.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Intel License Agreement
|
||||||
|
// For Open Source Computer Vision Library
|
||||||
|
//
|
||||||
|
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||||
|
// Third party copyrights are property of their respective owners.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
// are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// * Redistribution's of source code must retain the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer.
|
||||||
|
//
|
||||||
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||||
|
// derived from this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
// any express or implied warranties, including, but not limited to, the implied
|
||||||
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||||
|
// indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
// (including, but not limited to, procurement of substitute goods or services;
|
||||||
|
// loss of use, data, or profits; or business interruption) however caused
|
||||||
|
// and on any theory of liability, whether in contract, strict liability,
|
||||||
|
// or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
// the use of this software, even if advised of the possibility of such damage.
|
||||||
|
//
|
||||||
|
//M*/

#include "precomp.hpp"

/* End of file. */
58
modules/nonfree/src/precomp.hpp
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||||
|
//
|
||||||
|
// By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
// If you do not agree to this license, do not download, install,
|
||||||
|
// copy or use the software.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// License Agreement
|
||||||
|
// For Open Source Computer Vision Library
|
||||||
|
//
|
||||||
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||||
|
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||||
|
// Third party copyrights are property of their respective owners.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
// are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// * Redistribution's of source code must retain the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer.
|
||||||
|
//
|
||||||
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// * The name of the copyright holders may not be used to endorse or promote products
|
||||||
|
// derived from this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
// any express or implied warranties, including, but not limited to, the implied
|
||||||
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||||
|
// indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
// (including, but not limited to, procurement of substitute goods or services;
|
||||||
|
// loss of use, data, or profits; or business interruption) however caused
|
||||||
|
// and on any theory of liability, whether in contract, strict liability,
|
||||||
|
// or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
// the use of this software, even if advised of the possibility of such damage.
|
||||||
|
//
|
||||||
|
//M*/

#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__

#if _MSC_VER >= 1200
#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
#endif

#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif

#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/internal.hpp"

#endif
772
modules/nonfree/src/sift.cpp
Normal file
@ -0,0 +1,772 @@
|
|||||||
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||||
|
//
|
||||||
|
// By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
// If you do not agree to this license, do not download, install,
|
||||||
|
// copy or use the software.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Intel License Agreement
|
||||||
|
// For Open Source Computer Vision Library
|
||||||
|
//
|
||||||
|
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||||
|
// Third party copyrights are property of their respective owners.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
// are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// * Redistribution's of source code must retain the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer.
|
||||||
|
//
|
||||||
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||||
|
// derived from this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
// any express or implied warranties, including, but not limited to, the implied
|
||||||
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||||
|
// indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
// (including, but not limited to, procurement of substitute goods or services;
|
||||||
|
// loss of use, data, or profits; or business interruption) however caused
|
||||||
|
// and on any theory of liability, whether in contract, strict liability,
|
||||||
|
// or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
// the use of this software, even if advised of the possibility of such damage.
|
||||||
|
//
|
||||||
|
//M*/
|
||||||
|
|
||||||
|
/**********************************************************************************************\
|
||||||
|
Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
|
||||||
|
Below is the original copyright.
|
||||||
|
|
||||||
|
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
|
||||||
|
// All rights reserved.
|
||||||
|
|
||||||
|
// The following patent has been issued for methods embodied in this
|
||||||
|
// software: "Method and apparatus for identifying scale invariant features
|
||||||
|
// in an image and use of same for locating an object in an image," David
|
||||||
|
// G. Lowe, US Patent 6,711,293 (March 23, 2004). Provisional application
|
||||||
|
// filed March 8, 1999. Asignee: The University of British Columbia. For
|
||||||
|
// further details, contact David Lowe (lowe@cs.ubc.ca) or the
|
||||||
|
// University-Industry Liaison Office of the University of British
|
||||||
|
// Columbia.
|
||||||
|
|
||||||
|
// Note that restrictions imposed by this patent (and possibly others)
|
||||||
|
// exist independently of and may be in conflict with the freedoms granted
|
||||||
|
// in this license, which refers to copyright of the program, not patents
|
||||||
|
// for any methods that it implements. Both copyright and patent law must
|
||||||
|
// be obeyed to legally use and redistribute this program and it is not the
|
||||||
|
// purpose of this license to induce you to infringe any patents or other
|
||||||
|
// property right claims or to contest validity of any such claims. If you
|
||||||
|
// redistribute or use the program, then this license merely protects you
|
||||||
|
// from committing copyright infringement. It does not protect you from
|
||||||
|
// committing patent infringement. So, before you do anything with this
|
||||||
|
// program, make sure that you have permission to do so not merely in terms
|
||||||
|
// of copyright, but also in terms of patent law.
|
||||||
|
|
||||||
|
// Please note that this license is not to be understood as a guarantee
|
||||||
|
// either. If you use the program according to this license, but in
|
||||||
|
// conflict with patent law, it does not mean that the licensor will refund
|
||||||
|
// you for any losses that you incur if you are sued for your patent
|
||||||
|
// infringement.
|
||||||
|
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
// * Redistributions of source code must retain the above copyright and
|
||||||
|
// patent notices, this list of conditions and the following
|
||||||
|
// disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer in
|
||||||
|
// the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Oregon State University nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived
|
||||||
|
// from this software without specific prior written permission.
|
||||||
|
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||||
|
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||||
|
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||||
|
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||||
|
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||||
|
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
\**********************************************************************************************/
|
||||||
|
|
||||||
|
#include "precomp.hpp"
|
||||||
|
#include <iostream>
|
||||||
|
#include <stdarg.h>
|
||||||
|
|
||||||
|
namespace cv
|
||||||
|
{
|
||||||
|
|
||||||
|
/******************************* Defs and macros *****************************/
|
||||||
|
|
||||||
|
// default number of sampled intervals per octave
|
||||||
|
static const int SIFT_INTVLS = 3;
|
||||||
|
|
||||||
|
// default sigma for initial gaussian smoothing
|
||||||
|
static const float SIFT_SIGMA = 1.6f;
|
||||||
|
|
||||||
|
// default threshold on keypoint contrast |D(x)|
|
||||||
|
static const float SIFT_CONTR_THR = 0.04f;
|
||||||
|
|
||||||
|
// default threshold on keypoint ratio of principle curvatures
|
||||||
|
static const float SIFT_CURV_THR = 10.f;
|
||||||
|
|
||||||
|
// double image size before pyramid construction?
|
||||||
|
static const bool SIFT_IMG_DBL = true;
|
||||||
|
|
||||||
|
// default width of descriptor histogram array
|
||||||
|
static const int SIFT_DESCR_WIDTH = 4;
|
||||||
|
|
||||||
|
// default number of bins per histogram in descriptor array
|
||||||
|
static const int SIFT_DESCR_HIST_BINS = 8;
|
||||||
|
|
||||||
|
// assumed gaussian blur for input image
|
||||||
|
static const float SIFT_INIT_SIGMA = 0.5f;
|
||||||
|
|
||||||
|
// width of border in which to ignore keypoints
|
||||||
|
static const int SIFT_IMG_BORDER = 5;
|
||||||
|
|
||||||
|
// maximum steps of keypoint interpolation before failure
|
||||||
|
static const int SIFT_MAX_INTERP_STEPS = 5;
|
||||||
|
|
||||||
|
// default number of bins in histogram for orientation assignment
|
||||||
|
static const int SIFT_ORI_HIST_BINS = 36;
|
||||||
|
|
||||||
|
// determines gaussian sigma for orientation assignment
|
||||||
|
static const float SIFT_ORI_SIG_FCTR = 1.5f;
|
||||||
|
|
||||||
|
// determines the radius of the region used in orientation assignment
|
||||||
|
static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
|
||||||
|
|
||||||
|
// orientation magnitude relative to max that results in new feature
|
||||||
|
static const float SIFT_ORI_PEAK_RATIO = 0.8f;
|
||||||
|
|
||||||
|
// determines the size of a single descriptor orientation histogram
|
||||||
|
static const float SIFT_DESCR_SCL_FCTR = 3.f;
|
||||||
|
|
||||||
|
// threshold on magnitude of elements of descriptor vector
|
||||||
|
static const float SIFT_DESCR_MAG_THR = 0.2f;
|
||||||
|
|
||||||
|
// factor used to convert floating-point descriptor to unsigned char
|
||||||
|
static const float SIFT_INT_DESCR_FCTR = 512.f;
|
||||||
|
|
||||||
|
static const int SIFT_FIXPT_SCALE = 48;
|
||||||
|
|
||||||
|
|
||||||
|
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
|
||||||
|
{
|
||||||
|
Mat gray, gray_fpt;
|
||||||
|
if( img.channels() == 3 || img.channels() == 4 )
|
||||||
|
cvtColor(img, gray, COLOR_BGR2GRAY);
|
||||||
|
else
|
||||||
|
img.copyTo(gray);
|
||||||
|
gray.convertTo(gray_fpt, CV_16S, SIFT_FIXPT_SCALE, 0);
|
||||||
|
|
||||||
|
float sig_diff;
|
||||||
|
|
||||||
|
if( doubleImageSize )
|
||||||
|
{
|
||||||
|
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
|
||||||
|
Mat dbl;
|
||||||
|
resize(gray_fpt, dbl, Size(gray.cols*2, gray.rows*2), 0, 0, INTER_LINEAR);
|
||||||
|
GaussianBlur(dbl, dbl, Size(), sig_diff, sig_diff);
|
||||||
|
return dbl;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
|
||||||
|
GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
|
||||||
|
return gray_fpt;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SIFT::buildGaussianPyramid( const Mat& base, vector<Mat>& pyr, int nOctaves ) const
|
||||||
|
{
|
||||||
|
vector<double> sig(nOctaveLayers + 3);
|
||||||
|
pyr.resize(nOctaves*(nOctaveLayers + 3));
|
||||||
|
|
||||||
|
// precompute Gaussian sigmas using the following formula:
|
||||||
|
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
|
||||||
|
sig[0] = sigma;
|
||||||
|
double k = pow( 2., 1. / nOctaveLayers );
|
||||||
|
for( int i = 1; i < nOctaveLayers + 3; i++ )
|
||||||
|
{
|
||||||
|
double sig_prev = pow(k, (double)(i-1))*sigma;
|
||||||
|
double sig_total = sig_prev*k;
|
||||||
|
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
|
||||||
|
}
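    // Example with the defaults (sigma = 1.6, nOctaveLayers = 3): k = 2^(1/3) ~ 1.26, so for
    // i = 1 the layer already carries blur sig_prev = 1.6 and needs total blur
    // sig_total = 1.6*1.26 ~ 2.02; the incremental blur applied later in this function is
    // therefore sig[1] = sqrt(2.02^2 - 1.6^2) ~ 1.23, because Gaussian blurs compose as
    // sigma_total^2 = sigma_applied^2 + sigma_already^2.
    // (Illustrative comment, not part of the original source.)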
|
||||||
|
|
||||||
|
for( int o = 0; o < nOctaves; o++ )
|
||||||
|
{
|
||||||
|
for( int i = 0; i < nOctaveLayers + 3; i++ )
|
||||||
|
{
|
||||||
|
Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
|
||||||
|
if( o == 0 && i == 0 )
|
||||||
|
dst = base;
|
||||||
|
// base of new octave is halved image from end of previous octave
|
||||||
|
else if( i == 0 )
|
||||||
|
{
|
||||||
|
const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
|
||||||
|
resize(src, dst, Size(src.cols/2, src.rows/2),
|
||||||
|
0, 0, INTER_NEAREST);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
|
||||||
|
GaussianBlur(src, dst, Size(), sig[i], sig[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SIFT::buildDoGPyramid( const vector<Mat>& gpyr, vector<Mat>& dogpyr ) const
|
||||||
|
{
|
||||||
|
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
|
||||||
|
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
|
||||||
|
|
||||||
|
for( int o = 0; o < nOctaves; o++ )
|
||||||
|
{
|
||||||
|
for( int i = 0; i < nOctaveLayers + 2; i++ )
|
||||||
|
{
|
||||||
|
const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
|
||||||
|
const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
|
||||||
|
Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
|
||||||
|
subtract(src2, src1, dst, noArray(), CV_16S);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Computes a gradient orientation histogram at a specified pixel
|
||||||
|
static float calcOrientationHist( const Mat& img, Point pt, int radius,
|
||||||
|
float sigma, float* hist, int n )
|
||||||
|
{
|
||||||
|
int i, j, k, len = (radius*2+1)*(radius*2+1);
|
||||||
|
|
||||||
|
float expf_scale = -1.f/(2.f * sigma * sigma);
|
||||||
|
AutoBuffer<float> buf(len*4 + n+4);
|
||||||
|
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
|
||||||
|
float* temphist = W + len + 2;
|
||||||
|
|
||||||
|
for( i = 0; i < n; i++ )
|
||||||
|
temphist[i] = 0.f;
|
||||||
|
|
||||||
|
for( i = -radius, k = 0; i <= radius; i++ )
|
||||||
|
{
|
||||||
|
int y = pt.y + i;
|
||||||
|
if( y <= 0 || y >= img.rows - 1 )
|
||||||
|
continue;
|
||||||
|
for( j = -radius; j <= radius; j++ )
|
||||||
|
{
|
||||||
|
int x = pt.x + j;
|
||||||
|
if( x <= 0 || x >= img.cols - 1 )
|
||||||
|
continue;
|
||||||
|
|
||||||
|
float dx = img.at<short>(y, x+1) - img.at<short>(y, x-1);
|
||||||
|
float dy = img.at<short>(y-1, x) - img.at<short>(y+1, x);
|
||||||
|
|
||||||
|
X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale;
|
||||||
|
k++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
len = k;
|
||||||
|
|
||||||
|
// compute gradient values, orientations and the weights over the pixel neighborhood
|
||||||
|
exp(W, W, len);
|
||||||
|
fastAtan2(Y, X, Ori, len, true);
|
||||||
|
magnitude(X, Y, Mag, len);
|
||||||
|
|
||||||
|
for( k = 0; k < len; k++ )
|
||||||
|
{
|
||||||
|
int bin = cvRound((n/360.f)*Ori[k]);
|
||||||
|
if( bin >= n )
|
||||||
|
bin -= n;
|
||||||
|
if( bin < 0 )
|
||||||
|
bin += n;
|
||||||
|
temphist[bin] += W[k]*Mag[k];
|
||||||
|
}
|
||||||
|
|
||||||
|
// smooth the histogram
|
||||||
|
temphist[-1] = temphist[n-1];
|
||||||
|
temphist[-2] = temphist[n-2];
|
||||||
|
temphist[n] = temphist[0];
|
||||||
|
temphist[n+1] = temphist[1];
|
||||||
|
for( i = 0; i < n; i++ )
|
||||||
|
{
|
||||||
|
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
|
||||||
|
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
|
||||||
|
temphist[i]*(6.f/16.f);
|
||||||
|
}
|
||||||
|
|
||||||
|
float maxval = hist[0];
|
||||||
|
for( i = 1; i < n; i++ )
|
||||||
|
maxval = std::max(maxval, hist[i]);
|
||||||
|
|
||||||
|
return maxval;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Interpolates a scale-space extremum's location and scale to subpixel
|
||||||
|
// accuracy to form an image feature. Rejects features with low contrast.
|
||||||
|
// Based on Section 4 of Lowe's paper.
|
||||||
|
static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
|
||||||
|
int& layer, int& r, int& c, int nOctaveLayers,
|
||||||
|
float contrastThreshold, float edgeThreshold, float sigma )
|
||||||
|
{
|
||||||
|
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
|
||||||
|
const float deriv_scale = img_scale*0.5f;
|
||||||
|
const float second_deriv_scale = img_scale;
|
||||||
|
const float cross_deriv_scale = img_scale*0.25f;
|
||||||
|
|
||||||
|
float xi=0, xr=0, xc=0, contr;
|
||||||
|
int i = 0;
|
||||||
|
|
||||||
|
for( ; i < SIFT_MAX_INTERP_STEPS; i++ )
|
||||||
|
{
|
||||||
|
int idx = octv*(nOctaveLayers+2) + layer;
|
||||||
|
const Mat& img = dog_pyr[idx];
|
||||||
|
const Mat& prev = dog_pyr[idx-1];
|
||||||
|
const Mat& next = dog_pyr[idx+1];
|
||||||
|
|
||||||
|
Matx31f dD((img.at<short>(r, c+1) - img.at<short>(r, c-1))*deriv_scale,
|
||||||
|
(img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
|
||||||
|
(next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
|
||||||
|
|
||||||
|
float v2 = img.at<short>(r, c)*2;
|
||||||
|
float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
|
||||||
|
float dyy = (img.at<short>(r+1, c) + img.at<short>(r-1, c) - v2)*second_deriv_scale;
|
||||||
|
float dss = (next.at<short>(r, c) + prev.at<short>(r, c) - v2)*second_deriv_scale;
|
||||||
|
float dxy = (img.at<short>(r+1, c+1) - img.at<short>(r+1, c-1) -
|
||||||
|
img.at<short>(r-1, c+1) + img.at<short>(r-1, c-1))*cross_deriv_scale;
|
||||||
|
float dxs = (next.at<short>(r, c+1) - next.at<short>(r, c-1) -
|
||||||
|
prev.at<short>(r, c+1) + prev.at<short>(r, c-1))*cross_deriv_scale;
|
||||||
|
float dys = (next.at<short>(r+1, c) - next.at<short>(r-1, c) -
|
||||||
|
prev.at<short>(r+1, c) + prev.at<short>(r-1, c))*cross_deriv_scale;
|
||||||
|
|
||||||
|
Matx33f H(dxx, dxy, dxs,
|
||||||
|
dxy, dyy, dys,
|
||||||
|
dxs, dys, dss);
|
||||||
|
|
||||||
|
Matx31f X = H.solve<1>(dD, DECOMP_LU);
|
||||||
|
|
||||||
|
xi = -X(2, 0);
|
||||||
|
xr = -X(1, 0);
|
||||||
|
xc = -X(0, 0);
|
||||||
|
|
||||||
|
if( std::abs( xi ) < 0.5f && std::abs( xr ) < 0.5f && std::abs( xc ) < 0.5f )
|
||||||
|
break;
|
||||||
|
|
||||||
|
c += cvRound( xc );
|
||||||
|
r += cvRound( xr );
|
||||||
|
layer += cvRound( xi );
|
||||||
|
|
||||||
|
if( layer < 1 || layer > nOctaveLayers ||
|
||||||
|
c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER ||
|
||||||
|
r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER )
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ensure convergence of interpolation */
|
||||||
|
if( i >= SIFT_MAX_INTERP_STEPS )
|
||||||
|
return false;
|
||||||
|
|
||||||
|
{
|
||||||
|
int idx = octv*(nOctaveLayers+2) + layer;
|
||||||
|
const Mat& img = dog_pyr[idx];
|
||||||
|
const Mat& prev = dog_pyr[idx-1];
|
||||||
|
const Mat& next = dog_pyr[idx+1];
|
||||||
|
Matx31f dD((img.at<short>(r, c+1) - img.at<short>(r, c-1))*deriv_scale,
|
||||||
|
(img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
|
||||||
|
(next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
|
||||||
|
float t = dD.dot(Matx31f(xc, xr, xi));
|
||||||
|
|
||||||
|
contr = img.at<short>(r, c)*img_scale + t * 0.5f;
|
||||||
|
if( std::abs( contr ) * nOctaveLayers < contrastThreshold )
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/* principal curvatures are computed using the trace and det of Hessian */
|
||||||
|
float v2 = img.at<short>(r, c)*2;
|
||||||
|
float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
|
||||||
|
float dyy = (img.at<short>(r+1, c) + img.at<short>(r-1, c) - v2)*second_deriv_scale;
|
||||||
|
float dxy = (img.at<short>(r+1, c+1) - img.at<short>(r+1, c-1) -
|
||||||
|
img.at<short>(r-1, c+1) + img.at<short>(r-1, c-1)) * cross_deriv_scale;
|
||||||
|
float tr = dxx + dyy;
|
||||||
|
float det = dxx * dyy - dxy * dxy;
|
||||||
|
|
||||||
|
if( det <= 0 || tr*tr*edgeThreshold >= (edgeThreshold + 1)*(edgeThreshold + 1)*det )
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
kpt.pt.x = (c + xc) * (1 << octv);
|
||||||
|
kpt.pt.y = (r + xr) * (1 << octv);
|
||||||
|
kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
|
||||||
|
kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
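    // (Added note, not in the original source.) kpt.octave packs three values: the octave
    // index in the low byte, the layer index in the second byte, and the rounded sub-layer
    // offset ((xi + 0.5) mapped to 0..255) in the third byte. kpt.size converts the fitted
    // scale back to full-resolution image coordinates, hence the (1 << octv) factor; the
    // final factor of 2 matches KeyPoint::size being a diameter rather than a radius.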
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Detects features at extrema in DoG scale space. Bad features are discarded
|
||||||
|
// based on contrast and ratio of principal curvatures.
|
||||||
|
void SIFT::findScaleSpaceExtrema( const vector<Mat>& gauss_pyr, const vector<Mat>& dog_pyr,
|
||||||
|
vector<KeyPoint>& keypoints ) const
|
||||||
|
{
|
||||||
|
int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
|
||||||
|
int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
|
||||||
|
const int n = SIFT_ORI_HIST_BINS;
|
||||||
|
float hist[n];
|
||||||
|
KeyPoint kpt;
|
||||||
|
|
||||||
|
keypoints.clear();
|
||||||
|
|
||||||
|
for( int o = 0; o < nOctaves; o++ )
|
||||||
|
for( int i = 1; i <= nOctaveLayers; i++ )
|
||||||
|
{
|
||||||
|
int idx = o*(nOctaveLayers+2)+i;
|
||||||
|
const Mat& img = dog_pyr[idx];
|
||||||
|
const Mat& prev = dog_pyr[idx-1];
|
||||||
|
const Mat& next = dog_pyr[idx+1];
|
||||||
|
int step = (int)img.step1();
|
||||||
|
int rows = img.rows, cols = img.cols;
|
||||||
|
|
||||||
|
for( int r = SIFT_IMG_BORDER; r < rows-SIFT_IMG_BORDER; r++)
|
||||||
|
{
|
||||||
|
const short* currptr = img.ptr<short>(r);
|
||||||
|
const short* prevptr = prev.ptr<short>(r);
|
||||||
|
const short* nextptr = next.ptr<short>(r);
|
||||||
|
|
||||||
|
for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++)
|
||||||
|
{
|
||||||
|
int val = currptr[c];
|
||||||
|
|
||||||
|
// find local extrema with pixel accuracy
|
||||||
|
if( std::abs(val) > threshold &&
|
||||||
|
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
|
||||||
|
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
|
||||||
|
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
|
||||||
|
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
|
||||||
|
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
|
||||||
|
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
|
||||||
|
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
|
||||||
|
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
|
||||||
|
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
|
||||||
|
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
|
||||||
|
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
|
||||||
|
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
|
||||||
|
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
|
||||||
|
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
|
||||||
|
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
|
||||||
|
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
|
||||||
|
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
|
||||||
|
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
|
||||||
|
{
|
||||||
|
int r1 = r, c1 = c, layer = i;
|
||||||
|
if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1, nOctaveLayers,
|
||||||
|
contrastThreshold, edgeThreshold, sigma) )
|
||||||
|
continue;
|
||||||
|
float scl_octv = kpt.size*0.5f/(1 << o);
|
||||||
|
float omax = calcOrientationHist(gauss_pyr[o*(nOctaveLayers+3) + layer],
|
||||||
|
Point(c1, r1),
|
||||||
|
cvRound(SIFT_ORI_RADIUS * scl_octv),
|
||||||
|
SIFT_ORI_SIG_FCTR * scl_octv,
|
||||||
|
hist, n);
|
||||||
|
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
|
||||||
|
for( int j = 0; j < n; j++ )
|
||||||
|
{
|
||||||
|
int l = j > 0 ? j - 1 : n - 1;
|
||||||
|
int r = j < n-1 ? j + 1 : 0;
|
||||||
|
|
||||||
|
if( hist[j] > hist[l] && hist[j] > hist[r] && hist[j] >= mag_thr )
|
||||||
|
{
|
||||||
|
float bin = j + 0.5f * (hist[l]-hist[r]) / (hist[l] - 2*hist[j] + hist[r]);
|
||||||
|
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
|
||||||
|
kpt.angle = (float)((360.f/n) * bin);
|
||||||
|
keypoints.push_back(kpt);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl,
|
||||||
|
int d, int n, float* dst )
|
||||||
|
{
|
||||||
|
Point pt(cvRound(ptf.x), cvRound(ptf.y));
|
||||||
|
float cos_t = cosf(ori*(float)(CV_PI/180));
|
||||||
|
float sin_t = sinf(ori*(float)(CV_PI/180));
|
||||||
|
float bins_per_rad = n / 360.f;
|
||||||
|
float exp_scale = -1.f/(d * d * 0.5f);
|
||||||
|
float hist_width = SIFT_DESCR_SCL_FCTR * scl;
|
||||||
|
int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
|
||||||
|
cos_t /= hist_width;
|
||||||
|
sin_t /= hist_width;
|
||||||
|
|
||||||
|
int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
|
||||||
|
int rows = img.rows, cols = img.cols;
|
||||||
|
|
||||||
|
AutoBuffer<float> buf(len*6 + histlen);
|
||||||
|
float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
|
||||||
|
float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
|
||||||
|
|
||||||
|
for( i = 0; i < d+2; i++ )
|
||||||
|
{
|
||||||
|
for( j = 0; j < d+2; j++ )
|
||||||
|
for( k = 0; k < n+2; k++ )
|
||||||
|
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
|
||||||
|
}
|
||||||
|
|
||||||
|
for( i = -radius, k = 0; i <= radius; i++ )
|
||||||
|
for( j = -radius; j <= radius; j++ )
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
Calculate sample's histogram array coords rotated relative to ori.
|
||||||
|
Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
|
||||||
|
r_rot = 1.5) have full weight placed in row 1 after interpolation.
|
||||||
|
*/
|
||||||
|
float c_rot = j * cos_t - i * sin_t;
|
||||||
|
float r_rot = j * sin_t + i * cos_t;
|
||||||
|
float rbin = r_rot + d/2 - 0.5f;
|
||||||
|
float cbin = c_rot + d/2 - 0.5f;
|
||||||
|
int r = pt.y + i, c = pt.x + j;
|
||||||
|
|
||||||
|
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
|
||||||
|
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
|
||||||
|
{
|
||||||
|
float dx = img.at<short>(r, c+1) - img.at<short>(r, c-1);
|
||||||
|
float dy = img.at<short>(r-1, c) - img.at<short>(r+1, c);
|
||||||
|
X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
|
||||||
|
W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
|
||||||
|
k++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
len = k;
|
||||||
|
fastAtan2(Y, X, Ori, len, true);
|
||||||
|
magnitude(X, Y, Mag, len);
|
||||||
|
exp(W, W, len);
|
||||||
|
|
||||||
|
for( k = 0; k < len; k++ )
|
||||||
|
{
|
||||||
|
float rbin = RBin[k], cbin = CBin[k];
|
||||||
|
float obin = (Ori[k] - ori)*bins_per_rad;
|
||||||
|
float mag = Mag[k]*W[k];
|
||||||
|
|
||||||
|
int r0 = cvFloor( rbin );
|
||||||
|
int c0 = cvFloor( cbin );
|
||||||
|
int o0 = cvFloor( obin );
|
||||||
|
rbin -= r0;
|
||||||
|
cbin -= c0;
|
||||||
|
obin -= o0;
|
||||||
|
|
||||||
|
if( o0 < 0 )
|
||||||
|
o0 += n;
|
||||||
|
if( o0 >= n )
|
||||||
|
o0 -= n;
|
||||||
|
|
||||||
|
// histogram update using tri-linear interpolation
|
||||||
|
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
|
||||||
|
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
|
||||||
|
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
|
||||||
|
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
|
||||||
|
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
|
||||||
|
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
|
||||||
|
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
|
||||||
|
|
||||||
|
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
|
||||||
|
hist[idx] += v_rco000;
|
||||||
|
hist[idx+1] += v_rco001;
|
||||||
|
hist[idx+(n+2)] += v_rco010;
|
||||||
|
hist[idx+(n+3)] += v_rco011;
|
||||||
|
hist[idx+(d+2)*(n+2)] += v_rco100;
|
||||||
|
hist[idx+(d+2)*(n+2)+1] += v_rco101;
|
||||||
|
hist[idx+(d+3)*(n+2)] += v_rco110;
|
||||||
|
hist[idx+(d+3)*(n+2)+1] += v_rco111;
|
||||||
|
}
|
||||||
|
|
||||||
|
// finalize histogram, since the orientation histograms are circular
|
||||||
|
for( i = 0; i < d; i++ )
|
||||||
|
for( j = 0; j < d; j++ )
|
||||||
|
{
|
||||||
|
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
|
||||||
|
hist[idx] += hist[idx+n];
|
||||||
|
hist[idx+1] += hist[idx+n+1];
|
||||||
|
for( k = 0; k < n; k++ )
|
||||||
|
dst[(i*d + j)*n + k] = hist[idx+k];
|
||||||
|
}
|
||||||
|
// copy histogram to the descriptor,
|
||||||
|
// apply hysteresis thresholding
|
||||||
|
// and scale the result, so that it can be easily converted
|
||||||
|
// to byte array
|
||||||
|
float nrm2 = 0;
|
||||||
|
len = d*d*n;
|
||||||
|
for( k = 0; k < len; k++ )
|
||||||
|
nrm2 += dst[k]*dst[k];
|
||||||
|
float thr = std::sqrt(nrm2)*SIFT_DESCR_MAG_THR;
|
||||||
|
for( i = 0, nrm2 = 0; i < k; i++ )
|
||||||
|
{
|
||||||
|
float val = std::min(dst[i], thr);
|
||||||
|
dst[i] = val;
|
||||||
|
nrm2 += val*val;
|
||||||
|
}
|
||||||
|
nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON);
|
||||||
|
for( k = 0; k < len; k++ )
|
||||||
|
{
|
||||||
|
dst[k] = saturate_cast<uchar>(dst[k]*nrm2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void calcDescriptors(const vector<Mat>& gpyr, const vector<KeyPoint>& keypoints,
|
||||||
|
Mat& descriptors, int nOctaveLayers )
|
||||||
|
{
|
||||||
|
int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
|
||||||
|
|
||||||
|
for( size_t i = 0; i < keypoints.size(); i++ )
|
||||||
|
{
|
||||||
|
KeyPoint kpt = keypoints[i];
|
||||||
|
int octv=kpt.octave & 255, layer=(kpt.octave >> 8) & 255;
|
||||||
|
float scale = 1.f/(1 << octv);
|
||||||
|
float size=kpt.size*scale;
|
||||||
|
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
|
||||||
|
const Mat& img = gpyr[octv*(nOctaveLayers + 3) + layer];
|
||||||
|
|
||||||
|
calcSIFTDescriptor(img, ptf, kpt.angle, size*0.5f, d, n, descriptors.ptr<float>(i));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
SIFT::SIFT( int _nfeatures, int _nOctaveLayers,
|
||||||
|
double _contrastThreshold, double _edgeThreshold, double _sigma )
|
||||||
|
: nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
|
||||||
|
contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
int SIFT::descriptorSize() const
|
||||||
|
{
|
||||||
|
return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int SIFT::descriptorType() const
|
||||||
|
{
|
||||||
|
return CV_32F;
|
||||||
|
}
|
||||||
|
|
||||||
|
static Algorithm* createSIFT()
|
||||||
|
{
|
||||||
|
return new SIFT;
|
||||||
|
}
|
||||||
|
static AlgorithmInfo sift_info("Feature2D.SIFT", createSIFT);
|
||||||
|
|
||||||
|
AlgorithmInfo* SIFT::info() const
|
||||||
|
{
|
||||||
|
static volatile bool initialized = false;
|
||||||
|
if( !initialized )
|
||||||
|
{
|
||||||
|
sift_info.addParam(this, "nFeatures", nfeatures);
|
||||||
|
sift_info.addParam(this, "nOctaveLayers", nOctaveLayers);
|
||||||
|
sift_info.addParam(this, "contrastThreshold", contrastThreshold);
|
||||||
|
sift_info.addParam(this, "edgeThreshold", edgeThreshold);
|
||||||
|
sift_info.addParam(this, "sigma", sigma);
|
||||||
|
|
||||||
|
initialized = true;
|
||||||
|
}
|
||||||
|
return &sift_info;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SIFT::operator()(InputArray _image, InputArray _mask,
|
||||||
|
vector<KeyPoint>& keypoints) const
|
||||||
|
{
|
||||||
|
(*this)(_image, _mask, keypoints, noArray());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SIFT::operator()(InputArray _image, InputArray _mask,
|
||||||
|
vector<KeyPoint>& keypoints,
|
||||||
|
OutputArray _descriptors,
|
||||||
|
bool useProvidedKeypoints) const
|
||||||
|
{
|
||||||
|
Mat image = _image.getMat(), mask = _mask.getMat();
|
||||||
|
|
||||||
|
if( image.empty() || image.depth() != CV_8U )
|
||||||
|
CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
|
||||||
|
|
||||||
|
if( !mask.empty() && mask.type() != CV_8UC1 )
|
||||||
|
CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
|
||||||
|
|
||||||
|
Mat base = createInitialImage(image, false, sigma);
|
||||||
|
vector<Mat> gpyr, dogpyr;
|
||||||
|
int nOctaves = cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2);
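    // (Added note.) The octave count is chosen as roughly log2(min(width, height)) - 2,
    // i.e. the pyramid keeps halving the base image until its smaller side is only a few pixels.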
|
||||||
|
|
||||||
|
//double t, tf = getTickFrequency();
|
||||||
|
//t = (double)getTickCount();
|
||||||
|
buildGaussianPyramid(base, gpyr, nOctaves);
|
||||||
|
buildDoGPyramid(gpyr, dogpyr);
|
||||||
|
|
||||||
|
//t = (double)getTickCount() - t;
|
||||||
|
//printf("pyramid construction time: %g\n", t*1000./tf);
|
||||||
|
|
||||||
|
if( !useProvidedKeypoints )
|
||||||
|
{
|
||||||
|
//t = (double)getTickCount();
|
||||||
|
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
|
||||||
|
KeyPointsFilter::removeDuplicated( keypoints );
|
||||||
|
|
||||||
|
if( !mask.empty() )
|
||||||
|
KeyPointsFilter::runByPixelsMask( keypoints, mask );
|
||||||
|
|
||||||
|
if( nfeatures > 0 )
|
||||||
|
KeyPointsFilter::retainBest(keypoints, nfeatures);
|
||||||
|
//t = (double)getTickCount() - t;
|
||||||
|
//printf("keypoint detection time: %g\n", t*1000./tf);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// filter keypoints by mask
|
||||||
|
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
|
||||||
|
}
|
||||||
|
|
||||||
|
if( _descriptors.needed() )
|
||||||
|
{
|
||||||
|
//t = (double)getTickCount();
|
||||||
|
int dsize = descriptorSize();
|
||||||
|
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
|
||||||
|
Mat descriptors = _descriptors.getMat();
|
||||||
|
|
||||||
|
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers);
|
||||||
|
//t = (double)getTickCount() - t;
|
||||||
|
//printf("descriptor extraction time: %g\n", t*1000./tf);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void SIFT::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
|
||||||
|
{
|
||||||
|
(*this)(image, mask, keypoints, noArray());
|
||||||
|
}
|
||||||
|
|
||||||
|
void SIFT::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
|
||||||
|
{
|
||||||
|
(*this)(image, Mat(), keypoints, descriptors, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
File diff suppressed because it is too large
1029
modules/nonfree/test/test_features2d.cpp
Normal file
File diff suppressed because it is too large
3
modules/nonfree/test/test_main.cpp
Normal file
@ -0,0 +1,3 @@
#include "test_precomp.hpp"

CV_TEST_MAIN("cv")
1
modules/nonfree/test/test_precomp.cpp
Normal file
@ -0,0 +1 @@
#include "test_precomp.hpp"
10
modules/nonfree/test/test_precomp.hpp
Normal file
@ -0,0 +1,10 @@
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <iostream>

#endif
@ -586,58 +586,9 @@ public:
|
|||||||
CV_PROP int nlevels;
|
CV_PROP int nlevels;
|
||||||
};
|
};
|
||||||
|
|
||||||
/****************************************************************************************\
|
|
||||||
* Planar Object Detection *
|
|
||||||
\****************************************************************************************/
|
|
||||||
|
|
||||||
class CV_EXPORTS PlanarObjectDetector
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
PlanarObjectDetector();
|
|
||||||
PlanarObjectDetector(const FileNode& node);
|
|
||||||
PlanarObjectDetector(const vector<Mat>& pyr, int _npoints=300,
|
|
||||||
int _patchSize=FernClassifier::PATCH_SIZE,
|
|
||||||
int _nstructs=FernClassifier::DEFAULT_STRUCTS,
|
|
||||||
int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
|
|
||||||
int _nviews=FernClassifier::DEFAULT_VIEWS,
|
|
||||||
const LDetector& detector=LDetector(),
|
|
||||||
const PatchGenerator& patchGenerator=PatchGenerator());
|
|
||||||
virtual ~PlanarObjectDetector();
|
|
||||||
virtual void train(const vector<Mat>& pyr, int _npoints=300,
|
|
||||||
int _patchSize=FernClassifier::PATCH_SIZE,
|
|
||||||
int _nstructs=FernClassifier::DEFAULT_STRUCTS,
|
|
||||||
int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
|
|
||||||
int _nviews=FernClassifier::DEFAULT_VIEWS,
|
|
||||||
const LDetector& detector=LDetector(),
|
|
||||||
const PatchGenerator& patchGenerator=PatchGenerator());
|
|
||||||
virtual void train(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
|
|
||||||
int _patchSize=FernClassifier::PATCH_SIZE,
|
|
||||||
int _nstructs=FernClassifier::DEFAULT_STRUCTS,
|
|
||||||
int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
|
|
||||||
int _nviews=FernClassifier::DEFAULT_VIEWS,
|
|
||||||
const LDetector& detector=LDetector(),
|
|
||||||
const PatchGenerator& patchGenerator=PatchGenerator());
|
|
||||||
Rect getModelROI() const;
|
|
||||||
vector<KeyPoint> getModelPoints() const;
|
|
||||||
const LDetector& getDetector() const;
|
|
||||||
const FernClassifier& getClassifier() const;
|
|
||||||
void setVerbose(bool verbose);
|
|
||||||
|
|
||||||
void read(const FileNode& node);
|
|
||||||
void write(FileStorage& fs, const String& name=String()) const;
|
|
||||||
bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector<Point2f>& corners) const;
|
|
||||||
bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
|
|
||||||
CV_OUT Mat& H, CV_OUT vector<Point2f>& corners,
|
|
||||||
CV_OUT vector<int>* pairs=0) const;
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool verbose;
|
|
||||||
Rect modelROI;
|
|
||||||
vector<KeyPoint> modelPoints;
|
|
||||||
LDetector ldetector;
|
|
||||||
FernClassifier fernClassifier;
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
struct CV_EXPORTS DataMatrixCode {
|
struct CV_EXPORTS DataMatrixCode {
|
||||||
char msg[4]; //TODO std::string
|
char msg[4]; //TODO std::string
|
||||||
Mat original;
|
Mat original;
|
||||||
|
@ -46,176 +46,4 @@
 namespace cv
 {
 
-////////////////////////////////////// Planar Object Detector ////////////////////////////////////
-
-PlanarObjectDetector::PlanarObjectDetector()
-{
-}
-
-PlanarObjectDetector::PlanarObjectDetector(const FileNode& node)
-{
-    read(node);
-}
-
-PlanarObjectDetector::PlanarObjectDetector(const vector<Mat>& pyr, int npoints,
-                                           int patchSize, int nstructs, int structSize,
-                                           int nviews, const LDetector& detector,
-                                           const PatchGenerator& patchGenerator)
-{
-    train(pyr, npoints, patchSize, nstructs,
-          structSize, nviews, detector, patchGenerator);
-}
-
-PlanarObjectDetector::~PlanarObjectDetector()
-{
-}
-
-vector<KeyPoint> PlanarObjectDetector::getModelPoints() const
-{
-    return modelPoints;
-}
-
-void PlanarObjectDetector::train(const vector<Mat>& pyr, int npoints,
-                                 int patchSize, int nstructs, int structSize,
-                                 int nviews, const LDetector& detector,
-                                 const PatchGenerator& patchGenerator)
-{
-    modelROI = Rect(0, 0, pyr[0].cols, pyr[0].rows);
-    ldetector = detector;
-    ldetector.setVerbose(verbose);
-    ldetector.getMostStable2D(pyr[0], modelPoints, npoints, patchGenerator);
-
-    npoints = (int)modelPoints.size();
-    fernClassifier.setVerbose(verbose);
-    fernClassifier.trainFromSingleView(pyr[0], modelPoints,
-                                       patchSize, (int)modelPoints.size(), nstructs, structSize, nviews,
-                                       FernClassifier::COMPRESSION_NONE, patchGenerator);
-}
-
-void PlanarObjectDetector::train(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
-                                 int patchSize, int nstructs, int structSize,
-                                 int nviews, const LDetector& detector,
-                                 const PatchGenerator& patchGenerator)
-{
-    modelROI = Rect(0, 0, pyr[0].cols, pyr[0].rows);
-    ldetector = detector;
-    ldetector.setVerbose(verbose);
-    modelPoints.resize(keypoints.size());
-    std::copy(keypoints.begin(), keypoints.end(), modelPoints.begin());
-
-    fernClassifier.setVerbose(verbose);
-    fernClassifier.trainFromSingleView(pyr[0], modelPoints,
-                                       patchSize, (int)modelPoints.size(), nstructs, structSize, nviews,
-                                       FernClassifier::COMPRESSION_NONE, patchGenerator);
-}
-
-void PlanarObjectDetector::read(const FileNode& node)
-{
-    FileNodeIterator it = node["model-roi"].begin(), it_end;
-    it >> modelROI.x >> modelROI.y >> modelROI.width >> modelROI.height;
-    ldetector.read(node["detector"]);
-    fernClassifier.read(node["fern-classifier"]);
-    cv::read(node["model-points"], modelPoints);
-    CV_Assert(modelPoints.size() == (size_t)fernClassifier.getClassCount());
-}
-
-
-void PlanarObjectDetector::write(FileStorage& fs, const String& objname) const
-{
-    WriteStructContext ws(fs, objname, CV_NODE_MAP);
-
-    {
-        WriteStructContext wsroi(fs, "model-roi", CV_NODE_SEQ + CV_NODE_FLOW);
-        cv::write(fs, modelROI.x);
-        cv::write(fs, modelROI.y);
-        cv::write(fs, modelROI.width);
-        cv::write(fs, modelROI.height);
-    }
-    ldetector.write(fs, "detector");
-    cv::write(fs, "model-points", modelPoints);
-    fernClassifier.write(fs, "fern-classifier");
-}
-
-
-bool PlanarObjectDetector::operator()(const Mat& image, Mat& H, vector<Point2f>& corners) const
-{
-    vector<Mat> pyr;
-    buildPyramid(image, pyr, ldetector.nOctaves - 1);
-    vector<KeyPoint> keypoints;
-    ldetector(pyr, keypoints);
-
-    return (*this)(pyr, keypoints, H, corners);
-}
-
-bool PlanarObjectDetector::operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
-                                      Mat& matH, vector<Point2f>& corners, vector<int>* pairs) const
-{
-    int i, j, m = (int)modelPoints.size(), n = (int)keypoints.size();
-    vector<int> bestMatches(m, -1);
-    vector<float> maxLogProb(m, -FLT_MAX);
-    vector<float> signature;
-    vector<Point2f> fromPt, toPt;
-
-    for( i = 0; i < n; i++ )
-    {
-        KeyPoint kpt = keypoints[i];
-        CV_Assert(0 <= kpt.octave && kpt.octave < (int)pyr.size());
-        kpt.pt.x /= (float)(1 << kpt.octave);
-        kpt.pt.y /= (float)(1 << kpt.octave);
-        int k = fernClassifier(pyr[kpt.octave], kpt.pt, signature);
-        if( k >= 0 && (bestMatches[k] < 0 || signature[k] > maxLogProb[k]) )
-        {
-            maxLogProb[k] = signature[k];
-            bestMatches[k] = i;
-        }
-    }
-
-    if(pairs)
-        pairs->resize(0);
-
-    for( i = 0; i < m; i++ )
-        if( bestMatches[i] >= 0 )
-        {
-            fromPt.push_back(modelPoints[i].pt);
-            toPt.push_back(keypoints[bestMatches[i]].pt);
-        }
-
-    if( fromPt.size() < 4 )
-        return false;
-
-    vector<uchar> mask;
-    matH = findHomography(fromPt, toPt, RANSAC, 10, mask);
-    if( matH.data )
-    {
-        const Mat_<double>& H = matH;
-        corners.resize(4);
-        for( i = 0; i < 4; i++ )
-        {
-            Point2f pt((float)(modelROI.x + (i == 0 || i == 3 ? 0 : modelROI.width)),
-                       (float)(modelROI.y + (i <= 1 ? 0 : modelROI.height)));
-            double w = 1./(H(2,0)*pt.x + H(2,1)*pt.y + H(2,2));
-            corners[i] = Point2f((float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w),
-                                 (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w));
-        }
-    }
-
-    if( pairs )
-    {
-        for( i = j = 0; i < m; i++ )
-            if( bestMatches[i] >= 0 && mask[j++] )
-            {
-                pairs->push_back(i);
-                pairs->push_back(bestMatches[i]);
-            }
-    }
-
-    return matH.data != 0;
-}
-
-
-void PlanarObjectDetector::setVerbose(bool _verbose)
-{
-    verbose = _verbose;
-}
-
 }
modules/photo/CMakeLists.txt (new file)
@ -0,0 +1,2 @@
set(the_description "Computational Photography")
ocv_define_module(photo opencv_imgproc)
modules/photo/doc/inpainting.rst (new file)
@ -0,0 +1,33 @@
Inpainting
==========

.. highlight:: cpp

inpaint
-----------
Restores the selected region in an image using the region neighborhood.

.. ocv:function:: void inpaint( InputArray src, InputArray inpaintMask, OutputArray dst, double inpaintRadius, int flags )

.. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRange, flags[, dst]) -> dst

.. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* mask, CvArr* dst, double inpaintRadius, int flags)
.. ocv:pyoldfunction:: cv.Inpaint(src, mask, dst, inpaintRadius, flags) -> None

    :param src: Input 8-bit 1-channel or 3-channel image.

    :param inpaintMask: Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.

    :param dst: Output image with the same size and type as ``src`` .

    :param inpaintRadius: Radius of a circlular neighborhood of each point inpainted that is considered by the algorithm.

    :param flags: Inpainting method that could be one of the following:

        * **INPAINT_NS** Navier-Stokes based method.

        * **INPAINT_TELEA** Method by Alexandru Telea [Telea04]_.

The function reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video. See
http://en.wikipedia.org/wiki/Inpainting
for more details.
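Illustrative usage sketch (not part of the commit): a minimal call to the relocated cv::inpaint, using a made-up input file name and a hand-built rectangular mask.

#include "opencv2/photo/photo.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Hypothetical input: any 8-bit 1- or 3-channel image will do.
    cv::Mat src = cv::imread("damaged.png");
    if (src.empty())
        return 1;

    // Non-zero mask pixels mark the region to be reconstructed.
    cv::Mat mask = cv::Mat::zeros(src.size(), CV_8UC1);
    cv::rectangle(mask, cv::Rect(40, 40, 60, 20), cv::Scalar(255), -1);

    cv::Mat dst;
    cv::inpaint(src, mask, dst, 3 /* inpaintRadius */, cv::INPAINT_TELEA);

    cv::imwrite("restored.png", dst);
    return 0;
}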
modules/photo/doc/photo.rst (new file)
@ -0,0 +1,10 @@
********************************
photo. Computational Photography
********************************

.. highlight:: cpp

.. toctree::
    :maxdepth: 2

    inpainting
modules/photo/include/opencv2/photo/photo.hpp (new file)
@ -0,0 +1,72 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//  License Agreement. For Open Source Computer Vision Library.
//  Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
//  Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
//  (standard OpenCV BSD-style license text)
//M*/

#ifndef __OPENCV_PHOTO_HPP__
#define __OPENCV_PHOTO_HPP__

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#ifdef __cplusplus

/*! \namespace cv
 Namespace where all the C++ OpenCV functionality resides
 */
namespace cv
{

//! the inpainting algorithm
enum
{
    INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm
    INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm
};

//! restores the damaged image areas using one of the available intpainting algorithms
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
                           OutputArray dst, double inpaintRange, int flags );

}

#endif

#endif
@ -1,38 +1,38 @@
 #include "perf_precomp.hpp"
 
 using namespace std;
 using namespace cv;
 using namespace perf;
 using std::tr1::make_tuple;
 using std::tr1::get;
 
 CV_ENUM(InpaintingMethod, INPAINT_NS, INPAINT_TELEA)
 typedef std::tr1::tuple<Size, InpaintingMethod> InpaintArea_InpaintingMethod_t;
 typedef perf::TestBaseWithParam<InpaintArea_InpaintingMethod_t> InpaintArea_InpaintingMethod;
 
 
 PERF_TEST_P(InpaintArea_InpaintingMethod, inpaint,
             testing::Combine(
                 SZ_ALL_SMALL,
                 testing::ValuesIn(InpaintingMethod::all())
                 )
             )
 {
     Mat src = imread(getDataPath("gpu/hog/road.png"));
 
     Size sz = get<0>(GetParam());
     int inpaintingMethod = get<1>(GetParam());
 
     Mat mask(src.size(), CV_8UC1, Scalar(0));
     Mat result(src.size(), src.type());
 
     Rect inpaintArea(src.cols/3, src.rows/3, sz.width, sz.height);
     mask(inpaintArea).setTo(255);
 
     declare.in(src, mask).out(result).time(30);
 
     TEST_CYCLE() inpaint(src, mask, result, 10.0, inpaintingMethod);
 
     Mat inpaintedArea = result(inpaintArea);
     SANITY_CHECK(inpaintedArea);
 }
modules/photo/perf/perf_main.cpp (new file)
@ -0,0 +1,3 @@
#include "perf_precomp.hpp"

CV_PERF_TEST_MAIN(photo)

modules/photo/perf/perf_precomp.cpp (new file)
@ -0,0 +1 @@
#include "perf_precomp.hpp"

modules/photo/perf/perf_precomp.hpp (new file)
@ -0,0 +1,12 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/highgui/highgui.hpp"

#if GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

#endif
@ -46,6 +46,7 @@
 // */
 
 #include "precomp.hpp"
+#include "opencv2/imgproc/imgproc_c.h"
 
 #undef CV_MAT_ELEM_PTR_FAST
 #define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \
@ -383,8 +384,7 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
             }
             sat = (float)((Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f)+0.5f));
             {
-                int isat = cvRound(sat);
-                CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = CV_CAST_8U(isat);
+                CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = cv::saturate_cast<uchar>(sat);
             }
          }
@ -496,8 +496,7 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
             }
             sat = (float)((Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f)+0.5f));
             {
-                int isat = cvRound(sat);
-                CV_MAT_ELEM(*out,uchar,i-1,j-1) = CV_CAST_8U(isat);
+                CV_MAT_ELEM(*out,uchar,i-1,j-1) = cv::saturate_cast<uchar>(sat);
             }
          }
@ -594,10 +593,7 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
                }
             }
          }
-          {
-             int out_val = cvRound((double)Ia/s);
-             CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = CV_CAST_8U(out_val);
-          }
+          CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = cv::saturate_cast<uchar>((double)Ia/s);
       }
 
       CV_MAT_ELEM(*f,uchar,i,j) = BAND;
@ -685,10 +681,7 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
                }
             }
          }
-          {
-             int out_val = cvRound((double)Ia/s);
-             CV_MAT_ELEM(*out,uchar,i-1,j-1) = CV_CAST_8U(out_val);
-          }
+          CV_MAT_ELEM(*out,uchar,i-1,j-1) = cv::saturate_cast<uchar>((double)Ia/s);
       }
 
       CV_MAT_ELEM(*f,uchar,i,j) = BAND;
@ -724,7 +717,7 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
 }
 
 
-CV_IMPL void
+void
 cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img,
            double inpaintRange, int flags )
 {
modules/photo/src/precomp.cpp (new file)
@ -0,0 +1,44 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//  Intel License Agreement. For Open Source Computer Vision Library.
//  Copyright (C) 2000, Intel Corporation, all rights reserved.
//  (standard OpenCV BSD-style license text)
//M*/

#include "precomp.hpp"

/* End of file. */
modules/photo/src/precomp.hpp (new file)
@ -0,0 +1,56 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//  License Agreement. For Open Source Computer Vision Library.
//  Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
//  Copyright (C) 2009, Willow Garage Inc., all rights reserved.
//  (standard OpenCV BSD-style license text)
//M*/

#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__

#if _MSC_VER >= 1200
#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
#endif

#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif

#include "opencv2/photo/photo.hpp"

#endif
modules/photo/test/test_main.cpp (new file)
@ -0,0 +1,3 @@
#include "test_precomp.hpp"

CV_TEST_MAIN("cv")

modules/photo/test/test_precomp.cpp (new file)
@ -0,0 +1 @@
#include "test_precomp.hpp"

modules/photo/test/test_precomp.hpp (new file)
@ -0,0 +1,9 @@
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

#endif
@ -10,7 +10,7 @@ if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_USE_NUMPY)
 endif()
 
 set(the_description "The python bindings")
-ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
+ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_photo opencv_nonfree opencv_objdetect opencv_legacy opencv_contrib)
 
 ocv_include_directories(${PYTHON_INCLUDE_PATH})
 ocv_include_directories(
@ -19,12 +19,14 @@ ocv_include_directories(
     "${OpenCV_SOURCE_DIR}/modules/flann/include"
     "${OpenCV_SOURCE_DIR}/modules/imgproc/include"
     "${OpenCV_SOURCE_DIR}/modules/video/include"
+    "${OpenCV_SOURCE_DIR}/modules/photo/include"
     "${OpenCV_SOURCE_DIR}/modules/highgui/include"
     "${OpenCV_SOURCE_DIR}/modules/ml/include"
     "${OpenCV_SOURCE_DIR}/modules/features2d/include"
     "${OpenCV_SOURCE_DIR}/modules/flann/include"
     "${OpenCV_SOURCE_DIR}/modules/calib3d/include"
     "${OpenCV_SOURCE_DIR}/modules/objdetect/include"
+    "${OpenCV_SOURCE_DIR}/modules/nonfree/include"
     "${OpenCV_SOURCE_DIR}/modules/legacy/include"
     "${OpenCV_SOURCE_DIR}/modules/contrib/include"
     )
@ -36,9 +38,11 @@ set(opencv_hdrs "${OpenCV_SOURCE_DIR}/modules/core/include/opencv2/core/core.hpp
     "${OpenCV_SOURCE_DIR}/modules/imgproc/include/opencv2/imgproc/imgproc.hpp"
     "${OpenCV_SOURCE_DIR}/modules/video/include/opencv2/video/background_segm.hpp"
     "${OpenCV_SOURCE_DIR}/modules/video/include/opencv2/video/tracking.hpp"
+    "${OpenCV_SOURCE_DIR}/modules/photo/include/opencv2/photo/photo.hpp"
     "${OpenCV_SOURCE_DIR}/modules/highgui/include/opencv2/highgui/highgui.hpp"
     "${OpenCV_SOURCE_DIR}/modules/ml/include/opencv2/ml/ml.hpp"
     "${OpenCV_SOURCE_DIR}/modules/features2d/include/opencv2/features2d/features2d.hpp"
+    "${OpenCV_SOURCE_DIR}/modules/nonfree/include/opencv2/nonfree/features2d.hpp"
     "${OpenCV_SOURCE_DIR}/modules/calib3d/include/opencv2/calib3d/calib3d.hpp"
     "${OpenCV_SOURCE_DIR}/modules/objdetect/include/opencv2/objdetect/objdetect.hpp")
 
@ -71,7 +75,7 @@ add_custom_command(
 
 set(cv2_target "opencv_python")
 add_library(${cv2_target} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp)
-target_link_libraries(${cv2_target} ${PYTHON_LIBRARIES} opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
+target_link_libraries(${cv2_target} ${PYTHON_LIBRARIES} opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib opencv_photo)
 
 set_target_properties(${cv2_target} PROPERTIES PREFIX "")
 set_target_properties(${cv2_target} PROPERTIES OUTPUT_NAME "cv2")
@ -17,6 +17,8 @@
 #include "opencv2/objdetect/objdetect.hpp"
 #include "opencv2/video/tracking.hpp"
 #include "opencv2/video/background_segm.hpp"
+#include "opencv2/photo/photo.hpp"
+#include "opencv2/nonfree/nonfree.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
 using cv::flann::IndexParams;
@ -16,5 +16,7 @@ OpenCV API Reference
     ml/doc/ml.rst
     flann/doc/flann.rst
     gpu/doc/gpu.rst
+    photo/doc/photo.rst
     stitching/doc/stitching.rst
+    nonfree/doc/nonfree.rst
 
@ -87,13 +87,13 @@ private:
 
     Ptr<FeatureDetector> detector_;
     Ptr<DescriptorExtractor> extractor_;
-    Ptr<SURF> surf;
+    Ptr<Feature2D> surf;
 };
 
 class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
 {
 public:
-    OrbFeaturesFinder(Size _grid_size = Size(3,1), size_t n_features = 1500, const ORB::CommonParams & detector_params = ORB::CommonParams(1.3f, 5));
+    OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);
 
 private:
     void find(const Mat &image, ImageFeatures &features);
|
|||||||
{
|
{
|
||||||
if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
|
if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
|
||||||
{
|
{
|
||||||
surf = new SURF(hess_thresh, num_octaves, num_layers);
|
surf = Algorithm::create<Feature2D>("Feature2D.SURF");
|
||||||
|
if( surf.empty() )
|
||||||
|
CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );
|
||||||
|
surf->set("hessianThreshold", hess_thresh);
|
||||||
|
surf->set("nOctaves", num_octaves);
|
||||||
|
surf->set("nOctaveLayers", num_layers);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
detector_ = new SurfFeatureDetector(hess_thresh, num_octaves, num_layers);
|
detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");
|
||||||
extractor_ = new SurfDescriptorExtractor(num_octaves_descr, num_layers_descr);
|
extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");
|
||||||
|
|
||||||
|
if( detector_.empty() || extractor_.empty() )
|
||||||
|
CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );
|
||||||
|
|
||||||
|
detector_->set("hessianThreshold", hess_thresh);
|
||||||
|
detector_->set("nOctaves", num_octaves);
|
||||||
|
detector_->set("nOctaveLayers", num_layers);
|
||||||
|
|
||||||
|
extractor_->set("nOctaves", num_octaves_descr);
|
||||||
|
extractor_->set("nOctaveLayers", num_layers_descr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -342,10 +357,10 @@ void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, size_t n_features, const ORB::CommonParams & detector_params)
|
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
|
||||||
{
|
{
|
||||||
grid_size = _grid_size;
|
grid_size = _grid_size;
|
||||||
orb = new ORB(n_features * (99 + grid_size.area())/100/grid_size.area(), detector_params);
|
orb = new ORB(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
|
||||||
}
|
}
|
||||||
|
|
||||||
void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
|
void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
|
||||||
|
@ -416,7 +416,7 @@ public:
     //! the default constructor
     BackgroundSubtractorMOG2();
     //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
+    BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true);
     //! the destructor
     virtual ~BackgroundSubtractorMOG2();
     //! the update operator
@ -3,8 +3,9 @@
 #
 # ----------------------------------------------------------------------------
 
-SET(OPENCV_C_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui opencv_ml opencv_video opencv_objdetect
-    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib)
+SET(OPENCV_C_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
+    opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree
+    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib)
 
 ocv_check_dependencies(${OPENCV_C_SAMPLES_REQUIRED_DEPS})
 
@ -8,7 +8,10 @@
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/calib3d/calib3d.hpp"
+#include "opencv2/nonfree/nonfree.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/legacy/legacy.hpp"
+#include "opencv2/legacy/compat.hpp"
 
 #include <iostream>
 #include <vector>
@ -214,6 +217,7 @@ int main(int argc, char** argv)
     const char* object_filename = argc == 3 ? argv[1] : "box.png";
     const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
 
+    cv::initModule_nonfree();
     help();
 
     IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
@ -2,6 +2,8 @@
 #include "opencv2/core/core.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/features2d/features2d.hpp"
+#include "opencv2/nonfree/nonfree.hpp"
+#include "opencv2/legacy/legacy.hpp"
 
 #include <iostream>
 #include <fstream>
@ -123,7 +125,7 @@ void testCalonderClassifier( const string& classifierFilename, const string& img
     Mat descriptors2; de.compute( img2, keypoints2, descriptors2 );
 
     // Match descriptors
-    BruteForceMatcher<L1<float> > matcher;
+    BFMatcher matcher(NORM_L1);
     vector<DMatch> matches;
     matcher.match( descriptors1, descriptors2, matches );
 
@ -3,6 +3,7 @@
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/objdetect/objdetect.hpp"
+#include "opencv2/legacy/legacy.hpp"
 
 #include <algorithm>
 #include <iostream>
@ -1,134 +1,86 @@
 /* This sample code was originally provided by Liu Liu
- * Copyright© 2009, Liu Liu All rights reserved.
+ * Copyright (C) 2009, Liu Liu All rights reserved.
  */
 
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/features2d/features2d.hpp"
-#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/imgproc/imgproc.hpp"
 
+#include <iostream>
 #include <stdio.h>
 
+using namespace cv;
+using namespace std;
+
 void help()
 {
-    printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
+    cout << "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
     "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
     "\nCall:\n"
-    "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n");
+    "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n";
 }
 
-static CvScalar colors[] =
+static const Vec3b bcolors[] =
 {
-    {{0,0,255}},
-    {{0,128,255}},
-    {{0,255,255}},
-    {{0,255,0}},
-    {{255,128,0}},
-    {{255,255,0}},
-    {{255,0,0}},
-    {{255,0,255}},
-    {{255,255,255}},
-    {{196,255,255}},
-    {{255,255,196}}
+    Vec3b(0,0,255),
+    Vec3b(0,128,255),
+    Vec3b(0,255,255),
+    Vec3b(0,255,0),
+    Vec3b(255,128,0),
+    Vec3b(255,255,0),
+    Vec3b(255,0,0),
+    Vec3b(255,0,255),
+    Vec3b(255,255,255)
 };
 
-static uchar bcolors[][3] =
-{
-    {0,0,255},
-    {0,128,255},
-    {0,255,255},
-    {0,255,0},
-    {255,128,0},
-    {255,255,0},
-    {255,0,0},
-    {255,0,255},
-    {255,255,255}
-};
-
-
 int main( int argc, char** argv )
 {
-    char path[1024];
-    IplImage* img;
+    string path;
+    Mat img0, img, yuv, gray, ellipses;
     help();
-    if (argc!=2)
+
+    img0 = imread( argc != 2 ? "puzzle.png" : argv[1], 1 );
+    if( img0.empty() )
     {
-        strcpy(path,"puzzle.png");
-        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
-        if (!img)
-        {
-            printf("\nUsage: mser_sample <path_to_image>\n");
-            return 0;
-        }
+        if( argc != 2 )
+            cout << "\nUsage: mser_sample <path_to_image>\n";
+        else
+            cout << "Unable to load image " << argv[1] << endl;
+        return 0;
     }
-    else
-    {
-        strcpy(path,argv[1]);
-        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
-    }
-
-    if (!img)
-    {
-        printf("Unable to load image %s\n",path);
-        return 0;
-    }
-    IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
-    IplImage* ellipses = cvCloneImage(rsp);
-    cvCvtColor(img,ellipses,CV_GRAY2BGR);
-    CvSeq* contours;
-    CvMemStorage* storage= cvCreateMemStorage();
-    IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
-    cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
-    CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
-
-    double t = (double)cvGetTickCount();
-    cvExtractMSER( hsv, NULL, &contours, storage, params );
-    t = cvGetTickCount() - t;
-    printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
-    uchar* rsptr = (uchar*)rsp->imageData;
-    // draw mser with different color
-    for ( int i = contours->total-1; i >= 0; i-- )
-    {
-        CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
-        for ( int j = 0; j < r->total; j++ )
-        {
-            CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
-            rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
-            rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
-            rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
-        }
-    }
-    // find ellipse ( it seems cvfitellipse2 have error or sth?
-    for ( int i = 0; i < contours->total; i++ )
-    {
-        CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
-        CvBox2D box = cvFitEllipse2( r );
-        box.angle=(float)CV_PI/2-box.angle;
-
-        if ( r->color > 0 )
-            cvEllipseBox( ellipses, box, colors[9], 2 );
-        else
-            cvEllipseBox( ellipses, box, colors[2], 2 );
-    }
-
-    cvSaveImage( "rsp.png", rsp );
-
-    cvNamedWindow( "original", 0 );
-    cvShowImage( "original", img );
-
-    cvNamedWindow( "response", 0 );
-    cvShowImage( "response", rsp );
-
-    cvNamedWindow( "ellipses", 0 );
-    cvShowImage( "ellipses", ellipses );
-
-    cvWaitKey(0);
-
-    cvDestroyWindow( "original" );
-    cvDestroyWindow( "response" );
-    cvDestroyWindow( "ellipses" );
-    cvReleaseImage(&rsp);
-    cvReleaseImage(&img);
-    cvReleaseImage(&ellipses);
-
+
+    cvtColor(img0, yuv, COLOR_BGR2YCrCb);
+    cvtColor(img0, gray, COLOR_BGR2GRAY);
+    cvtColor(gray, img, COLOR_GRAY2BGR);
+    img.copyTo(ellipses);
+
+    vector<vector<Point> > contours;
+    double t = (double)getTickCount();
+    MSER()(yuv, contours);
+    t = (double)getTickCount() - t;
+    printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
+            t*1000./getTickFrequency() );
+
+    // draw mser's with different colors
+    for( int i = (int)contours.size()-1; i >= 0; i-- )
+    {
+        const vector<Point>& r = contours[i];
+        for ( int j = 0; j < (int)r.size(); j++ )
+        {
+            Point pt = r[j];
+            img.at<Vec3b>(r[j]) = bcolors[i%9];
+        }
+
+        // find ellipse (it seems cvfitellipse2 have error or sth?)
+        RotatedRect box = fitEllipse( r );
+
+        box.angle=(float)CV_PI/2-box.angle;
+        ellipse( ellipses, box, Scalar(196,255,255), 2 );
+    }
+
+    imshow( "original", img0 );
+    imshow( "response", img );
+    imshow( "ellipses", ellipses );
+
+    waitKey(0);
 }
@ -11,6 +11,9 @@
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/nonfree/nonfree.hpp"
+#include "opencv2/legacy/legacy.hpp"
+#include "opencv2/legacy/compat.hpp"
 
 #include <string>
 #include <stdio.h>
@ -25,8 +28,8 @@ void help()
 
 using namespace cv;
 
-IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
-                              const vector<KeyPoint>& features2, const vector<int>& desc_idx);
+Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
+                        const vector<KeyPoint>& features2, const vector<int>& desc_idx);
 
 int main(int argc, char** argv)
 {
@ -45,8 +48,8 @@ int main(int argc, char** argv)
     std::string img2_name = path_name + "/" + std::string(argv[3]);
 
     printf("Reading the images...\n");
-    IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
-    IplImage* img2 = cvLoadImage(img2_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
+    Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
+    Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
 
     // extract keypoints from the first image
     SURF surf_extractor(5.0e3);
@ -61,7 +64,9 @@ int main(int argc, char** argv)
     // create descriptors
     OneWayDescriptorBase descriptors(patch_size, pose_count, OneWayDescriptorBase::GetPCAFilename(), path_name,
                                      images_list);
-    descriptors.CreateDescriptorsFromImage(img1, keypoints1);
+    IplImage img1_c = img1;
+    IplImage img2_c = img2;
+    descriptors.CreateDescriptorsFromImage(&img1_c, keypoints1);
     printf("done\n");
 
     // extract keypoints from the second image
@ -77,43 +82,37 @@ int main(int argc, char** argv)
     {
         int pose_idx = 0;
         float distance = 0;
-        descriptors.FindDescriptor(img2, keypoints2[i].pt, desc_idx[i], pose_idx, distance);
+        descriptors.FindDescriptor(&img2_c, keypoints2[i].pt, desc_idx[i], pose_idx, distance);
     }
     printf("done\n");
 
-    IplImage* img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, desc_idx);
+    Mat img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, desc_idx);
 
-    cvNamedWindow("correspondences", 1);
-    cvShowImage("correspondences", img_corr);
-    cvWaitKey(0);
-
-    cvReleaseImage(&img1);
-    cvReleaseImage(&img2);
-    cvReleaseImage(&img_corr);
+    imshow("correspondences", img_corr);
+    waitKey(0);
 }
 
-IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
-                              const vector<KeyPoint>& features2, const vector<int>& desc_idx)
+Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
+                        const vector<KeyPoint>& features2, const vector<int>& desc_idx)
 {
-    IplImage* img_corr = cvCreateImage(cvSize(img1->width + img2->width, MAX(img1->height, img2->height)),
-                                       IPL_DEPTH_8U, 3);
-    cvSetImageROI(img_corr, cvRect(0, 0, img1->width, img1->height));
-    cvCvtColor(img1, img_corr, CV_GRAY2RGB);
-    cvSetImageROI(img_corr, cvRect(img1->width, 0, img2->width, img2->height));
-    cvCvtColor(img2, img_corr, CV_GRAY2RGB);
-    cvResetImageROI(img_corr);
+    Mat part, img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
+    img_corr = Scalar::all(0);
+    part = img_corr(Rect(0, 0, img1.cols, img1.rows));
+    cvtColor(img1, part, COLOR_GRAY2RGB);
+    part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
+    cvtColor(img1, part, COLOR_GRAY2RGB);
 
     for (size_t i = 0; i < features1.size(); i++)
    {
-        cvCircle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
+        circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
    }
 
     for (size_t i = 0; i < features2.size(); i++)
    {
-        CvPoint pt = cvPoint((int)features2[i].pt.x + img1->width, (int)features2[i].pt.y);
-        cvCircle(img_corr, pt, 3, CV_RGB(255, 0, 0));
-        cvLine(img_corr, features1[desc_idx[i]].pt, pt, CV_RGB(0, 255, 0));
+        Point pt((int)features2[i].pt.x + img1.cols, (int)features2[i].pt.y);
+        circle(img_corr, pt, 3, Scalar(0, 0, 255));
+        line(img_corr, features1[desc_idx[i]].pt, pt, Scalar(0, 255, 0));
    }
 
     return img_corr;
 }
@ -3,8 +3,9 @@
 #
 # ----------------------------------------------------------------------------
 
-SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui opencv_ml opencv_video opencv_objdetect
-    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching)
+SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
+    opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree
+    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching)
 
 ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
 
@ -105,7 +105,7 @@ int main(int argc, const char ** argv)
 
     //Do matching using features2d
     cout << "matching with BruteForceMatcher<Hamming>" << endl;
-    BruteForceMatcher<Hamming> matcher_popcount;
+    BFMatcher matcher_popcount(NORM_HAMMING);
     vector<DMatch> matches_popcount;
     double pop_time = match(kpts_1, kpts_2, matcher_popcount, desc_1, desc_2, matches_popcount);
     cout << "done BruteForceMatcher<Hamming> matching. took " << pop_time << " seconds" << endl;
@ -42,6 +42,7 @@
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/features2d/features2d.hpp"
+#include "opencv2/legacy/legacy.hpp"
 
 #include <limits>
 #include <cstdio>
@ -879,7 +880,7 @@ public:
     {
         string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
         defaultDescMatcher = new VectorDescriptorMatch( new CalonderDescriptorExtractor<float>( classifierFile ),
-                                                        new BruteForceMatcher<L2<float> > );
+                                                        new BFMatcher(NORM_L2) );
         specificDescMatcher = defaultDescMatcher;
     }
 };
@ -1,7 +1,8 @@
|
|||||||
#include "opencv2/calib3d/calib3d.hpp"
|
#include "opencv2/calib3d/calib3d.hpp"
|
||||||
#include "opencv2/features2d/features2d.hpp"
|
#include "opencv2/features2d/features2d.hpp"
|
||||||
#include "opencv2/highgui/highgui.hpp"
|
#include "opencv2/highgui/highgui.hpp"
|
||||||
#include "opencv2/imgproc/imgproc_c.h"
|
#include "opencv2/imgproc/imgproc.hpp"
|
||||||
|
#include "opencv2/nonfree/nonfree.hpp"
|
||||||
|
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
|
||||||
@ -14,8 +15,8 @@ void help()
|
|||||||
printf("For example: ./generic_descriptor_match ../c/scene_l.bmp ../c/scene_r.bmp FERN fern_params.xml\n");
|
printf("For example: ./generic_descriptor_match ../c/scene_l.bmp ../c/scene_r.bmp FERN fern_params.xml\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
|
Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
|
||||||
const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx);
|
const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx);
|
||||||
|
|
||||||
int main(int argc, char** argv)
|
int main(int argc, char** argv)
|
||||||
{
|
{
|
||||||
@ -38,8 +39,8 @@ int main(int argc, char** argv)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//printf("Reading the images...\n");
|
//printf("Reading the images...\n");
|
||||||
IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
|
Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
|
||||||
IplImage* img2 = cvLoadImage(img2_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
|
Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
|
||||||
|
|
||||||
// extract keypoints from the first image
|
// extract keypoints from the first image
|
||||||
SURF surf_extractor(5.0e3);
|
SURF surf_extractor(5.0e3);
|
||||||
@ -60,38 +61,32 @@ int main(int argc, char** argv)
|
|||||||
descriptorMatcher->match( img2, keypoints2, img1, keypoints1, matches2to1 );
|
descriptorMatcher->match( img2, keypoints2, img1, keypoints1, matches2to1 );
|
||||||
printf("Done\n");
|
printf("Done\n");
|
||||||
|
|
||||||
IplImage* img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, matches2to1);
|
Mat img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, matches2to1);
|
||||||
|
|
||||||
cvNamedWindow("correspondences", 1);
|
imshow("correspondences", img_corr);
|
||||||
cvShowImage("correspondences", img_corr);
|
waitKey(0);
|
||||||
cvWaitKey(0);
|
|
||||||
|
|
||||||
cvReleaseImage(&img1);
|
|
||||||
cvReleaseImage(&img2);
|
|
||||||
cvReleaseImage(&img_corr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
|
Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
|
||||||
const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx)
|
const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx)
|
||||||
{
|
{
|
||||||
IplImage* img_corr = cvCreateImage(cvSize(img1->width + img2->width, MAX(img1->height, img2->height)),
|
Mat part, img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
|
||||||
IPL_DEPTH_8U, 3);
|
img_corr = Scalar::all(0);
|
||||||
cvSetImageROI(img_corr, cvRect(0, 0, img1->width, img1->height));
|
part = img_corr(Rect(0, 0, img1.cols, img1.rows));
|
||||||
cvCvtColor(img1, img_corr, CV_GRAY2RGB);
|
cvtColor(img1, part, COLOR_GRAY2RGB);
|
||||||
cvSetImageROI(img_corr, cvRect(img1->width, 0, img2->width, img2->height));
|
part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
|
||||||
cvCvtColor(img2, img_corr, CV_GRAY2RGB);
|
cvtColor(img1, part, COLOR_GRAY2RGB);
|
||||||
cvResetImageROI(img_corr);
|

for (size_t i = 0; i < features1.size(); i++)
{
cvCircle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
|circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
}

for (size_t i = 0; i < features2.size(); i++)
{
CvPoint pt = cvPoint(cvRound(features2[i].pt.x + img1->width), cvRound(features2[i].pt.y));
|Point pt(cvRound(features2[i].pt.x + img1.cols), cvRound(features2[i].pt.y));
cvCircle(img_corr, pt, 3, CV_RGB(255, 0, 0));
|circle(img_corr, pt, 3, Scalar(0, 0, 255));
cvLine(img_corr, features1[desc_idx[i].trainIdx].pt, pt, CV_RGB(0, 255, 0));
|line(img_corr, features1[desc_idx[i].trainIdx].pt, pt, Scalar(0, 255, 0));
}

return img_corr;
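For readability, here is the converted DrawCorrespondences assembled from the new (right-hand) lines of the diff above. This is a sketch, not an authoritative listing: the includes are added to make it self-contained, and the second cvtColor is taken to convert img2 so the right panel actually shows the second image.

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;

Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
                        const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx)
{
    // Side-by-side canvas: img1 on the left, img2 on the right.
    Mat part, img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
    img_corr = Scalar::all(0);
    part = img_corr(Rect(0, 0, img1.cols, img1.rows));
    cvtColor(img1, part, COLOR_GRAY2RGB);
    part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
    cvtColor(img2, part, COLOR_GRAY2RGB);

    // Keypoints of the first image.
    for (size_t i = 0; i < features1.size(); i++)
        circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));

    // Keypoints of the second image, shifted into the right half, plus match lines.
    for (size_t i = 0; i < features2.size(); i++)
    {
        Point pt(cvRound(features2[i].pt.x + img1.cols), cvRound(features2[i].pt.y));
        circle(img_corr, pt, 3, Scalar(0, 0, 255));
        line(img_corr, features1[desc_idx[i].trainIdx].pt, pt, Scalar(0, 255, 0));
    }

    return img_corr;
}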
@ -1,5 +1,6 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
|#include "opencv2/photo/photo.hpp"

#include <iostream>

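This hunk adds the new photo header to a sample that previously needed only highgui and imgproc — presumably the inpainting demo, since inpainting moves into the photo module in this commit. A minimal sketch of the call the include makes available; the file names and variable names below are illustrative assumptions, not taken from the diff.

#include "opencv2/photo/photo.hpp"       // cv::inpaint now lives in the photo module
#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::Mat img  = cv::imread("damaged.png");     // assumed input image
    cv::Mat mask = cv::imread("mask.png", 0);     // assumed mask: non-zero marks damaged pixels
    cv::Mat restored;
    cv::inpaint(img, mask, restored, 3.0, cv::INPAINT_TELEA);
    cv::imshow("restored", restored);
    cv::waitKey();
    return 0;
}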
@ -2,6 +2,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
|#include "opencv2/nonfree/nonfree.hpp"

using namespace cv;

@ -42,7 +43,7 @@ int main(int argc, char** argv)
extractor.compute(img2, keypoints2, descriptors2);

// matching descriptors
BruteForceMatcher<L2<float> > matcher;
|BFMatcher matcher(NORM_L2);
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);

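With SURF moved into the nonfree module and the brute-force matcher made non-template, the matching pipeline of a sample like this reduces to the sketch below. The helper name matchImages and the detector parameters are illustrative assumptions; the detect/compute/match calls follow the 2.4-era API that the diff itself uses.

#include <vector>
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"    // SurfFeatureDetector, SurfDescriptorExtractor

using namespace cv;

// Hypothetical helper: detect, describe and match two grayscale images.
static void matchImages(const Mat& img1, const Mat& img2, std::vector<DMatch>& matches)
{
    SurfFeatureDetector detector(400);    // hessian threshold chosen for illustration
    SurfDescriptorExtractor extractor;

    std::vector<KeyPoint> keypoints1, keypoints2;
    detector.detect(img1, keypoints1);
    detector.detect(img2, keypoints2);

    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);

    // BFMatcher replaces BruteForceMatcher<L2<float> >: the norm is a runtime argument.
    BFMatcher matcher(NORM_L2);
    matcher.match(descriptors1, descriptors2, matches);
}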
@ -141,7 +141,7 @@ int main(int ac, char ** av)

vector<DMatch> matches;

BruteForceMatcher<Hamming> desc_matcher;
|BFMatcher desc_matcher(NORM_HAMMING);

vector<Point2f> train_pts, query_pts;
vector<KeyPoint> train_kpts, query_kpts;
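The same non-template change covers binary descriptors: instead of selecting the distance through a template argument (Hamming), the norm is passed to the constructor. A minimal sketch, assuming query_desc and train_desc are CV_8U descriptor matrices produced earlier in the program; the helper name is hypothetical.

#include <vector>
#include "opencv2/features2d/features2d.hpp"

using namespace cv;

// Hypothetical helper: match binary (CV_8U) descriptors, e.g. BRIEF/ORB, with Hamming distance.
static std::vector<DMatch> matchBinary(const Mat& query_desc, const Mat& train_desc)
{
    BFMatcher desc_matcher(NORM_HAMMING);   // norm is a constructor argument, not a template parameter
    std::vector<DMatch> matches;
    desc_matcher.match(query_desc, train_desc, matches);
    return matches;
}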
@ -1,6 +1,7 @@
SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
opencv_ml opencv_video opencv_objdetect opencv_features2d
opencv_calib3d opencv_legacy opencv_contrib opencv_gpu)
|opencv_calib3d opencv_legacy opencv_contrib opencv_gpu
|opencv_nonfree)

ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})

@ -4,6 +4,7 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/gpu/gpu.hpp"
|#include "opencv2/nonfree/nonfree.hpp"
#include "performance.h"

using namespace std;
@ -352,7 +353,7 @@ TEST(BruteForceMatcher)

int desc_len = 64;

BruteForceMatcher< L2<float> > matcher;
|BFMatcher matcher(NORM_L2);

Mat query;
gen(query, 3000, desc_len, CV_32F, 0, 1);