OpenCV with the refactored features2d compiles! contrib is broken for now; the tests have not been tried yet.
parent 2e915026a0
commit 09df1a286b
@@ -110,6 +110,10 @@ public:
CV_OUT std::vector<KeyPoint>& keypoints,
InputArray mask=noArray() );

virtual void detect( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays masks=noArray() );

/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
@@ -120,6 +124,10 @@ public:
CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors );

virtual void compute( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays descriptors );

/* Detects keypoints and computes the descriptors */
CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints,
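Annotation (not part of the commit): the refactored Feature2D base class gains vectorized detect()/compute() overloads that work on a whole set of images, alongside the new detectAndCompute() entry point. A minimal usage sketch under the assumption that AKAZE::create() with default arguments is available as declared later in this header; the synthetic images are placeholders.

// Example sketch, not part of the commit.
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

void detectOnImageSet()
{
    // Two placeholder grayscale images.
    std::vector<cv::Mat> images(2);
    images[0] = cv::Mat(480, 640, CV_8UC1);
    images[1] = cv::Mat(480, 640, CV_8UC1);
    cv::randu(images[0], 0, 255);
    cv::randu(images[1], 0, 255);

    cv::Ptr<cv::Feature2D> f2d = cv::AKAZE::create();

    // One keypoint vector and one descriptor Mat per input image.
    std::vector<std::vector<cv::KeyPoint> > keypoints;
    std::vector<cv::Mat> descriptors;

    f2d->detect(images, keypoints);               // vectorized detect overload
    f2d->compute(images, keypoints, descriptors); // vectorized compute overload
}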
@@ -146,7 +154,7 @@ public:
CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);
// custom setup
CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>());
float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());
};

/*!
@@ -174,6 +182,13 @@ public:
class CV_EXPORTS_W MSER : public Feature2D
{
public:
enum
{
DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003,
MAX_EVOLUTION=10004, AREA_THRESHOLD=10005,
MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007
};

//! the full constructor
CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
double _max_variation=0.25, double _min_diversity=.2,
@@ -181,7 +196,7 @@ public:
double _min_margin=0.003, int _edge_blur_size=5 );

CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label,
OutputArray stats=noArray() ) const = 0;
OutputArray stats=noArray() ) = 0;
};

//! detects corners using FAST algorithm by E. Rosten
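Annotation (not part of the commit): MSER now also goes through a static create() factory, and its pure virtual detectAndLabel() returns the number of regions through a label image plus optional per-region stats. The exact layout of label and stats is not spelled out in this hunk, so the sketch below only shows the wiring.

// Example sketch, not part of the commit; label/stats content is assumed, not documented here.
#include <opencv2/features2d.hpp>

int labelRegions(const cv::Mat& gray)
{
    // Arguments mirror the first three defaults of the create() signature above.
    cv::Ptr<cv::MSER> mser = cv::MSER::create(5, 60, 14400);

    cv::Mat label, stats;
    int nregions = mser->detectAndLabel(gray, label, stats);
    return nregions;
}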
@@ -199,13 +214,16 @@ public:
TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
};

CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 );
CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
bool nonmaxSuppression=true,
int type=FastFeatureDetector::TYPE_9_16 );
};


class CV_EXPORTS_W GFTTDetector : public Feature2D
{
public:
enum { USE_HARRIS_DETECTOR=10000 };
CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
};
@@ -282,7 +300,7 @@ public:
DESCRIPTOR_MLDB = 5
};

CV_WRAP static Ptr<AKAZE> create(int descriptor_type=DESCRIPTOR_MLDB,
CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0, int descriptor_channels = 3,
float threshold = 0.001f, int octaves = 4,
int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2);
@@ -535,8 +553,6 @@ public:
virtual bool isMaskSupported() const { return true; }

virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;

AlgorithmInfo* info() const;
protected:
virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
InputArrayOfArrays masks=noArray(), bool compactResult=false );
@@ -569,8 +585,6 @@ public:
virtual bool isMaskSupported() const;

virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;

AlgorithmInfo* info() const;
protected:
static void convertToDMatches( const DescriptorCollection& descriptors,
const Mat& indices, const Mat& distances,
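Annotation (not part of the commit): note how the default arguments now qualify the enum values through their interface class (FastFeatureDetector::TYPE_9_16, AKAZE::DESCRIPTOR_MLDB, KAZE::DIFF_PM_G2) instead of using bare identifiers in namespace cv. A small sketch of the corresponding explicit calls, using only the signatures shown above:

// Example sketch, not part of the commit.
#include <opencv2/features2d.hpp>

void createDetectors()
{
    cv::Ptr<cv::FastFeatureDetector> fast =
        cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);

    cv::Ptr<cv::AKAZE> akaze =
        cv::AKAZE::create(cv::AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.001f, 4, 4,
                          cv::KAZE::DIFF_PM_G2);

    cv::Ptr<cv::GFTTDetector> gftt =
        cv::GFTTDetector::create(1000, 0.01, 1, 3, false, 0.04);
}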
@@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
mframe.copyTo(frame);

declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;

OCL_TEST_CYCLE() detector(frame, mask, points);
OCL_TEST_CYCLE() detector->detect(frame, points, mask);

std::sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)

declare.in(frame);

ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater());

UMat descriptors;

OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true);
OCL_TEST_CYCLE() detector->compute(frame, points, descriptors);

SANITY_CHECK(descriptors);
}
@@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
mframe.copyTo(frame);

declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);

vector<KeyPoint> points;
UMat descriptors;

OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false);
OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);

::perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
@@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))

Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;

TEST_CYCLE() detector(frame, mask, points);
TEST_CYCLE() detector->detect(frame, points, mask);

sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
@@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);

ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater());

Mat descriptors;

TEST_CYCLE() detector(frame, mask, points, descriptors, true);
TEST_CYCLE() detector->compute(frame, points, descriptors);

SANITY_CHECK(descriptors);
}
@@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))

Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);

vector<KeyPoint> points;
Mat descriptors;

TEST_CYCLE() detector(frame, mask, points, descriptors, false);
TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);

perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
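Annotation (not part of the commit): the perf-test changes above capture the whole migration pattern. The concrete class is no longer instantiated and called as a functor; a cv::Ptr is obtained from the static create() and all work goes through detect()/compute()/detectAndCompute(). Roughly, assuming a grayscale frame and mask like the test fixtures use:

// Example migration sketch, not part of the commit; "frame" and "mask" stand in for the fixture images.
#include <opencv2/features2d.hpp>
#include <vector>

void orbOldVsNew(const cv::Mat& frame, const cv::Mat& mask)
{
    std::vector<cv::KeyPoint> points;
    cv::Mat descriptors;

    // Before this commit (functor-style):
    //   cv::ORB detector(1500, 1.3f, 1);
    //   detector(frame, mask, points);                      // detect only
    //   detector(frame, mask, points, descriptors, false);  // detect + compute

    // After this commit (factory + virtual interface):
    cv::Ptr<cv::ORB> detector = cv::ORB::create(1500, 1.3f, 1);
    detector->detect(frame, points, mask);
    detector->detectAndCompute(frame, mask, points, descriptors, false);
}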
@ -52,22 +52,15 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd
|
||||
#include "kaze/AKAZEFeatures.h"
|
||||
|
||||
#include <iostream>
|
||||
using namespace std;
|
||||
|
||||
namespace cv
|
||||
{
|
||||
AKAZE::AKAZE()
|
||||
: descriptor(DESCRIPTOR_MLDB)
|
||||
, descriptor_channels(3)
|
||||
, descriptor_size(0)
|
||||
, threshold(0.001f)
|
||||
, octaves(4)
|
||||
, sublevels(4)
|
||||
, diffusivity(DIFF_PM_G2)
|
||||
{
|
||||
}
|
||||
using namespace std;
|
||||
|
||||
AKAZE::AKAZE(int _descriptor_type, int _descriptor_size, int _descriptor_channels,
|
||||
class AKAZE_Impl : public AKAZE
|
||||
{
|
||||
public:
|
||||
AKAZE_Impl(int _descriptor_type, int _descriptor_size, int _descriptor_channels,
|
||||
float _threshold, int _octaves, int _sublevels, int _diffusivity)
|
||||
: descriptor(_descriptor_type)
|
||||
, descriptor_channels(_descriptor_channels)
|
||||
@ -76,181 +69,139 @@ namespace cv
|
||||
, octaves(_octaves)
|
||||
, sublevels(_sublevels)
|
||||
, diffusivity(_diffusivity)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
AKAZE::~AKAZE()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
// returns the descriptor size in bytes
|
||||
int AKAZE::descriptorSize() const
|
||||
{
|
||||
switch (descriptor)
|
||||
{
|
||||
case cv::DESCRIPTOR_KAZE:
|
||||
case cv::DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return 64;
|
||||
|
||||
case cv::DESCRIPTOR_MLDB:
|
||||
case cv::DESCRIPTOR_MLDB_UPRIGHT:
|
||||
// We use the full length binary descriptor -> 486 bits
|
||||
if (descriptor_size == 0)
|
||||
{
|
||||
int t = (6 + 36 + 120) * descriptor_channels;
|
||||
return (int)ceil(t / 8.);
|
||||
}
|
||||
else
|
||||
{
|
||||
// We use the random bit selection length binary descriptor
|
||||
return (int)ceil(descriptor_size / 8.);
|
||||
}
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// returns the descriptor type
|
||||
int AKAZE::descriptorType() const
|
||||
{
|
||||
switch (descriptor)
|
||||
virtual ~AKAZE_Impl()
|
||||
{
|
||||
case cv::DESCRIPTOR_KAZE:
|
||||
case cv::DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return CV_32F;
|
||||
|
||||
case cv::DESCRIPTOR_MLDB:
|
||||
case cv::DESCRIPTOR_MLDB_UPRIGHT:
|
||||
return CV_8U;
|
||||
}
|
||||
|
||||
// returns the descriptor size in bytes
|
||||
int descriptorSize() const
|
||||
{
|
||||
switch (descriptor)
|
||||
{
|
||||
case DESCRIPTOR_KAZE:
|
||||
case DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return 64;
|
||||
|
||||
case DESCRIPTOR_MLDB:
|
||||
case DESCRIPTOR_MLDB_UPRIGHT:
|
||||
// We use the full length binary descriptor -> 486 bits
|
||||
if (descriptor_size == 0)
|
||||
{
|
||||
int t = (6 + 36 + 120) * descriptor_channels;
|
||||
return (int)ceil(t / 8.);
|
||||
}
|
||||
else
|
||||
{
|
||||
// We use the random bit selection length binary descriptor
|
||||
return (int)ceil(descriptor_size / 8.);
|
||||
}
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// returns the default norm type
|
||||
int AKAZE::defaultNorm() const
|
||||
{
|
||||
switch (descriptor)
|
||||
// returns the descriptor type
|
||||
int descriptorType() const
|
||||
{
|
||||
case cv::DESCRIPTOR_KAZE:
|
||||
case cv::DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return cv::NORM_L2;
|
||||
switch (descriptor)
|
||||
{
|
||||
case DESCRIPTOR_KAZE:
|
||||
case DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return CV_32F;
|
||||
|
||||
case cv::DESCRIPTOR_MLDB:
|
||||
case cv::DESCRIPTOR_MLDB_UPRIGHT:
|
||||
return cv::NORM_HAMMING;
|
||||
case DESCRIPTOR_MLDB:
|
||||
case DESCRIPTOR_MLDB_UPRIGHT:
|
||||
return CV_8U;
|
||||
|
||||
default:
|
||||
return -1;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void AKAZE::operator()(InputArray image, InputArray mask,
|
||||
std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors,
|
||||
bool useProvidedKeypoints) const
|
||||
{
|
||||
cv::Mat img = image.getMat();
|
||||
if (img.type() != CV_8UC1)
|
||||
cvtColor(image, img, COLOR_BGR2GRAY);
|
||||
|
||||
Mat img1_32;
|
||||
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
|
||||
|
||||
cv::Mat& desc = descriptors.getMatRef();
|
||||
|
||||
AKAZEOptions options;
|
||||
options.descriptor = descriptor;
|
||||
options.descriptor_channels = descriptor_channels;
|
||||
options.descriptor_size = descriptor_size;
|
||||
options.img_width = img.cols;
|
||||
options.img_height = img.rows;
|
||||
options.dthreshold = threshold;
|
||||
options.omax = octaves;
|
||||
options.nsublevels = sublevels;
|
||||
options.diffusivity = diffusivity;
|
||||
|
||||
AKAZEFeatures impl(options);
|
||||
impl.Create_Nonlinear_Scale_Space(img1_32);
|
||||
|
||||
if (!useProvidedKeypoints)
|
||||
// returns the default norm type
|
||||
int defaultNorm() const
|
||||
{
|
||||
impl.Feature_Detection(keypoints);
|
||||
switch (descriptor)
|
||||
{
|
||||
case DESCRIPTOR_KAZE:
|
||||
case DESCRIPTOR_KAZE_UPRIGHT:
|
||||
return NORM_L2;
|
||||
|
||||
case DESCRIPTOR_MLDB:
|
||||
case DESCRIPTOR_MLDB_UPRIGHT:
|
||||
return NORM_HAMMING;
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (!mask.empty())
|
||||
void detectAndCompute(InputArray image, InputArray mask,
|
||||
std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors,
|
||||
bool useProvidedKeypoints)
|
||||
{
|
||||
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
|
||||
Mat img = image.getMat();
|
||||
if (img.type() != CV_8UC1)
|
||||
cvtColor(image, img, COLOR_BGR2GRAY);
|
||||
|
||||
Mat img1_32;
|
||||
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
|
||||
|
||||
AKAZEOptions options;
|
||||
options.descriptor = descriptor;
|
||||
options.descriptor_channels = descriptor_channels;
|
||||
options.descriptor_size = descriptor_size;
|
||||
options.img_width = img.cols;
|
||||
options.img_height = img.rows;
|
||||
options.dthreshold = threshold;
|
||||
options.omax = octaves;
|
||||
options.nsublevels = sublevels;
|
||||
options.diffusivity = diffusivity;
|
||||
|
||||
AKAZEFeatures impl(options);
|
||||
impl.Create_Nonlinear_Scale_Space(img1_32);
|
||||
|
||||
if (!useProvidedKeypoints)
|
||||
{
|
||||
impl.Feature_Detection(keypoints);
|
||||
}
|
||||
|
||||
if (!mask.empty())
|
||||
{
|
||||
KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
|
||||
}
|
||||
|
||||
if( descriptors.needed() )
|
||||
{
|
||||
Mat& desc = descriptors.getMatRef();
|
||||
impl.Compute_Descriptors(keypoints, desc);
|
||||
|
||||
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
|
||||
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
|
||||
}
|
||||
}
|
||||
|
||||
impl.Compute_Descriptors(keypoints, desc);
|
||||
int descriptor;
|
||||
int descriptor_channels;
|
||||
int descriptor_size;
|
||||
float threshold;
|
||||
int octaves;
|
||||
int sublevels;
|
||||
int diffusivity;
|
||||
};
|
||||
|
||||
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
|
||||
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
|
||||
}
|
||||
|
||||
void AKAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
|
||||
Ptr<AKAZE> AKAZE::create(int descriptor_type,
|
||||
int descriptor_size, int descriptor_channels,
|
||||
float threshold, int octaves,
|
||||
int sublevels, int diffusivity)
|
||||
{
|
||||
cv::Mat img = image.getMat();
|
||||
if (img.type() != CV_8UC1)
|
||||
cvtColor(image, img, COLOR_BGR2GRAY);
|
||||
|
||||
Mat img1_32;
|
||||
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
|
||||
|
||||
AKAZEOptions options;
|
||||
options.descriptor = descriptor;
|
||||
options.descriptor_channels = descriptor_channels;
|
||||
options.descriptor_size = descriptor_size;
|
||||
options.img_width = img.cols;
|
||||
options.img_height = img.rows;
|
||||
options.dthreshold = threshold;
|
||||
options.omax = octaves;
|
||||
options.nsublevels = sublevels;
|
||||
options.diffusivity = diffusivity;
|
||||
|
||||
AKAZEFeatures impl(options);
|
||||
impl.Create_Nonlinear_Scale_Space(img1_32);
|
||||
impl.Feature_Detection(keypoints);
|
||||
|
||||
if (!mask.empty())
|
||||
{
|
||||
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
|
||||
}
|
||||
}
|
||||
|
||||
void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
|
||||
{
|
||||
cv::Mat img = image.getMat();
|
||||
if (img.type() != CV_8UC1)
|
||||
cvtColor(image, img, COLOR_BGR2GRAY);
|
||||
|
||||
Mat img1_32;
|
||||
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
|
||||
|
||||
cv::Mat& desc = descriptors.getMatRef();
|
||||
|
||||
AKAZEOptions options;
|
||||
options.descriptor = descriptor;
|
||||
options.descriptor_channels = descriptor_channels;
|
||||
options.descriptor_size = descriptor_size;
|
||||
options.img_width = img.cols;
|
||||
options.img_height = img.rows;
|
||||
options.dthreshold = threshold;
|
||||
options.omax = octaves;
|
||||
options.nsublevels = sublevels;
|
||||
options.diffusivity = diffusivity;
|
||||
|
||||
AKAZEFeatures impl(options);
|
||||
impl.Create_Nonlinear_Scale_Space(img1_32);
|
||||
impl.Compute_Descriptors(keypoints, desc);
|
||||
|
||||
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
|
||||
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
|
||||
return makePtr<AKAZE_Impl>(descriptor_type, descriptor_size, descriptor_channels,
|
||||
threshold, octaves, sublevels, diffusivity);
|
||||
}
|
||||
}
|
||||
|
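Annotation (not part of the commit): the descriptorSize() logic moved into AKAZE_Impl above is worth unpacking. The full-length MLDB descriptor uses (6 + 36 + 120) = 162 comparisons per channel, so with the default 3 channels it packs 486 bits into ceil(486 / 8) = 61 bytes; with a non-zero descriptor_size, the random-bit-selection variant packs that many bits instead. A small check of the arithmetic:

// Example sketch, not part of the commit: reproduces the byte-size arithmetic used by descriptorSize().
#include <cmath>
#include <cstdio>

int mldbDescriptorBytes(int descriptor_size, int descriptor_channels)
{
    if (descriptor_size == 0)
    {
        int t = (6 + 36 + 120) * descriptor_channels; // full-length: 486 bits for 3 channels
        return (int)std::ceil(t / 8.);
    }
    return (int)std::ceil(descriptor_size / 8.);      // random bit selection
}

int main()
{
    std::printf("%d\n", mldbDescriptorBytes(0, 3)); // prints 61
    return 0;
}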
@ -75,11 +75,10 @@ protected:
|
||||
double confidence;
|
||||
};
|
||||
|
||||
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
|
||||
virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() );
|
||||
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> ¢ers) const;
|
||||
|
||||
Params params;
|
||||
AlgorithmInfo* info() const;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -173,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const
|
||||
fs << "maxConvexity" << maxConvexity;
|
||||
}
|
||||
|
||||
SimpleBlobDetector::SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters) :
|
||||
SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params ¶meters) :
|
||||
params(parameters)
|
||||
{
|
||||
}
|
||||
|
||||
void SimpleBlobDetector::read( const cv::FileNode& fn )
|
||||
void SimpleBlobDetectorImpl::read( const cv::FileNode& fn )
|
||||
{
|
||||
params.read(fn);
|
||||
}
|
||||
|
||||
void SimpleBlobDetector::write( cv::FileStorage& fs ) const
|
||||
void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const
|
||||
{
|
||||
params.write(fs);
|
||||
}
|
||||
|
||||
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> ¢ers) const
|
||||
void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> ¢ers) const
|
||||
{
|
||||
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
|
||||
(void)image;
|
||||
@ -302,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s
|
||||
#endif
|
||||
}
|
||||
|
||||
void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const
|
||||
void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray)
|
||||
{
|
||||
//TODO: support mask
|
||||
keypoints.clear();
|
||||
@ -365,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>&
|
||||
keypoints.push_back(kpt);
|
||||
}
|
||||
}
|
||||
|
||||
Ptr<SimpleBlobDetector> SimpleBlobDetector::create(const SimpleBlobDetector::Params& params)
|
||||
{
|
||||
return makePtr<SimpleBlobDetectorImpl>(params);
|
||||
}
|
||||
|
||||
}
|
||||
|
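Annotation (not part of the commit): SimpleBlobDetector follows the same pattern as the other algorithms, with the working code moved into SimpleBlobDetectorImpl whose detect() override replaces the old detectImpl(), and a new static create(). A minimal usage sketch; the parameter values are illustrative only.

// Example sketch, not part of the commit; parameter values are illustrative.
#include <opencv2/features2d.hpp>
#include <vector>

void detectBlobs(const cv::Mat& gray)
{
    cv::SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 50;
    params.maxArea = 5000;

    cv::Ptr<cv::SimpleBlobDetector> blobs = cv::SimpleBlobDetector::create(params);

    std::vector<cv::KeyPoint> keypoints;
    blobs->detect(gray, keypoints); // mask is still unsupported (see the TODO above)
}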
@ -42,9 +42,7 @@
|
||||
the IEEE International Conference on Computer Vision (ICCV2011).
|
||||
*/
|
||||
|
||||
#include <opencv2/features2d.hpp>
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include "precomp.hpp"
|
||||
#include <fstream>
|
||||
#include <stdlib.h>
|
||||
|
||||
@ -53,7 +51,6 @@
|
||||
namespace cv
|
||||
{
|
||||
|
||||
|
||||
class BRISK_Impl : public BRISK
|
||||
{
|
||||
public:
|
||||
@ -62,18 +59,37 @@ public:
|
||||
explicit BRISK_Impl(const std::vector<float> &radiusList, const std::vector<int> &numberList,
|
||||
float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>());
|
||||
|
||||
virtual ~BRISK_Impl();
|
||||
|
||||
int descriptorSize() const
|
||||
{
|
||||
return strings_;
|
||||
}
|
||||
|
||||
int descriptorType() const
|
||||
{
|
||||
return CV_8U;
|
||||
}
|
||||
|
||||
int defaultNorm() const
|
||||
{
|
||||
return NORM_HAMMING;
|
||||
}
|
||||
|
||||
// call this to generate the kernel:
|
||||
// circle of radius r (pixels), with n points;
|
||||
// short pairings with dMax, long pairings with dMin
|
||||
void generateKernel(std::vector<float> &radiusList,
|
||||
std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
|
||||
std::vector<int> indexChange=std::vector<int>());
|
||||
void generateKernel(const std::vector<float> &radiusList,
|
||||
const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
|
||||
const std::vector<int> &indexChange=std::vector<int>());
|
||||
|
||||
void detectAndCompute( InputArray image, InputArray mask,
|
||||
CV_OUT std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors,
|
||||
bool useProvidedKeypoints );
|
||||
|
||||
protected:
|
||||
|
||||
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
|
||||
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
|
||||
|
||||
void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
|
||||
void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors, bool doDescriptors, bool doOrientation,
|
||||
@ -256,16 +272,16 @@ protected:
|
||||
static const float basicSize_;
|
||||
};
|
||||
|
||||
const float BRISK::basicSize_ = 12.0f;
|
||||
const unsigned int BRISK::scales_ = 64;
|
||||
const float BRISK::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted...
|
||||
const unsigned int BRISK::n_rot_ = 1024; // discretization of the rotation look-up
|
||||
const float BRISK_Impl::basicSize_ = 12.0f;
|
||||
const unsigned int BRISK_Impl::scales_ = 64;
|
||||
const float BRISK_Impl::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted...
|
||||
const unsigned int BRISK_Impl::n_rot_ = 1024; // discretization of the rotation look-up
|
||||
|
||||
const float BriskScaleSpace::safetyFactor_ = 1.0f;
|
||||
const float BriskScaleSpace::basicSize_ = 12.0f;
|
||||
|
||||
// constructors
|
||||
BRISK::BRISK(int thresh, int octaves_in, float patternScale)
|
||||
BRISK_Impl::BRISK_Impl(int thresh, int octaves_in, float patternScale)
|
||||
{
|
||||
threshold = thresh;
|
||||
octaves = octaves_in;
|
||||
@ -291,10 +307,12 @@ BRISK::BRISK(int thresh, int octaves_in, float patternScale)
|
||||
nList[4] = 20;
|
||||
|
||||
generateKernel(rList, nList, (float)(5.85 * patternScale), (float)(8.2 * patternScale));
|
||||
|
||||
}
|
||||
BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax, float dMin,
|
||||
std::vector<int> indexChange)
|
||||
|
||||
BRISK_Impl::BRISK_Impl(const std::vector<float> &radiusList,
|
||||
const std::vector<int> &numberList,
|
||||
float dMax, float dMin,
|
||||
const std::vector<int> indexChange)
|
||||
{
|
||||
generateKernel(radiusList, numberList, dMax, dMin, indexChange);
|
||||
threshold = 20;
|
||||
@ -302,10 +320,12 @@ BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float
|
||||
}
|
||||
|
||||
void
|
||||
BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax,
|
||||
float dMin, std::vector<int> indexChange)
|
||||
BRISK_Impl::generateKernel(const std::vector<float> &radiusList,
|
||||
const std::vector<int> &numberList,
|
||||
float dMax, float dMin,
|
||||
const std::vector<int>& _indexChange)
|
||||
{
|
||||
|
||||
std::vector<int> indexChange = _indexChange;
|
||||
dMax_ = dMax;
|
||||
dMin_ = dMin;
|
||||
|
||||
@ -427,7 +447,7 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi
|
||||
|
||||
// simple alternative:
|
||||
inline int
|
||||
BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x,
|
||||
BRISK_Impl::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x,
|
||||
const float key_y, const unsigned int scale, const unsigned int rot,
|
||||
const unsigned int point) const
|
||||
{
|
||||
@ -594,8 +614,8 @@ RoiPredicate(const float minX, const float minY, const float maxX, const float m
|
||||
|
||||
// computes the descriptor
|
||||
void
|
||||
BRISK::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray _descriptors, bool useProvidedKeypoints) const
|
||||
BRISK_Impl::detectAndCompute( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray _descriptors, bool useProvidedKeypoints)
|
||||
{
|
||||
bool doOrientation=true;
|
||||
if (useProvidedKeypoints)
|
||||
@ -609,7 +629,7 @@ BRISK::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& k
|
||||
}
|
||||
|
||||
void
|
||||
BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
|
||||
BRISK_Impl::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray _descriptors, bool doDescriptors, bool doOrientation,
|
||||
bool useProvidedKeypoints) const
|
||||
{
|
||||
@ -775,25 +795,8 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s
|
||||
delete[] _values;
|
||||
}
|
||||
|
||||
int
|
||||
BRISK::descriptorSize() const
|
||||
{
|
||||
return strings_;
|
||||
}
|
||||
|
||||
int
|
||||
BRISK::descriptorType() const
|
||||
{
|
||||
return CV_8U;
|
||||
}
|
||||
|
||||
int
|
||||
BRISK::defaultNorm() const
|
||||
{
|
||||
return NORM_HAMMING;
|
||||
}
|
||||
|
||||
BRISK::~BRISK()
|
||||
BRISK_Impl::~BRISK_Impl()
|
||||
{
|
||||
delete[] patternPoints_;
|
||||
delete[] shortPairs_;
|
||||
@ -803,14 +806,7 @@ BRISK::~BRISK()
|
||||
}
|
||||
|
||||
void
|
||||
BRISK::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
|
||||
{
|
||||
computeKeypointsNoOrientation(image, mask, keypoints);
|
||||
computeDescriptorsAndOrOrientation(image, mask, keypoints, cv::noArray(), false, true, true);
|
||||
}
|
||||
|
||||
void
|
||||
BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints) const
|
||||
BRISK_Impl::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints) const
|
||||
{
|
||||
Mat image = _image.getMat(), mask = _mask.getMat();
|
||||
if( image.type() != CV_8UC1 )
|
||||
@ -821,20 +817,7 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v
|
||||
briskScaleSpace.getKeypoints(threshold, keypoints);
|
||||
|
||||
// remove invalid points
|
||||
removeInvalidPoints(mask, keypoints);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
BRISK::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
|
||||
{
|
||||
(*this)(image.getMat(), mask.getMat(), keypoints);
|
||||
}
|
||||
|
||||
void
|
||||
BRISK::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
|
||||
{
|
||||
(*this)(image, Mat(), keypoints, descriptors, true);
|
||||
KeyPointsFilter::runByPixelsMask(keypoints, mask);
|
||||
}
|
||||
|
||||
// construct telling the octaves number:
|
||||
@ -2084,7 +2067,7 @@ BriskLayer::BriskLayer(const cv::Mat& img_in, float scale_in, float offset_in)
|
||||
scale_ = scale_in;
|
||||
offset_ = offset_in;
|
||||
// create an agast detector
|
||||
fast_9_16_ = makePtr<FastFeatureDetector>(1, true, FastFeatureDetector::TYPE_9_16);
|
||||
fast_9_16_ = FastFeatureDetector::create(1, true, FastFeatureDetector::TYPE_9_16);
|
||||
makeOffsets(pixel_5_8_, (int)img_.step, 8);
|
||||
makeOffsets(pixel_9_16_, (int)img_.step, 16);
|
||||
}
|
||||
@ -2106,7 +2089,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode)
|
||||
offset_ = 0.5f * scale_ - 0.5f;
|
||||
}
|
||||
scores_ = cv::Mat::zeros(img_.rows, img_.cols, CV_8U);
|
||||
fast_9_16_ = makePtr<FastFeatureDetector>(1, false, FastFeatureDetector::TYPE_9_16);
|
||||
fast_9_16_ = FastFeatureDetector::create(1, false, FastFeatureDetector::TYPE_9_16);
|
||||
makeOffsets(pixel_5_8_, (int)img_.step, 8);
|
||||
makeOffsets(pixel_9_16_, (int)img_.step, 16);
|
||||
}
|
||||
@ -2318,4 +2301,16 @@ BriskLayer::twothirdsample(const cv::Mat& srcimg, cv::Mat& dstimg)
|
||||
resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
|
||||
}
|
||||
|
||||
Ptr<BRISK> BRISK::create(int thresh, int octaves, float patternScale)
|
||||
{
|
||||
return makePtr<BRISK_Impl>(thresh, octaves, patternScale);
|
||||
}
|
||||
|
||||
// custom setup
|
||||
Ptr<BRISK> BRISK::create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
|
||||
float dMax, float dMin, const std::vector<int>& indexChange)
|
||||
{
|
||||
return makePtr<BRISK_Impl>(radiusList, numberList, dMax, dMin, indexChange);
|
||||
}
|
||||
|
||||
}
|
||||
|
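Annotation (not part of the commit): BRISK::create() now exposes the same two entry points the old constructors had, the standard setup (threshold, octaves, patternScale) and the custom-pattern setup taking the sampling-ring radii and point counts, with indexChange finally passed by const reference. A hedged sketch of the custom setup; the ring radii and point counts below are illustrative, not a recommended pattern.

// Example sketch, not part of the commit; ring values are illustrative only.
#include <opencv2/features2d.hpp>
#include <vector>

cv::Ptr<cv::BRISK> makeCustomBrisk()
{
    std::vector<float> radiusList;   // sampling-ring radii in pixels
    std::vector<int>   numberList;   // points per ring
    radiusList.push_back(0.f);   numberList.push_back(1);
    radiusList.push_back(2.9f);  numberList.push_back(10);
    radiusList.push_back(4.9f);  numberList.push_back(14);
    radiusList.push_back(7.4f);  numberList.push_back(15);
    radiusList.push_back(10.8f); numberList.push_back(20);

    // dMax/dMin values follow the defaults in the declaration shown earlier in this commit.
    return cv::BRISK::create(radiusList, numberList, 5.85f, 8.2f);
}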
modules/features2d/src/feature2d.cpp (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
using std::vector;
|
||||
|
||||
Feature2D::~Feature2D() {}
|
||||
|
||||
/*
|
||||
* Detect keypoints in an image.
|
||||
* image The image.
|
||||
* keypoints The detected keypoints.
|
||||
* mask Mask specifying where to look for keypoints (optional). Must be a char
|
||||
* matrix with non-zero values in the region of interest.
|
||||
*/
|
||||
void Feature2D::detect( InputArray image,
|
||||
std::vector<KeyPoint>& keypoints,
|
||||
InputArray mask )
|
||||
{
|
||||
detectAndCompute(image, mask, keypoints, noArray(), false);
|
||||
}
|
||||
|
||||
|
||||
void Feature2D::detect( InputArrayOfArrays _images,
|
||||
std::vector<std::vector<KeyPoint> >& keypoints,
|
||||
InputArrayOfArrays _masks )
|
||||
{
|
||||
vector<Mat> images, masks;
|
||||
|
||||
_images.getMatVector(images);
|
||||
size_t i, nimages = images.size();
|
||||
|
||||
if( !_masks.empty() )
|
||||
{
|
||||
_masks.getMatVector(masks);
|
||||
CV_Assert(masks.size() == nimages);
|
||||
}
|
||||
|
||||
keypoints.resize(nimages);
|
||||
|
||||
for( i = 0; i < nimages; i++ )
|
||||
{
|
||||
detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the descriptors for a set of keypoints in an image.
|
||||
* image The image.
|
||||
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
|
||||
* descriptors Copmputed descriptors. Row i is the descriptor for keypoint i.
|
||||
*/
|
||||
void Feature2D::compute( InputArray image,
|
||||
std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors )
|
||||
{
|
||||
detectAndCompute(image, noArray(), keypoints, descriptors, true);
|
||||
}
|
||||
|
||||
void Feature2D::compute( InputArrayOfArrays _images,
|
||||
std::vector<std::vector<KeyPoint> >& keypoints,
|
||||
OutputArrayOfArrays _descriptors )
|
||||
{
|
||||
if( !_descriptors.needed() )
|
||||
return;
|
||||
|
||||
vector<Mat> images;
|
||||
|
||||
_images.getMatVector(images);
|
||||
size_t i, nimages = images.size();
|
||||
|
||||
CV_Assert( keypoints.size() == nimages );
|
||||
CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );
|
||||
|
||||
vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
|
||||
descriptors.resize(nimages);
|
||||
|
||||
for( i = 0; i < nimages; i++ )
|
||||
{
|
||||
compute(images[i], keypoints[i], descriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Detects keypoints and computes the descriptors */
|
||||
void Feature2D::detectAndCompute( InputArray, InputArray,
|
||||
std::vector<KeyPoint>&,
|
||||
OutputArray,
|
||||
bool )
|
||||
{
|
||||
CV_Error(Error::StsNotImplemented, "");
|
||||
}
|
||||
|
||||
int Feature2D::descriptorSize() const
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int Feature2D::descriptorType() const
|
||||
{
|
||||
return CV_32F;
|
||||
}
|
||||
|
||||
int Feature2D::defaultNorm() const
|
||||
{
|
||||
int tp = descriptorType();
|
||||
return tp == CV_8U ? NORM_HAMMING : NORM_L2;
|
||||
}
|
||||
|
||||
// Return true if detector object is empty
|
||||
bool Feature2D::empty() const
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
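Annotation (not part of the commit): the new feature2d.cpp makes the base class do the plumbing. detect() forwards to detectAndCompute() with a dummy descriptor output, compute() forwards with useProvidedKeypoints = true, and the multi-image overloads loop over the per-image calls. So a new algorithm only has to override detectAndCompute() plus the descriptor traits; a rough sketch with placeholder detection logic:

// Example sketch, not part of the commit: a minimal Feature2D subclass relying on the base-class forwarding above.
#include <opencv2/features2d.hpp>
#include <vector>

class ToyFeature2D : public cv::Feature2D
{
public:
    void detectAndCompute(cv::InputArray image, cv::InputArray mask,
                          std::vector<cv::KeyPoint>& keypoints,
                          cv::OutputArray descriptors,
                          bool useProvidedKeypoints)
    {
        (void)image; // a real detector would look at the image here
        if (!useProvidedKeypoints)
        {
            keypoints.clear();
            keypoints.push_back(cv::KeyPoint(0.f, 0.f, 7.f)); // placeholder keypoint
        }
        if (!mask.empty())
            cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
        if (descriptors.needed())
        {
            descriptors.create((int)keypoints.size(), descriptorSize(), descriptorType());
            descriptors.getMat().setTo(cv::Scalar::all(0)); // placeholder descriptors
        }
    }

    int descriptorSize() const { return 32; }
    int descriptorType() const { return CV_8U; }
};

// Thanks to the base-class forwarding, detect() and compute() now work too:
//   ToyFeature2D f; f.detect(img, kpts); f.compute(img, kpts, desc);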
@ -1,199 +0,0 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#if 0
|
||||
|
||||
using namespace cv;
|
||||
|
||||
Ptr<Feature2D> Feature2D::create( const String& feature2DType )
|
||||
{
|
||||
return Algorithm::create<Feature2D>("Feature2D." + feature2DType);
|
||||
}
|
||||
|
||||
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
|
||||
|
||||
/* NOTE!!!
|
||||
All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
|
||||
Otherwise, linker may throw away some seemingly unused stuff.
|
||||
*/
|
||||
|
||||
CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK",
|
||||
obj.info()->addParam(obj, "thres", obj.threshold);
|
||||
obj.info()->addParam(obj, "octaves", obj.octaves))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
|
||||
obj.info()->addParam(obj, "threshold", obj.threshold);
|
||||
obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
|
||||
obj.info()->addParam(obj, "type", obj.type))
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
|
||||
obj.info()->addParam(obj, "delta", obj.delta);
|
||||
obj.info()->addParam(obj, "minArea", obj.minArea);
|
||||
obj.info()->addParam(obj, "maxArea", obj.maxArea);
|
||||
obj.info()->addParam(obj, "maxVariation", obj.maxVariation);
|
||||
obj.info()->addParam(obj, "minDiversity", obj.minDiversity);
|
||||
obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution);
|
||||
obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold);
|
||||
obj.info()->addParam(obj, "minMargin", obj.minMargin);
|
||||
obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
|
||||
obj.info()->addParam(obj, "nFeatures", obj.nfeatures);
|
||||
obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor);
|
||||
obj.info()->addParam(obj, "nLevels", obj.nlevels);
|
||||
obj.info()->addParam(obj, "firstLevel", obj.firstLevel);
|
||||
obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
|
||||
obj.info()->addParam(obj, "patchSize", obj.patchSize);
|
||||
obj.info()->addParam(obj, "WTA_K", obj.WTA_K);
|
||||
obj.info()->addParam(obj, "scoreType", obj.scoreType);
|
||||
obj.info()->addParam(obj, "fastThreshold", obj.fastThreshold))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
|
||||
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
|
||||
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
|
||||
obj.info()->addParam(obj, "minDistance", obj.minDistance);
|
||||
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
|
||||
obj.info()->addParam(obj, "k", obj.k))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE",
|
||||
obj.info()->addParam(obj, "upright", obj.upright);
|
||||
obj.info()->addParam(obj, "extended", obj.extended);
|
||||
obj.info()->addParam(obj, "threshold", obj.threshold);
|
||||
obj.info()->addParam(obj, "octaves", obj.octaves);
|
||||
obj.info()->addParam(obj, "sublevels", obj.sublevels);
|
||||
obj.info()->addParam(obj, "diffusivity", obj.diffusivity))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(AKAZE, "Feature2D.AKAZE",
|
||||
obj.info()->addParam(obj, "descriptor", obj.descriptor);
|
||||
obj.info()->addParam(obj, "descriptor_channels", obj.descriptor_channels);
|
||||
obj.info()->addParam(obj, "descriptor_size", obj.descriptor_size);
|
||||
obj.info()->addParam(obj, "threshold", obj.threshold);
|
||||
obj.info()->addParam(obj, "octaves", obj.octaves);
|
||||
obj.info()->addParam(obj, "sublevels", obj.sublevels);
|
||||
obj.info()->addParam(obj, "diffusivity", obj.diffusivity))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
CV_INIT_ALGORITHM(SimpleBlobDetector, "Feature2D.SimpleBlob",
|
||||
obj.info()->addParam(obj, "thresholdStep", obj.params.thresholdStep);
|
||||
obj.info()->addParam(obj, "minThreshold", obj.params.minThreshold);
|
||||
obj.info()->addParam(obj, "maxThreshold", obj.params.maxThreshold);
|
||||
obj.info()->addParam_(obj, "minRepeatability", (sizeof(size_t) == sizeof(uint64))?Param::UINT64 : Param::UNSIGNED_INT, &obj.params.minRepeatability, false, 0, 0);
|
||||
obj.info()->addParam(obj, "minDistBetweenBlobs", obj.params.minDistBetweenBlobs);
|
||||
obj.info()->addParam(obj, "filterByColor", obj.params.filterByColor);
|
||||
obj.info()->addParam(obj, "blobColor", obj.params.blobColor);
|
||||
obj.info()->addParam(obj, "filterByArea", obj.params.filterByArea);
|
||||
obj.info()->addParam(obj, "maxArea", obj.params.maxArea);
|
||||
obj.info()->addParam(obj, "filterByCircularity", obj.params.filterByCircularity);
|
||||
obj.info()->addParam(obj, "maxCircularity", obj.params.maxCircularity);
|
||||
obj.info()->addParam(obj, "filterByInertia", obj.params.filterByInertia);
|
||||
obj.info()->addParam(obj, "maxInertiaRatio", obj.params.maxInertiaRatio);
|
||||
obj.info()->addParam(obj, "filterByConvexity", obj.params.filterByConvexity);
|
||||
obj.info()->addParam(obj, "maxConvexity", obj.params.maxConvexity);
|
||||
)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class CV_EXPORTS HarrisDetector : public GFTTDetector
|
||||
{
|
||||
public:
|
||||
HarrisDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
|
||||
int blockSize=3, bool useHarrisDetector=true, double k=0.04 );
|
||||
AlgorithmInfo* info() const;
|
||||
};
|
||||
|
||||
inline HarrisDetector::HarrisDetector( int _maxCorners, double _qualityLevel, double _minDistance,
|
||||
int _blockSize, bool _useHarrisDetector, double _k )
|
||||
: GFTTDetector( _maxCorners, _qualityLevel, _minDistance, _blockSize, _useHarrisDetector, _k ) {}
|
||||
|
||||
CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
|
||||
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
|
||||
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
|
||||
obj.info()->addParam(obj, "minDistance", obj.minDistance);
|
||||
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
|
||||
obj.info()->addParam(obj, "k", obj.k))
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher",
|
||||
obj.info()->addParam(obj, "normType", obj.normType);
|
||||
obj.info()->addParam(obj, "crossCheck", obj.crossCheck))
|
||||
|
||||
CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
bool cv::initModule_features2d(void)
|
||||
{
|
||||
bool all = true;
|
||||
all &= !BRISK_info_auto.name().empty();
|
||||
all &= !FastFeatureDetector_info_auto.name().empty();
|
||||
all &= !MSER_info_auto.name().empty();
|
||||
all &= !ORB_info_auto.name().empty();
|
||||
all &= !GFTTDetector_info_auto.name().empty();
|
||||
all &= !KAZE_info_auto.name().empty();
|
||||
all &= !AKAZE_info_auto.name().empty();
|
||||
all &= !HarrisDetector_info_auto.name().empty();
|
||||
all &= !BFMatcher_info_auto.name().empty();
|
||||
all &= !FlannBasedMatcher_info_auto.name().empty();
|
||||
|
||||
return all;
|
||||
}
|
||||
|
||||
#endif
|
@@ -140,5 +140,11 @@ namespace cv
int diffusivity;
};


Ptr<KAZE> KAZE::create(bool extended, bool upright,
float threshold,
int octaves, int sublevels,
int diffusivity)
{
return makePtr<KAZE_Impl>(extended, upright, threshold, octaves, sublevels, diffusivity);
}
}
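Annotation (not part of the commit): KAZE gets the same treatment, a KAZE_Impl hidden behind KAZE::create(). A minimal sketch of calling it; the parameter values are illustrative rather than the library defaults, and diffusivity comes from the now class-scoped enum.

// Example sketch, not part of the commit; parameter values are illustrative.
#include <opencv2/features2d.hpp>
#include <vector>

void runKaze(const cv::Mat& gray)
{
    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create(false,   // extended
                                              false,   // upright
                                              0.001f,  // threshold
                                              4, 4,    // octaves, sublevels
                                              cv::KAZE::DIFF_PM_G2);

    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    kaze->detectAndCompute(gray, cv::noArray(), keypoints, descriptors, false);
}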
@@ -22,12 +22,12 @@ struct AKAZEOptions {
, soffset(1.6f)
, derivative_factor(1.5f)
, sderivatives(1.0)
, diffusivity(cv::DIFF_PM_G2)
, diffusivity(KAZE::DIFF_PM_G2)

, dthreshold(0.001f)
, min_dthreshold(0.00001f)

, descriptor(cv::DESCRIPTOR_MLDB)
, descriptor(AKAZE::DESCRIPTOR_MLDB)
, descriptor_size(0)
, descriptor_channels(3)
, descriptor_pattern_size(10)
@ -61,14 +61,14 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
|
||||
|
||||
for (int j = 0; j < options_.nsublevels; j++) {
|
||||
TEvolution step;
|
||||
step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lyy = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lsmooth = cv::Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lx = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Ly = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lxx = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lxy = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lyy = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lt = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Ldet = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.Lsmooth = Mat::zeros(level_height, level_width, CV_32F);
|
||||
step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i);
|
||||
step.sigma_size = fRound(step.esigma);
|
||||
step.etime = 0.5f*(step.esigma*step.esigma);
|
||||
@ -97,7 +97,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
|
||||
* @param img Input image for which the nonlinear scale space needs to be created
|
||||
* @return 0 if the nonlinear scale space was created successfully, -1 otherwise
|
||||
*/
|
||||
int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
int AKAZEFeatures::Create_Nonlinear_Scale_Space(const Mat& img)
|
||||
{
|
||||
CV_Assert(evolution_.size() > 0);
|
||||
|
||||
@ -107,8 +107,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
evolution_[0].Lt.copyTo(evolution_[0].Lsmooth);
|
||||
|
||||
// Allocate memory for the flow and step images
|
||||
cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
|
||||
// First compute the kcontrast factor
|
||||
options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0);
|
||||
@ -121,8 +121,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
options_.kcontrast = options_.kcontrast*0.75f;
|
||||
|
||||
// Allocate memory for the resized flow and step images
|
||||
Lflow = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F);
|
||||
Lstep = cv::Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F);
|
||||
Lflow = Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F);
|
||||
Lstep = Mat::zeros(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32F);
|
||||
}
|
||||
else {
|
||||
evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
|
||||
@ -136,16 +136,16 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
|
||||
// Compute the conductivity equation
|
||||
switch (options_.diffusivity) {
|
||||
case cv::DIFF_PM_G1:
|
||||
case KAZE::DIFF_PM_G1:
|
||||
pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
break;
|
||||
case cv::DIFF_PM_G2:
|
||||
case KAZE::DIFF_PM_G2:
|
||||
pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
break;
|
||||
case cv::DIFF_WEICKERT:
|
||||
case KAZE::DIFF_WEICKERT:
|
||||
weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
break;
|
||||
case cv::DIFF_CHARBONNIER:
|
||||
case KAZE::DIFF_CHARBONNIER:
|
||||
charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
break;
|
||||
default:
|
||||
@ -155,7 +155,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
|
||||
// Perform FED n inner steps
|
||||
for (int j = 0; j < nsteps_[i - 1]; j++) {
|
||||
cv::details::kaze::nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]);
|
||||
nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
|
||||
* @brief This method selects interesting keypoints through the nonlinear scale space
|
||||
* @param kpts Vector of detected keypoints
|
||||
*/
|
||||
void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
|
||||
void AKAZEFeatures::Feature_Detection(std::vector<KeyPoint>& kpts)
|
||||
{
|
||||
kpts.clear();
|
||||
Compute_Determinant_Hessian_Response();
|
||||
@ -176,7 +176,7 @@ void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
class MultiscaleDerivativesAKAZEInvoker : public cv::ParallelLoopBody
|
||||
class MultiscaleDerivativesAKAZEInvoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
explicit MultiscaleDerivativesAKAZEInvoker(std::vector<TEvolution>& ev, const AKAZEOptions& opt)
|
||||
@ -185,7 +185,7 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(const cv::Range& range) const
|
||||
void operator()(const Range& range) const
|
||||
{
|
||||
std::vector<TEvolution>& evolution = *evolution_;
|
||||
|
||||
@ -219,7 +219,7 @@ private:
|
||||
*/
|
||||
void AKAZEFeatures::Compute_Multiscale_Derivatives(void)
|
||||
{
|
||||
cv::parallel_for_(cv::Range(0, (int)evolution_.size()),
|
||||
parallel_for_(Range(0, (int)evolution_.size()),
|
||||
MultiscaleDerivativesAKAZEInvoker(evolution_, options_));
|
||||
}
|
||||
|
||||
@ -253,7 +253,7 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) {
|
||||
* @brief This method finds extrema in the nonlinear scale space
|
||||
* @param kpts Vector of detected keypoints
|
||||
*/
|
||||
void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
|
||||
void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<KeyPoint>& kpts)
|
||||
{
|
||||
|
||||
float value = 0.0;
|
||||
@ -261,8 +261,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
|
||||
int npoints = 0, id_repeated = 0;
|
||||
int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0;
|
||||
bool is_extremum = false, is_repeated = false, is_out = false;
|
||||
cv::KeyPoint point;
|
||||
vector<cv::KeyPoint> kpts_aux;
|
||||
KeyPoint point;
|
||||
vector<KeyPoint> kpts_aux;
|
||||
|
||||
// Set maximum size
|
||||
if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) {
|
||||
@ -365,7 +365,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
|
||||
for (size_t i = 0; i < kpts_aux.size(); i++) {
|
||||
|
||||
is_repeated = false;
|
||||
const cv::KeyPoint& pt = kpts_aux[i];
|
||||
const KeyPoint& pt = kpts_aux[i];
|
||||
for (size_t j = i + 1; j < kpts_aux.size(); j++) {
|
||||
|
||||
// Compare response with the upper scale
|
||||
@ -392,7 +392,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
|
||||
* @brief This method performs subpixel refinement of the detected keypoints
|
||||
* @param kpts Vector of detected keypoints
|
||||
*/
|
||||
void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts)
|
||||
void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<KeyPoint>& kpts)
|
||||
{
|
||||
float Dx = 0.0, Dy = 0.0, ratio = 0.0;
|
||||
float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0;
|
||||
@ -433,7 +433,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts)
|
||||
b(0) = -Dx;
|
||||
b(1) = -Dy;
|
||||
|
||||
cv::solve(A, b, dst, DECOMP_LU);
|
||||
solve(A, b, dst, DECOMP_LU);
|
||||
|
||||
if (fabs(dst(0)) <= 1.0f && fabs(dst(1)) <= 1.0f) {
|
||||
kpts[i].pt.x = x + dst(0);
|
||||
@ -456,10 +456,10 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts)
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody
|
||||
class SURF_Descriptor_Upright_64_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
SURF_Descriptor_Upright_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution)
|
||||
SURF_Descriptor_Upright_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -474,18 +474,18 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_SURF_Descriptor_Upright_64(const KeyPoint& kpt, float* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
};
|
||||
|
||||
class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody
|
||||
class SURF_Descriptor_64_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
SURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution)
|
||||
SURF_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -501,18 +501,18 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_SURF_Descriptor_64(const KeyPoint& kpt, float* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
};
|
||||
|
||||
class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody
|
||||
class MSURF_Upright_Descriptor_64_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
MSURF_Upright_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution)
|
||||
MSURF_Upright_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -527,18 +527,18 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
};
|
||||
|
||||
class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody
|
||||
class MSURF_Descriptor_64_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
MSURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution)
|
||||
MSURF_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -554,18 +554,18 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_MSURF_Descriptor_64(const KeyPoint& kpt, float* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
};
|
||||
|
||||
class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody
|
||||
class Upright_MLDB_Full_Descriptor_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
Upright_MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
|
||||
Upright_MLDB_Full_Descriptor_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -581,24 +581,24 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
|
||||
void Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
AKAZEOptions* options_;
|
||||
};
|
||||
|
||||
class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody
|
||||
class Upright_MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
Upright_MLDB_Descriptor_Subset_Invoker(std::vector<cv::KeyPoint>& kpts,
|
||||
cv::Mat& desc,
|
||||
Upright_MLDB_Descriptor_Subset_Invoker(std::vector<KeyPoint>& kpts,
|
||||
Mat& desc,
|
||||
std::vector<TEvolution>& evolution,
|
||||
AKAZEOptions& options,
|
||||
cv::Mat descriptorSamples,
|
||||
cv::Mat descriptorBits)
|
||||
Mat descriptorSamples,
|
||||
Mat descriptorBits)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -616,22 +616,22 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const;
|
||||
void Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
AKAZEOptions* options_;
|
||||
|
||||
cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
|
||||
cv::Mat descriptorBits_;
|
||||
Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
|
||||
Mat descriptorBits_;
|
||||
};
|
||||
|
||||
class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody
|
||||
class MLDB_Full_Descriptor_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
|
||||
MLDB_Full_Descriptor_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -648,28 +648,28 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
|
||||
void Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc) const;
|
||||
void MLDB_Fill_Values(float* values, int sample_step, int level,
|
||||
float xf, float yf, float co, float si, float scale) const;
|
||||
void MLDB_Binary_Comparisons(float* values, unsigned char* desc,
|
||||
int count, int& dpos) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
AKAZEOptions* options_;
|
||||
};
|
||||
|
||||
class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody
|
||||
class MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
MLDB_Descriptor_Subset_Invoker(std::vector<cv::KeyPoint>& kpts,
|
||||
cv::Mat& desc,
|
||||
MLDB_Descriptor_Subset_Invoker(std::vector<KeyPoint>& kpts,
|
||||
Mat& desc,
|
||||
std::vector<TEvolution>& evolution,
|
||||
AKAZEOptions& options,
|
||||
cv::Mat descriptorSamples,
|
||||
cv::Mat descriptorBits)
|
||||
Mat descriptorSamples,
|
||||
Mat descriptorBits)
|
||||
: keypoints_(&kpts)
|
||||
, descriptors_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -688,16 +688,16 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const;
|
||||
void Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc) const;
|
||||
|
||||
private:
|
||||
std::vector<cv::KeyPoint>* keypoints_;
|
||||
cv::Mat* descriptors_;
|
||||
std::vector<KeyPoint>* keypoints_;
|
||||
Mat* descriptors_;
|
||||
std::vector<TEvolution>* evolution_;
|
||||
AKAZEOptions* options_;
|
||||
|
||||
cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
|
||||
cv::Mat descriptorBits_;
|
||||
Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
|
||||
Mat descriptorBits_;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -705,7 +705,7 @@ private:
|
||||
* @param kpts Vector of detected keypoints
|
||||
* @param desc Matrix to store the descriptors
|
||||
*/
|
||||
void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc)
|
||||
void AKAZEFeatures::Compute_Descriptors(std::vector<KeyPoint>& kpts, Mat& desc)
|
||||
{
|
||||
for(size_t i = 0; i < kpts.size(); i++)
|
||||
{
|
||||
@ -714,17 +714,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
|
||||
|
||||
// Allocate memory for the matrix with the descriptors
|
||||
if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {
|
||||
desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1);
|
||||
desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1);
|
||||
}
|
||||
else {
|
||||
// We use the full length binary descriptor -> 486 bits
|
||||
if (options_.descriptor_size == 0) {
|
||||
int t = (6 + 36 + 120)*options_.descriptor_channels;
|
||||
desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1);
|
||||
desc = Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1);
|
||||
}
|
||||
else {
|
||||
// We use the random bit selection length binary descriptor
|
||||
desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1);
|
||||
desc = Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1);
|
||||
}
|
||||
}
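// Worked example: with the default 3 descriptor channels the full-length M-LDB
// descriptor is (6 + 36 + 120) * 3 = 486 bits, so the allocation above reserves
// ceil(486 / 8.) = 61 bytes per keypoint row.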
@ -732,28 +732,28 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
|
||||
{
|
||||
case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation
|
||||
{
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_));
|
||||
}
|
||||
break;
|
||||
case AKAZE::DESCRIPTOR_KAZE:
|
||||
{
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_));
|
||||
}
|
||||
break;
|
||||
case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation
|
||||
{
|
||||
if (options_.descriptor_size == 0)
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
else
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
|
||||
}
|
||||
break;
|
||||
case AKAZE::DESCRIPTOR_MLDB:
|
||||
{
|
||||
if (options_.descriptor_size == 0)
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
else
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
|
||||
}
|
||||
break;
|
||||
}
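// ---------------------------------------------------------------------------
// Minimal sketch of the ParallelLoopBody pattern used by the *_Invoker classes
// dispatched above (illustrative only; ExampleInvoker and its members are
// hypothetical names, not part of this patch). desc is assumed to be a CV_32F
// matrix with one row per keypoint.
class ExampleInvoker : public cv::ParallelLoopBody
{
public:
    ExampleInvoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc)
        : kpts_(&kpts), desc_(&desc) {}

    void operator()(const cv::Range& range) const
    {
        // each worker handles a contiguous sub-range of keypoints
        for (int i = range.start; i < range.end; i++)
        {
            float* row = desc_->ptr<float>(i);
            row[0] = (*kpts_)[i].response; // placeholder per-keypoint work
        }
    }

private:
    std::vector<cv::KeyPoint>* kpts_;
    cv::Mat* desc_;
};
// usage: parallel_for_(Range(0, (int)kpts.size()), ExampleInvoker(kpts, desc));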
@ -766,7 +766,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
|
||||
* @note The orientation is computed using a similar approach as described in the
|
||||
* original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
|
||||
*/
|
||||
void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_)
|
||||
void AKAZEFeatures::Compute_Main_Orientation(KeyPoint& kpt, const std::vector<TEvolution>& evolution_)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
|
||||
@ -854,7 +854,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const {
|
||||
void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float *desc) const {
|
||||
|
||||
float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -977,7 +977,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const {
|
||||
void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const KeyPoint& kpt, float *desc) const {
|
||||
|
||||
float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -1101,7 +1101,7 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp
|
||||
* @param kpt Input keypoint
|
||||
* @param desc Descriptor vector
|
||||
*/
|
||||
void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const {
|
||||
void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc) const {
|
||||
|
||||
float di = 0.0, dx = 0.0, dy = 0.0;
|
||||
float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0;
|
||||
@ -1114,9 +1114,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
|
||||
const std::vector<TEvolution>& evolution = *evolution_;
|
||||
|
||||
// Matrices for the M-LDB descriptor
|
||||
cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1);
|
||||
cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1);
|
||||
cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1);
|
||||
Mat values_1 = Mat::zeros(4, options.descriptor_channels, CV_32FC1);
|
||||
Mat values_2 = Mat::zeros(9, options.descriptor_channels, CV_32FC1);
|
||||
Mat values_3 = Mat::zeros(16, options.descriptor_channels, CV_32FC1);
|
||||
|
||||
// Get the information from the keypoint
|
||||
ratio = (float)(1 << kpt.octave);
|
||||
@ -1395,7 +1395,7 @@ void MLDB_Full_Descriptor_Invoker::MLDB_Binary_Comparisons(float* values, unsign
|
||||
* @param kpt Input keypoint
|
||||
* @param desc Descriptor vector
|
||||
*/
|
||||
void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const {
|
||||
void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc) const {
|
||||
|
||||
const int max_channels = 3;
|
||||
CV_Assert(options_->descriptor_channels <= max_channels);
|
||||
@ -1428,7 +1428,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
|
||||
* @param kpt Input keypoint
|
||||
* @param desc Descriptor vector
|
||||
*/
|
||||
void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const {
|
||||
void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc) const {
|
||||
|
||||
float di = 0.f, dx = 0.f, dy = 0.f;
|
||||
float rx = 0.f, ry = 0.f;
|
||||
@ -1449,7 +1449,7 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi
|
||||
float si = sin(angle);
|
||||
|
||||
// Allocate memory for the matrix of values
|
||||
cv::Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
|
||||
Mat values = Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
|
||||
|
||||
// Sample everything, but only do the comparisons
|
||||
vector<int> steps(3);
|
||||
@ -1522,7 +1522,7 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi
|
||||
* @param kpt Input keypoint
|
||||
* @param desc Descriptor vector
|
||||
*/
|
||||
void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const {
|
||||
void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc) const {
|
||||
|
||||
float di = 0.0f, dx = 0.0f, dy = 0.0f;
|
||||
float rx = 0.0f, ry = 0.0f;
|
||||
@ -1540,7 +1540,7 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
|
||||
float xf = kpt.pt.x / ratio;
|
||||
|
||||
// Allocate memory for the matrix of values
|
||||
Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
|
||||
Mat values = Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
|
||||
|
||||
vector<int> steps(3);
|
||||
steps.at(0) = options.descriptor_pattern_size;
|
||||
@ -1614,7 +1614,7 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
|
||||
* @note The function keeps the 18 bits (3-channels by 6 comparisons) of the
|
||||
* coarser grid, since it provides the most robust estimations
|
||||
*/
|
||||
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits,
|
||||
void generateDescriptorSubsample(Mat& sampleList, Mat& comparisons, int nbits,
|
||||
int pattern_size, int nchannels) {
|
||||
|
||||
int ssz = 0;
|
||||
@ -1718,4 +1718,3 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -11,10 +11,12 @@

/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
#include "AKAZEConfig.h"
#include "TEvolution.h"

namespace cv
{

/* ************************************************************************* */
// AKAZE Class Declaration
class AKAZEFeatures {
@ -22,7 +24,7 @@ class AKAZEFeatures {
private:

AKAZEOptions options_; ///< Configuration options for AKAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution

/// FED parameters
int ncycles_; ///< Number of cycles
@ -59,4 +61,6 @@ public:
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons,
int nbits, int pattern_size, int nchannels);

}

#endif
@ -20,14 +20,15 @@
|
||||
* @date Jan 21, 2012
|
||||
* @author Pablo F. Alcantarilla
|
||||
*/
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "KAZEFeatures.h"
|
||||
#include "utils.h"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// Namespaces
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
using namespace cv::details::kaze;
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
@ -52,19 +53,20 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options)
|
||||
void KAZEFeatures::Allocate_Memory_Evolution(void) {
|
||||
|
||||
// Allocate the dimension of the matrices for the evolution
|
||||
for (int i = 0; i <= options_.omax - 1; i++) {
|
||||
for (int j = 0; j <= options_.nsublevels - 1; j++) {
|
||||
|
||||
for (int i = 0; i <= options_.omax - 1; i++)
|
||||
{
|
||||
for (int j = 0; j <= options_.nsublevels - 1; j++)
|
||||
{
|
||||
TEvolution aux;
|
||||
aux.Lx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Ly = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lxx = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lxy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lyy = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lt = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lsmooth = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Ldet = cv::Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i);
|
||||
aux.Lx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Ly = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lxx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lxy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lyy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lt = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Lsmooth = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.Ldet = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
|
||||
aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i);
|
||||
aux.etime = 0.5f*(aux.esigma*aux.esigma);
|
||||
aux.sigma_size = fRound(aux.esigma);
|
||||
aux.octave = i;
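// Worked example: assuming the usual KAZE defaults soffset = 1.6 and
// nsublevels = 4, octave i = 0 and sublevel j = 1 give
// esigma = 1.6 * 2^(1/4) ~= 1.90 and etime = 0.5 * esigma^2 ~= 1.81.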
@ -74,7 +76,8 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
|
||||
}
|
||||
|
||||
// Allocate memory for the FED number of cycles and time steps
|
||||
for (size_t i = 1; i < evolution_.size(); i++) {
|
||||
for (size_t i = 1; i < evolution_.size(); i++)
|
||||
{
|
||||
int naux = 0;
|
||||
vector<float> tau;
|
||||
float ttime = 0.0;
|
||||
@ -92,47 +95,43 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
|
||||
* @param img Input image for which the nonlinear scale space needs to be created
|
||||
* @return 0 if the nonlinear scale space was created successfully. -1 otherwise
|
||||
*/
|
||||
int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img)
|
||||
int KAZEFeatures::Create_Nonlinear_Scale_Space(const Mat &img)
|
||||
{
|
||||
CV_Assert(evolution_.size() > 0);
|
||||
|
||||
// Copy the original image to the first level of the evolution
|
||||
img.copyTo(evolution_[0].Lt);
|
||||
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset);
|
||||
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives);
|
||||
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset);
|
||||
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives);
|
||||
|
||||
// Firstly compute the kcontrast factor
|
||||
Compute_KContrast(evolution_[0].Lt, options_.kcontrast_percentille);
|
||||
|
||||
// Allocate memory for the flow and step images
|
||||
cv::Mat Lflow = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
cv::Mat Lstep = cv::Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
|
||||
|
||||
// Now generate the rest of evolution levels
|
||||
for (size_t i = 1; i < evolution_.size(); i++) {
|
||||
|
||||
for (size_t i = 1; i < evolution_.size(); i++)
|
||||
{
|
||||
evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
|
||||
gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives);
|
||||
gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives);
|
||||
|
||||
// Compute the Gaussian derivatives Lx and Ly
|
||||
Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT);
|
||||
Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT);
|
||||
|
||||
// Compute the conductivity equation
|
||||
if (options_.diffusivity == cv::DIFF_PM_G1) {
|
||||
pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
}
|
||||
else if (options_.diffusivity == cv::DIFF_PM_G2) {
|
||||
pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
}
|
||||
else if (options_.diffusivity == cv::DIFF_WEICKERT) {
|
||||
weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
}
|
||||
if (options_.diffusivity == KAZE::DIFF_PM_G1)
|
||||
pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
else if (options_.diffusivity == KAZE::DIFF_PM_G2)
|
||||
pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
else if (options_.diffusivity == KAZE::DIFF_WEICKERT)
|
||||
weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
|
||||
|
||||
// Perform FED n inner steps
|
||||
for (int j = 0; j < nsteps_[i - 1]; j++) {
|
||||
for (int j = 0; j < nsteps_[i - 1]; j++)
|
||||
nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -144,7 +143,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img)
|
||||
* @param img Input image
|
||||
* @param kpercentile Percentile of the gradient histogram
|
||||
*/
|
||||
void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile)
|
||||
void KAZEFeatures::Compute_KContrast(const Mat &img, const float &kpercentile)
|
||||
{
|
||||
options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0);
|
||||
}
|
||||
@ -181,7 +180,7 @@ void KAZEFeatures::Compute_Detector_Response(void)
|
||||
* @brief This method selects interesting keypoints through the nonlinear scale space
|
||||
* @param kpts Vector of keypoints
|
||||
*/
|
||||
void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
|
||||
void KAZEFeatures::Feature_Detection(std::vector<KeyPoint>& kpts)
|
||||
{
|
||||
kpts.clear();
|
||||
Compute_Detector_Response();
|
||||
@ -190,14 +189,14 @@ void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
class MultiscaleDerivativesKAZEInvoker : public cv::ParallelLoopBody
|
||||
class MultiscaleDerivativesKAZEInvoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
explicit MultiscaleDerivativesKAZEInvoker(std::vector<TEvolution>& ev) : evolution_(&ev)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(const cv::Range& range) const
|
||||
void operator()(const Range& range) const
|
||||
{
|
||||
std::vector<TEvolution>& evolution = *evolution_;
|
||||
for (int i = range.start; i < range.end; i++)
|
||||
@ -226,74 +225,79 @@ private:
|
||||
*/
|
||||
void KAZEFeatures::Compute_Multiscale_Derivatives(void)
|
||||
{
|
||||
cv::parallel_for_(cv::Range(0, (int)evolution_.size()),
|
||||
parallel_for_(Range(0, (int)evolution_.size()),
|
||||
MultiscaleDerivativesKAZEInvoker(evolution_));
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
class FindExtremumKAZEInvoker : public cv::ParallelLoopBody
|
||||
class FindExtremumKAZEInvoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
explicit FindExtremumKAZEInvoker(std::vector<TEvolution>& ev, std::vector<std::vector<cv::KeyPoint> >& kpts_par,
|
||||
explicit FindExtremumKAZEInvoker(std::vector<TEvolution>& ev, std::vector<std::vector<KeyPoint> >& kpts_par,
|
||||
const KAZEOptions& options) : evolution_(&ev), kpts_par_(&kpts_par), options_(options)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(const cv::Range& range) const
|
||||
void operator()(const Range& range) const
|
||||
{
|
||||
std::vector<TEvolution>& evolution = *evolution_;
|
||||
std::vector<std::vector<cv::KeyPoint> >& kpts_par = *kpts_par_;
|
||||
std::vector<std::vector<KeyPoint> >& kpts_par = *kpts_par_;
|
||||
for (int i = range.start; i < range.end; i++)
|
||||
{
|
||||
float value = 0.0;
|
||||
bool is_extremum = false;
|
||||
|
||||
for (int ix = 1; ix < options_.img_height - 1; ix++) {
|
||||
for (int jx = 1; jx < options_.img_width - 1; jx++) {
|
||||
for (int ix = 1; ix < options_.img_height - 1; ix++)
|
||||
{
|
||||
for (int jx = 1; jx < options_.img_width - 1; jx++)
|
||||
{
|
||||
is_extremum = false;
|
||||
value = *(evolution[i].Ldet.ptr<float>(ix)+jx);
|
||||
|
||||
is_extremum = false;
|
||||
value = *(evolution[i].Ldet.ptr<float>(ix)+jx);
|
||||
|
||||
// Filter the points with the detector threshold
|
||||
if (value > options_.dthreshold) {
|
||||
if (value >= *(evolution[i].Ldet.ptr<float>(ix)+jx - 1)) {
|
||||
// First check on the same scale
|
||||
if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1)) {
|
||||
// Now check on the lower scale
|
||||
if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0)) {
|
||||
// Now check on the upper scale
|
||||
if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0)) {
|
||||
is_extremum = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add the point of interest!!
|
||||
if (is_extremum == true) {
|
||||
cv::KeyPoint point;
|
||||
point.pt.x = (float)jx;
|
||||
point.pt.y = (float)ix;
|
||||
point.response = fabs(value);
|
||||
point.size = evolution[i].esigma;
|
||||
point.octave = (int)evolution[i].octave;
|
||||
point.class_id = i;
|
||||
|
||||
// We use the angle field for the sublevel value
|
||||
// Then, we will replace this angle field with the main orientation
|
||||
point.angle = static_cast<float>(evolution[i].sublevel);
|
||||
kpts_par[i - 1].push_back(point);
|
||||
// Filter the points with the detector threshold
|
||||
if (value > options_.dthreshold)
|
||||
{
|
||||
if (value >= *(evolution[i].Ldet.ptr<float>(ix)+jx - 1))
|
||||
{
|
||||
// First check on the same scale
|
||||
if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1))
|
||||
{
|
||||
// Now check on the lower scale
|
||||
if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0))
|
||||
{
|
||||
// Now check on the upper scale
|
||||
if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0))
|
||||
is_extremum = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add the point of interest!!
|
||||
if (is_extremum)
|
||||
{
|
||||
KeyPoint point;
|
||||
point.pt.x = (float)jx;
|
||||
point.pt.y = (float)ix;
|
||||
point.response = fabs(value);
|
||||
point.size = evolution[i].esigma;
|
||||
point.octave = (int)evolution[i].octave;
|
||||
point.class_id = i;
|
||||
|
||||
// We use the angle field for the sublevel value
|
||||
// Then, we will replace this angle field with the main orientation
|
||||
point.angle = static_cast<float>(evolution[i].sublevel);
|
||||
kpts_par[i - 1].push_back(point);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<TEvolution>* evolution_;
|
||||
std::vector<std::vector<cv::KeyPoint> >* kpts_par_;
|
||||
std::vector<std::vector<KeyPoint> >* kpts_par_;
|
||||
KAZEOptions options_;
|
||||
};
|
||||
|
||||
@ -304,7 +308,7 @@ private:
|
||||
* @param kpts Vector of keypoints
|
||||
* @note We compute features for each of the nonlinear scale space level in a different processing thread
|
||||
*/
|
||||
void KAZEFeatures::Determinant_Hessian(std::vector<cv::KeyPoint>& kpts)
|
||||
void KAZEFeatures::Determinant_Hessian(std::vector<KeyPoint>& kpts)
|
||||
{
|
||||
int level = 0;
|
||||
float dist = 0.0, smax = 3.0;
|
||||
@ -325,12 +329,14 @@ void KAZEFeatures::Determinant_Hessian(std::vector<cv::KeyPoint>& kpts)
|
||||
kpts_par_.push_back(aux);
|
||||
}
|
||||
|
||||
cv::parallel_for_(cv::Range(1, (int)evolution_.size()-1),
|
||||
FindExtremumKAZEInvoker(evolution_, kpts_par_, options_));
|
||||
parallel_for_(Range(1, (int)evolution_.size()-1),
|
||||
FindExtremumKAZEInvoker(evolution_, kpts_par_, options_));
|
||||
|
||||
// Now fill the vector of keypoints!!!
|
||||
for (int i = 0; i < (int)kpts_par_.size(); i++) {
|
||||
for (int j = 0; j < (int)kpts_par_[i].size(); j++) {
|
||||
for (int i = 0; i < (int)kpts_par_.size(); i++)
|
||||
{
|
||||
for (int j = 0; j < (int)kpts_par_[i].size(); j++)
|
||||
{
|
||||
level = i + 1;
|
||||
is_extremum = true;
|
||||
is_repeated = false;
|
||||
@ -388,7 +394,7 @@ void KAZEFeatures::Determinant_Hessian(std::vector<cv::KeyPoint>& kpts)
|
||||
* @brief This method performs subpixel refinement of the detected keypoints
|
||||
* @param kpts Vector of detected keypoints
|
||||
*/
|
||||
void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
|
||||
void KAZEFeatures::Do_Subpixel_Refinement(std::vector<KeyPoint> &kpts) {
|
||||
|
||||
int step = 1;
|
||||
int x = 0, y = 0;
|
||||
@ -482,10 +488,10 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
class KAZE_Descriptor_Invoker : public cv::ParallelLoopBody
|
||||
class KAZE_Descriptor_Invoker : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
KAZE_Descriptor_Invoker(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc, std::vector<TEvolution>& evolution, const KAZEOptions& options)
|
||||
KAZE_Descriptor_Invoker(std::vector<KeyPoint> &kpts, Mat &desc, std::vector<TEvolution>& evolution, const KAZEOptions& options)
|
||||
: kpts_(&kpts)
|
||||
, desc_(&desc)
|
||||
, evolution_(&evolution)
|
||||
@ -497,10 +503,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void operator() (const cv::Range& range) const
|
||||
void operator() (const Range& range) const
|
||||
{
|
||||
std::vector<cv::KeyPoint> &kpts = *kpts_;
|
||||
cv::Mat &desc = *desc_;
|
||||
std::vector<KeyPoint> &kpts = *kpts_;
|
||||
Mat &desc = *desc_;
|
||||
std::vector<TEvolution> &evolution = *evolution_;
|
||||
|
||||
for (int i = range.start; i < range.end; i++)
|
||||
@ -526,13 +532,13 @@ public:
|
||||
}
|
||||
}
|
||||
private:
|
||||
void Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const;
|
||||
void Get_KAZE_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Descriptor_64(const KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Upright_Descriptor_128(const KeyPoint& kpt, float* desc) const;
|
||||
void Get_KAZE_Descriptor_128(const KeyPoint& kpt, float *desc) const;
|
||||
|
||||
std::vector<cv::KeyPoint> * kpts_;
|
||||
cv::Mat * desc_;
|
||||
std::vector<KeyPoint> * kpts_;
|
||||
Mat * desc_;
|
||||
std::vector<TEvolution> * evolution_;
|
||||
KAZEOptions options_;
|
||||
};
|
||||
@ -543,7 +549,7 @@ private:
|
||||
* @param kpts Vector of keypoints
|
||||
* @param desc Matrix with the feature descriptors
|
||||
*/
|
||||
void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc)
|
||||
void KAZEFeatures::Feature_Description(std::vector<KeyPoint> &kpts, Mat &desc)
|
||||
{
|
||||
for(size_t i = 0; i < kpts.size(); i++)
|
||||
{
|
||||
@ -558,7 +564,7 @@ void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat
|
||||
desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1);
|
||||
}
|
||||
|
||||
cv::parallel_for_(cv::Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
parallel_for_(Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
@ -568,7 +574,7 @@ void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat
|
||||
* @note The orientation is computed using a similar approach as described in the
|
||||
* original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
|
||||
*/
|
||||
void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options)
|
||||
void KAZEFeatures::Compute_Main_Orientation(KeyPoint &kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options)
|
||||
{
|
||||
int ix = 0, iy = 0, idx = 0, s = 0, level = 0;
|
||||
float xf = 0.0, yf = 0.0, gweight = 0.0;
|
||||
@ -647,7 +653,7 @@ void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const KeyPoint &kpt, float *desc) const
|
||||
{
|
||||
float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -775,7 +781,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const KeyPoint &kpt, float *desc) const
|
||||
{
|
||||
float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -904,7 +910,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, fl
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const KeyPoint &kpt, float *desc) const
|
||||
{
|
||||
float gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -1056,7 +1062,7 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint
|
||||
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
|
||||
* ECCV 2008
|
||||
*/
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const
|
||||
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const KeyPoint &kpt, float *desc) const
|
||||
{
|
||||
float gauss_s1 = 0.0, gauss_s2 = 0.0;
|
||||
float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
|
||||
@ -1202,3 +1208,5 @@ void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, f
|
||||
desc[i] /= len;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -22,8 +22,8 @@ namespace cv

/* ************************************************************************* */
// KAZE Class Declaration
class KAZEFeatures {

class KAZEFeatures
{
private:

/// Parameters of the Nonlinear diffusion class
@ -8,10 +8,13 @@
#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
#define __OPENCV_FEATURES_2D_TEVOLUTION_H__

namespace cv
{

/* ************************************************************************* */
/// KAZE/A-KAZE nonlinear diffusion filtering evolution
struct TEvolution {

struct TEvolution
{
TEvolution() {
etime = 0.0f;
esigma = 0.0f;
@ -20,11 +23,11 @@ struct TEvolution {
sigma_size = 0;
}

cv::Mat Lx, Ly; ///< First order spatial derivatives
cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
cv::Mat Lt; ///< Evolution image
cv::Mat Lsmooth; ///< Smoothed image
cv::Mat Ldet; ///< Detector response
Mat Lx, Ly; ///< First order spatial derivatives
Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
Mat Lt; ///< Evolution image
Mat Lsmooth; ///< Smoothed image
Mat Ldet; ///< Detector response
float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave
@ -32,4 +35,6 @@ struct TEvolution {
int sigma_size; ///< Integer esigma. For computing the feature detector responses
};

}

#endif
@ -22,502 +22,500 @@
|
||||
* @author Pablo F. Alcantarilla
|
||||
*/
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "nldiffusion_functions.h"
|
||||
#include <iostream>
|
||||
|
||||
// Namespaces
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
namespace cv {
|
||||
namespace details {
|
||||
namespace kaze {
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function smoothes an image with a Gaussian kernel
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param ksize_x Kernel size in X-direction (horizontal)
|
||||
* @param ksize_y Kernel size in Y-direction (vertical)
|
||||
* @param sigma Kernel standard deviation
|
||||
*/
|
||||
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) {

int ksize_x_ = 0, ksize_y_ = 0;

// Compute an appropriate kernel size according to the specified sigma
if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) {
ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f)));
ksize_y_ = ksize_x_;
}

// The kernel size must be an odd number
if ((ksize_x_ % 2) == 0) {
ksize_x_ += 1;
}

if ((ksize_y_ % 2) == 0) {
ksize_y_ += 1;
}

// Perform the Gaussian Smoothing with border replication
GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE);
}
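// Worked example: for sigma = 1.6 with automatic sizing (ksize_x = ksize_y = 0),
// the formula above gives ceil(2 * (1 + (1.6 - 0.8) / 0.3)) = 8, which the
// odd-size check then bumps to a 9x9 Gaussian kernel.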
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes image derivatives with Scharr kernel
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param xorder Derivative order in X-direction (horizontal)
|
||||
* @param yorder Derivative order in Y-direction (vertical)
|
||||
* @note The Scharr operator approximates rotation invariance better than
|
||||
* other stencils such as Sobel. See Weickert and Scharr,
|
||||
* A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance,
|
||||
* Journal of Visual Communication and Image Representation 2002
|
||||
*/
|
||||
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) {
|
||||
Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes the Perona and Malik conductivity coefficient g1
|
||||
* g1 = exp(-|dL|^2/k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
*/
|
||||
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
dst_row[x] = (-inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
|
||||
}
|
||||
}
|
||||
|
||||
exp(dst, dst);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes the Perona and Malik conductivity coefficient g2
|
||||
* g2 = 1 / (1 + dL^2 / k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
*/
|
||||
void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {

Size sz = Lx.size();
dst.create(sz, Lx.type());
float k2inv = 1.0f / (k * k);

for(int y = 0; y < sz.height; y++) {
const float *Lx_row = Lx.ptr<float>(y);
const float *Ly_row = Ly.ptr<float>(y);
float* dst_row = dst.ptr<float>(y);
for(int x = 0; x < sz.width; x++) {
dst_row[x] = 1.0f / (1.0f + ((Lx_row[x] * Lx_row[x] + Ly_row[x] * Ly_row[x]) * k2inv));
}
}
}
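/* -------------------------------------------------------------------------
 * Illustrative sketch (not from this patch): calling the g2 diffusivity above
 * on a standalone image. The contrast value k = 0.03f is a placeholder
 * assumption; gray32f is assumed to be a single-channel CV_32F image.
 */
static void example_pm_g2(const cv::Mat& gray32f, cv::Mat& g)
{
    cv::Mat Lx, Ly;
    // first-order derivatives with the Scharr stencil, as in the scale space code
    cv::Scharr(gray32f, Lx, CV_32F, 1, 0);
    cv::Scharr(gray32f, Ly, CV_32F, 0, 1);
    // g2 = 1 / (1 + |dL|^2 / k^2): close to 1 in flat areas, small near edges
    pm_g2(Lx, Ly, g, 0.03f);
}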
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Weickert conductivity coefficient gw
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
* @note For more information check the following paper: J. Weickert
|
||||
* Applications of nonlinear diffusion in image processing and computer vision,
|
||||
* Proceedings of Algorithmy 2000
|
||||
*/
|
||||
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
float dL = inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]);
|
||||
dst_row[x] = -3.315f/(dL*dL*dL*dL);
|
||||
}
|
||||
}
|
||||
|
||||
exp(dst, dst);
|
||||
dst = 1.0 - dst;
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Charbonnier conductivity coefficient gc
|
||||
* gc = 1 / sqrt(1 + dL^2 / k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
* @note For more information check the following paper: J. Weickert
|
||||
* Applications of nonlinear diffusion in image processing and computer vision,
|
||||
* Proceedings of Algorithmy 2000
|
||||
*/
|
||||
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
float den = sqrt(1.0f+inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
|
||||
dst_row[x] = 1.0f / den;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes a good empirical value for the k contrast factor
|
||||
* given an input image, the percentile (0-1), the gradient scale and the number of
|
||||
* bins in the histogram
|
||||
* @param img Input image
|
||||
* @param perc Percentile of the image gradient histogram (0-1)
|
||||
* @param gscale Scale for computing the image gradient histogram
|
||||
* @param nbins Number of histogram bins
|
||||
* @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel
|
||||
* @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel
|
||||
* @return k contrast factor
|
||||
*/
|
||||
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) {
|
||||
|
||||
int nbin = 0, nelements = 0, nthreshold = 0, k = 0;
|
||||
float kperc = 0.0, modg = 0.0;
|
||||
float npoints = 0.0;
|
||||
float hmax = 0.0;
|
||||
|
||||
// Create the array for the histogram
|
||||
std::vector<int> hist(nbins, 0);
|
||||
|
||||
// Create the matrices
|
||||
Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
|
||||
// Perform the Gaussian convolution
|
||||
gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale);
|
||||
|
||||
// Compute the Gaussian derivatives Lx and Ly
|
||||
Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT);
|
||||
Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT);
|
||||
|
||||
// Skip the borders for computing the histogram
|
||||
for (int i = 1; i < gaussian.rows - 1; i++) {
|
||||
const float *lx = Lx.ptr<float>(i);
|
||||
const float *ly = Ly.ptr<float>(i);
|
||||
for (int j = 1; j < gaussian.cols - 1; j++) {
|
||||
modg = lx[j]*lx[j] + ly[j]*ly[j];
|
||||
|
||||
// Get the maximum
|
||||
if (modg > hmax) {
|
||||
hmax = modg;
|
||||
}
|
||||
}
|
||||
}
|
||||
hmax = sqrt(hmax);
|
||||
// Skip the borders for computing the histogram
|
||||
for (int i = 1; i < gaussian.rows - 1; i++) {
|
||||
const float *lx = Lx.ptr<float>(i);
|
||||
const float *ly = Ly.ptr<float>(i);
|
||||
for (int j = 1; j < gaussian.cols - 1; j++) {
|
||||
modg = lx[j]*lx[j] + ly[j]*ly[j];
|
||||
|
||||
// Find the corresponding bin
|
||||
if (modg != 0.0) {
|
||||
nbin = (int)floor(nbins*(sqrt(modg) / hmax));
|
||||
|
||||
if (nbin == nbins) {
|
||||
nbin--;
|
||||
}
|
||||
|
||||
hist[nbin]++;
|
||||
npoints++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now find the bin that corresponds to the requested percentile of the histogram
|
||||
nthreshold = (int)(npoints*perc);
|
||||
|
||||
for (k = 0; nelements < nthreshold && k < nbins; k++) {
|
||||
nelements = nelements + hist[k];
|
||||
}
|
||||
|
||||
if (nelements < nthreshold) {
|
||||
kperc = 0.03f;
|
||||
}
|
||||
else {
|
||||
kperc = hmax*((float)(k) / (float)nbins);
|
||||
}
|
||||
|
||||
return kperc;
|
||||
}
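/* -------------------------------------------------------------------------
 * Illustrative sketch (not from this patch): how this helper is typically
 * driven before building the nonlinear scale space. The 0.7 percentile and
 * the 300-bin histogram are quoted here as assumptions about the usual
 * KAZE defaults.
 */
static float example_estimate_k(const cv::Mat& gray32f)
{
    // gradient scale 1.0f, kernel sizes 0 => chosen automatically from sigma
    return compute_k_percentile(gray32f, 0.7f, 1.0f, 300, 0, 0);
}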
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Scharr image derivatives
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param xorder Derivative order in X-direction (horizontal)
|
||||
* @param yorder Derivative order in Y-direction (vertical)
|
||||
* @param scale Scale factor for the derivative size
|
||||
*/
|
||||
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) {
|
||||
Mat kx, ky;
|
||||
compute_derivative_kernels(kx, ky, xorder, yorder, scale);
|
||||
sepFilter2D(src, dst, CV_32F, kx, ky);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief Compute derivative kernels for sizes different from 3
|
||||
* @param _kx Horizontal kernel values
|
||||
* @param _ky Vertical kernel values
|
||||
* @param dx Derivative order in X-direction (horizontal)
|
||||
* @param dy Derivative order in Y-direction (vertical)
|
||||
* @param scale Scale factor for the derivative size
|
||||
*/
|
||||
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
|
||||
|
||||
int ksize = 3 + 2 * (scale - 1);
|
||||
|
||||
// The standard Scharr kernel
|
||||
if (scale == 1) {
|
||||
getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
|
||||
return;
|
||||
}
|
||||
|
||||
_kx.create(ksize, 1, CV_32F, -1, true);
|
||||
_ky.create(ksize, 1, CV_32F, -1, true);
|
||||
Mat kx = _kx.getMat();
|
||||
Mat ky = _ky.getMat();
|
||||
|
||||
float w = 10.0f / 3.0f;
|
||||
float norm = 1.0f / (2.0f*scale*(w + 2.0f));
|
||||
|
||||
for (int k = 0; k < 2; k++) {
|
||||
Mat* kernel = k == 0 ? &kx : &ky;
|
||||
int order = k == 0 ? dx : dy;
|
||||
std::vector<float> kerI(ksize, 0.0f);
|
||||
|
||||
if (order == 0) {
|
||||
kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm;
|
||||
}
|
||||
else if (order == 1) {
|
||||
kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
|
||||
}
|
||||
|
||||
Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
|
||||
temp.copyTo(*kernel);
|
||||
}
|
||||
}
|
||||
|
||||
class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize)
|
||||
: _Ld(&Ld)
|
||||
, _c(&c)
|
||||
, _Lstep(&Lstep)
|
||||
, stepsize(_stepsize)
|
||||
{
|
||||
}
|
||||
|
||||
virtual ~Nld_Step_Scalar_Invoker()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
void operator()(const cv::Range& range) const
|
||||
{
|
||||
cv::Mat& Ld = *_Ld;
|
||||
const cv::Mat& c = *_c;
|
||||
cv::Mat& Lstep = *_Lstep;
|
||||
|
||||
for (int i = range.start; i < range.end; i++)
|
||||
{
|
||||
const float *c_prev = c.ptr<float>(i - 1);
|
||||
const float *c_curr = c.ptr<float>(i);
|
||||
const float *c_next = c.ptr<float>(i + 1);
|
||||
const float *ld_prev = Ld.ptr<float>(i - 1);
|
||||
const float *ld_curr = Ld.ptr<float>(i);
|
||||
const float *ld_next = Ld.ptr<float>(i + 1);
|
||||
|
||||
float *dst = Lstep.ptr<float>(i);
|
||||
|
||||
for (int j = 1; j < Lstep.cols - 1; j++)
|
||||
{
|
||||
float xpos = (c_curr[j] + c_curr[j+1])*(ld_curr[j+1] - ld_curr[j]);
|
||||
float xneg = (c_curr[j-1] + c_curr[j]) *(ld_curr[j] - ld_curr[j-1]);
|
||||
float ypos = (c_curr[j] + c_next[j]) *(ld_next[j] - ld_curr[j]);
|
||||
float yneg = (c_prev[j] + c_curr[j]) *(ld_curr[j] - ld_prev[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos - yneg);
|
||||
}
|
||||
}
|
||||
}
|
||||
private:
|
||||
cv::Mat * _Ld;
|
||||
const cv::Mat * _c;
|
||||
cv::Mat * _Lstep;
|
||||
float stepsize;
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function performs a scalar non-linear diffusion step
|
||||
* @param Ld Output image in the evolution
|
||||
* @param c Conductivity image
|
||||
* @param Lstep Buffer that receives the computed diffusion step
|
||||
* @param stepsize The step size in time units
|
||||
* @note Forward Euler Scheme 3x3 stencil
|
||||
* The function c is a scalar value that depends on the gradient norm
|
||||
* dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy
|
||||
*/
|
||||
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) {
|
||||
|
||||
cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize), (double)Ld.total()/(1 << 16));
|
||||
|
||||
float xneg, xpos, yneg, ypos;
|
||||
float* dst = Lstep.ptr<float>(0);
|
||||
const float* cprv = NULL;
|
||||
const float* ccur = c.ptr<float>(0);
|
||||
const float* cnxt = c.ptr<float>(1);
|
||||
const float* ldprv = NULL;
|
||||
const float* ldcur = Ld.ptr<float>(0);
|
||||
const float* ldnxt = Ld.ptr<float>(1);
|
||||
for (int j = 1; j < Lstep.cols - 1; j++) {
|
||||
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
|
||||
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
|
||||
ypos = (ccur[j] + cnxt[j]) * (ldnxt[j] - ldcur[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos);
|
||||
}
|
||||
|
||||
dst = Lstep.ptr<float>(Lstep.rows - 1);
|
||||
ccur = c.ptr<float>(Lstep.rows - 1);
|
||||
cprv = c.ptr<float>(Lstep.rows - 2);
|
||||
ldcur = Ld.ptr<float>(Lstep.rows - 1);
|
||||
ldprv = Ld.ptr<float>(Lstep.rows - 2);
|
||||
|
||||
for (int j = 1; j < Lstep.cols - 1; j++) {
|
||||
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
|
||||
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
|
||||
yneg = (cprv[j] + ccur[j]) * (ldcur[j] - ldprv[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg - yneg);
|
||||
}
|
||||
|
||||
ccur = c.ptr<float>(1);
|
||||
ldcur = Ld.ptr<float>(1);
|
||||
cprv = c.ptr<float>(0);
|
||||
ldprv = Ld.ptr<float>(0);
|
||||
|
||||
int r0 = Lstep.cols - 1;
|
||||
int r1 = Lstep.cols - 2;
|
||||
|
||||
for (int i = 1; i < Lstep.rows - 1; i++) {
|
||||
cnxt = c.ptr<float>(i + 1);
|
||||
ldnxt = Ld.ptr<float>(i + 1);
|
||||
dst = Lstep.ptr<float>(i);
|
||||
|
||||
xpos = (ccur[0] + ccur[1]) * (ldcur[1] - ldcur[0]);
|
||||
ypos = (ccur[0] + cnxt[0]) * (ldnxt[0] - ldcur[0]);
|
||||
yneg = (cprv[0] + ccur[0]) * (ldcur[0] - ldprv[0]);
|
||||
dst[0] = 0.5f*stepsize*(xpos + ypos - yneg);
|
||||
|
||||
xneg = (ccur[r1] + ccur[r0]) * (ldcur[r0] - ldcur[r1]);
|
||||
ypos = (ccur[r0] + cnxt[r0]) * (ldnxt[r0] - ldcur[r0]);
|
||||
yneg = (cprv[r0] + ccur[r0]) * (ldcur[r0] - ldprv[r0]);
|
||||
dst[r0] = 0.5f*stepsize*(-xneg + ypos - yneg);
|
||||
|
||||
cprv = ccur;
|
||||
ccur = cnxt;
|
||||
ldprv = ldcur;
|
||||
ldcur = ldnxt;
|
||||
}
|
||||
Ld += Lstep;
|
||||
}
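/* -------------------------------------------------------------------------
 * Illustrative sketch (not from this patch): one nonlinear diffusion pass
 * assembled from the helpers above. The explicit step sizes in `taus` are
 * assumed to come from the FED scheduling code used elsewhere in KAZE/AKAZE.
 */
static void example_diffusion_pass(cv::Mat& L, float k, const std::vector<float>& taus)
{
    cv::Mat Lx, Ly;
    cv::Mat flow = cv::Mat::zeros(L.size(), CV_32F);
    cv::Mat step = cv::Mat::zeros(L.size(), CV_32F);

    cv::Scharr(L, Lx, CV_32F, 1, 0);
    cv::Scharr(L, Ly, CV_32F, 0, 1);
    pm_g2(Lx, Ly, flow, k);                      // conductivity c(x, y)

    for (size_t j = 0; j < taus.size(); j++)
        nld_step_scalar(L, flow, step, taus[j]); // forward Euler updates, L += step
}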
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function downsamples the input image using OpenCV resize
|
||||
* @param img Input image to be downsampled
|
||||
* @param dst Output image with half of the resolution of the input image
|
||||
*/
|
||||
void halfsample_image(const cv::Mat& src, cv::Mat& dst) {
|
||||
|
||||
// Make sure the destination image is of the right size
|
||||
CV_Assert(src.cols / 2 == dst.cols);
|
||||
CV_Assert(src.rows / 2 == dst.rows);
|
||||
resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function checks if a given pixel is a maximum in a local neighbourhood
|
||||
* @param img Input image where we will perform the maximum search
|
||||
* @param dsize Half size of the neighbourhood
|
||||
* @param value Response value at (x,y) position
|
||||
* @param row Image row coordinate
|
||||
* @param col Image column coordinate
|
||||
* @param same_img Flag to indicate if the image value at (x,y) is in the input image
|
||||
* @return 1->is maximum, 0->otherwise
|
||||
*/
|
||||
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) {
|
||||
|
||||
bool response = true;
|
||||
|
||||
for (int i = row - dsize; i <= row + dsize; i++) {
|
||||
for (int j = col - dsize; j <= col + dsize; j++) {
|
||||
if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) {
|
||||
if (same_img == true) {
|
||||
if (i != row || j != col) {
|
||||
if ((*(img.ptr<float>(i)+j)) > value) {
|
||||
response = false;
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if ((*(img.ptr<float>(i)+j)) > value) {
|
||||
response = false;
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
namespace cv
|
||||
{
|
||||
using namespace std;
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function smoothes an image with a Gaussian kernel
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param ksize_x Kernel size in X-direction (horizontal)
|
||||
* @param ksize_y Kernel size in Y-direction (vertical)
|
||||
* @param sigma Kernel standard deviation
|
||||
*/
|
||||
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) {
|
||||
|
||||
int ksize_x_ = 0, ksize_y_ = 0;
|
||||
|
||||
// Compute an appropriate kernel size according to the specified sigma
|
||||
if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) {
|
||||
ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f)));
|
||||
ksize_y_ = ksize_x_;
|
||||
}
|
||||
|
||||
// The kernel size must be and odd number
|
||||
if ((ksize_x_ % 2) == 0) {
|
||||
ksize_x_ += 1;
|
||||
}
|
||||
|
||||
if ((ksize_y_ % 2) == 0) {
|
||||
ksize_y_ += 1;
|
||||
}
|
||||
|
||||
// Perform the Gaussian Smoothing with border replication
|
||||
GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE);
|
||||
}
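
// A minimal usage sketch (not part of the original patch): passing ksize_x = ksize_y = 0
// lets the function derive an odd kernel size from sigma, as shown above. The function
// name and the sigma value below are illustrative assumptions.
static void example_gaussian_smoothing(const cv::Mat& gray32f, cv::Mat& smoothed)
{
    gaussian_2D_convolution(gray32f, smoothed, 0, 0, 1.6f);  // kernel size derived from sigma = 1.6
}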
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes image derivatives with Scharr kernel
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param xorder Derivative order in X-direction (horizontal)
|
||||
* @param yorder Derivative order in Y-direction (vertical)
|
||||
* @note The Scharr operator approximates rotation invariance better than
|
||||
* other stencils such as Sobel. See Weickert and Scharr,
|
||||
* A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance,
|
||||
* Journal of Visual Communication and Image Representation 2002
|
||||
*/
|
||||
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) {
|
||||
Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes the Perona and Malik conductivity coefficient g1
|
||||
* g1 = exp(-|dL|^2/k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
*/
|
||||
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
dst_row[x] = (-inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
|
||||
}
|
||||
}
|
||||
|
||||
exp(dst, dst);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes the Perona and Malik conductivity coefficient g2
|
||||
* g2 = 1 / (1 + dL^2 / k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
*/
|
||||
void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
dst.create(sz, Lx.type());
|
||||
float k2inv = 1.0f / (k * k);
|
||||
|
||||
for(int y = 0; y < sz.height; y++) {
|
||||
const float *Lx_row = Lx.ptr<float>(y);
|
||||
const float *Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
for(int x = 0; x < sz.width; x++) {
|
||||
dst_row[x] = 1.0f / (1.0f + ((Lx_row[x] * Lx_row[x] + Ly_row[x] * Ly_row[x]) * k2inv));
|
||||
}
|
||||
}
|
||||
}
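
// A minimal sketch (not part of the original patch) of how the diffusivity functions are
// typically combined: Scharr derivatives of the smoothed image feed pm_g1/pm_g2 (or the
// Weickert/Charbonnier variants below) to build the conductivity image. Names are illustrative.
static void example_conductivity(const cv::Mat& Lsmooth, cv::Mat& c, float kcontrast)
{
    cv::Mat Lx, Ly;
    image_derivatives_scharr(Lsmooth, Lx, 1, 0);  // dL/dx
    image_derivatives_scharr(Lsmooth, Ly, 0, 1);  // dL/dy
    c.create(Lsmooth.size(), CV_32F);             // pm_g1 writes into an already allocated dst
    pm_g1(Lx, Ly, c, kcontrast);                  // or pm_g2 / weickert_diffusivity / charbonnier_diffusivity
}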
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Weickert conductivity coefficient gw
* gw = 1 - exp(-3.315/((dL/k)^8))
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
* @note For more information check the following paper: J. Weickert
|
||||
* Applications of nonlinear diffusion in image processing and computer vision,
|
||||
* Proceedings of Algorithmy 2000
|
||||
*/
|
||||
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
float dL = inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]);
|
||||
dst_row[x] = -3.315f/(dL*dL*dL*dL);
|
||||
}
|
||||
}
|
||||
|
||||
exp(dst, dst);
|
||||
dst = 1.0 - dst;
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Charbonnier conductivity coefficient gc
|
||||
* gc = 1 / sqrt(1 + dL^2 / k^2)
|
||||
* @param Lx First order image derivative in X-direction (horizontal)
|
||||
* @param Ly First order image derivative in Y-direction (vertical)
|
||||
* @param dst Output image
|
||||
* @param k Contrast factor parameter
|
||||
* @note For more information check the following paper: J. Weickert
|
||||
* Applications of nonlinear diffusion in image processing and computer vision,
|
||||
* Proceedings of Algorithmy 2000
|
||||
*/
|
||||
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
|
||||
|
||||
Size sz = Lx.size();
|
||||
float inv_k = 1.0f / (k*k);
|
||||
for (int y = 0; y < sz.height; y++) {
|
||||
|
||||
const float* Lx_row = Lx.ptr<float>(y);
|
||||
const float* Ly_row = Ly.ptr<float>(y);
|
||||
float* dst_row = dst.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < sz.width; x++) {
|
||||
float den = sqrt(1.0f+inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
|
||||
dst_row[x] = 1.0f / den;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes a good empirical value for the k contrast factor
|
||||
* given an input image, the percentile (0-1), the gradient scale and the number of
|
||||
* bins in the histogram
|
||||
* @param img Input image
|
||||
* @param perc Percentile of the image gradient histogram (0-1)
|
||||
* @param gscale Scale for computing the image gradient histogram
|
||||
* @param nbins Number of histogram bins
|
||||
* @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel
|
||||
* @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel
|
||||
* @return k contrast factor
|
||||
*/
|
||||
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) {
|
||||
|
||||
int nbin = 0, nelements = 0, nthreshold = 0, k = 0;
|
||||
float kperc = 0.0, modg = 0.0;
|
||||
float npoints = 0.0;
|
||||
float hmax = 0.0;
|
||||
|
||||
// Create the array for the histogram
|
||||
std::vector<int> hist(nbins, 0);
|
||||
|
||||
// Create the matrices
|
||||
Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F);
|
||||
|
||||
// Perform the Gaussian convolution
|
||||
gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale);
|
||||
|
||||
// Compute the Gaussian derivatives Lx and Ly
|
||||
Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT);
|
||||
Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT);
|
||||
|
||||
// Skip the borders for computing the histogram
|
||||
for (int i = 1; i < gaussian.rows - 1; i++) {
|
||||
const float *lx = Lx.ptr<float>(i);
|
||||
const float *ly = Ly.ptr<float>(i);
|
||||
for (int j = 1; j < gaussian.cols - 1; j++) {
|
||||
modg = lx[j]*lx[j] + ly[j]*ly[j];
|
||||
|
||||
// Get the maximum
|
||||
if (modg > hmax) {
|
||||
hmax = modg;
|
||||
}
|
||||
}
|
||||
}
|
||||
hmax = sqrt(hmax);
|
||||
// Skip the borders for computing the histogram
|
||||
for (int i = 1; i < gaussian.rows - 1; i++) {
|
||||
const float *lx = Lx.ptr<float>(i);
|
||||
const float *ly = Ly.ptr<float>(i);
|
||||
for (int j = 1; j < gaussian.cols - 1; j++) {
|
||||
modg = lx[j]*lx[j] + ly[j]*ly[j];
|
||||
|
||||
// Find the corresponding bin
|
||||
if (modg != 0.0) {
|
||||
nbin = (int)floor(nbins*(sqrt(modg) / hmax));
|
||||
|
||||
if (nbin == nbins) {
|
||||
nbin--;
|
||||
}
|
||||
|
||||
hist[nbin]++;
|
||||
npoints++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now find the bin that corresponds to the requested percentile of the histogram
|
||||
nthreshold = (int)(npoints*perc);
|
||||
|
||||
for (k = 0; nelements < nthreshold && k < nbins; k++) {
|
||||
nelements = nelements + hist[k];
|
||||
}
|
||||
|
||||
if (nelements < nthreshold) {
|
||||
kperc = 0.03f;
|
||||
}
|
||||
else {
|
||||
kperc = hmax*((float)(k) / (float)nbins);
|
||||
}
|
||||
|
||||
return kperc;
|
||||
}
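
// A minimal sketch (not part of the original patch): estimating the contrast factor k from a
// CV_32F image. The parameter values (70th percentile, gradient scale 1.0, 300 bins, automatic
// kernel size) are typical KAZE-style settings and should be treated as assumptions.
static float example_contrast_factor(const cv::Mat& img32f)
{
    return compute_k_percentile(img32f, 0.7f, 1.0f, 300, 0, 0);
}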
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function computes Scharr image derivatives
|
||||
* @param src Input image
|
||||
* @param dst Output image
|
||||
* @param xorder Derivative order in X-direction (horizontal)
|
||||
* @param yorder Derivative order in Y-direction (vertical)
|
||||
* @param scale Scale factor for the derivative size
|
||||
*/
|
||||
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) {
|
||||
Mat kx, ky;
|
||||
compute_derivative_kernels(kx, ky, xorder, yorder, scale);
|
||||
sepFilter2D(src, dst, CV_32F, kx, ky);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
 * @brief Compute derivative kernels for sizes different from 3
 * @param _kx Horizontal kernel values
 * @param _ky Vertical kernel values
 * @param dx Derivative order in X-direction (horizontal)
 * @param dy Derivative order in Y-direction (vertical)
 * @param scale Scale factor for the derivative size
 */
|
||||
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
|
||||
|
||||
int ksize = 3 + 2 * (scale - 1);
|
||||
|
||||
// The standard Scharr kernel
|
||||
if (scale == 1) {
|
||||
getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
|
||||
return;
|
||||
}
|
||||
|
||||
_kx.create(ksize, 1, CV_32F, -1, true);
|
||||
_ky.create(ksize, 1, CV_32F, -1, true);
|
||||
Mat kx = _kx.getMat();
|
||||
Mat ky = _ky.getMat();
|
||||
|
||||
float w = 10.0f / 3.0f;
|
||||
float norm = 1.0f / (2.0f*scale*(w + 2.0f));
|
||||
|
||||
for (int k = 0; k < 2; k++) {
|
||||
Mat* kernel = k == 0 ? &kx : &ky;
|
||||
int order = k == 0 ? dx : dy;
|
||||
std::vector<float> kerI(ksize, 0.0f);
|
||||
|
||||
if (order == 0) {
|
||||
kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm;
|
||||
}
|
||||
else if (order == 1) {
|
||||
kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
|
||||
}
|
||||
|
||||
Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
|
||||
temp.copyTo(*kernel);
|
||||
}
|
||||
}
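
// A minimal sketch (not part of the original patch): for scale = 2 the kernels above are 5-tap,
// with smoothing kernel [norm, 0, w*norm, 0, norm] (w = 10/3) and differencing kernel
// [-1, 0, 0, 0, 1], mirroring the 3-tap Scharr structure. The wrapper call below is illustrative.
static void example_scaled_scharr(const cv::Mat& src, cv::Mat& Lx)
{
    compute_scharr_derivatives(src, Lx, 1, 0, 2);  // first derivative in x at scale 2
}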
|
||||
|
||||
class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize)
|
||||
: _Ld(&Ld)
|
||||
, _c(&c)
|
||||
, _Lstep(&Lstep)
|
||||
, stepsize(_stepsize)
|
||||
{
|
||||
}
|
||||
|
||||
virtual ~Nld_Step_Scalar_Invoker()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
void operator()(const cv::Range& range) const
|
||||
{
|
||||
cv::Mat& Ld = *_Ld;
|
||||
const cv::Mat& c = *_c;
|
||||
cv::Mat& Lstep = *_Lstep;
|
||||
|
||||
for (int i = range.start; i < range.end; i++)
|
||||
{
|
||||
const float *c_prev = c.ptr<float>(i - 1);
|
||||
const float *c_curr = c.ptr<float>(i);
|
||||
const float *c_next = c.ptr<float>(i + 1);
|
||||
const float *ld_prev = Ld.ptr<float>(i - 1);
|
||||
const float *ld_curr = Ld.ptr<float>(i);
|
||||
const float *ld_next = Ld.ptr<float>(i + 1);
|
||||
|
||||
float *dst = Lstep.ptr<float>(i);
|
||||
|
||||
for (int j = 1; j < Lstep.cols - 1; j++)
|
||||
{
|
||||
float xpos = (c_curr[j] + c_curr[j+1])*(ld_curr[j+1] - ld_curr[j]);
|
||||
float xneg = (c_curr[j-1] + c_curr[j]) *(ld_curr[j] - ld_curr[j-1]);
|
||||
float ypos = (c_curr[j] + c_next[j]) *(ld_next[j] - ld_curr[j]);
|
||||
float yneg = (c_prev[j] + c_curr[j]) *(ld_curr[j] - ld_prev[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos - yneg);
|
||||
}
|
||||
}
|
||||
}
|
||||
private:
|
||||
cv::Mat * _Ld;
|
||||
const cv::Mat * _c;
|
||||
cv::Mat * _Lstep;
|
||||
float stepsize;
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
 * @brief This function performs a scalar non-linear diffusion step
 * @param Ld Image in the evolution, updated in place by this step
 * @param c Conductivity image
 * @param Lstep Buffer that receives the computed diffusion step
 * @param stepsize The step size in time units
|
||||
* @note Forward Euler Scheme 3x3 stencil
|
||||
* The function c is a scalar value that depends on the gradient norm
|
||||
* dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy
|
||||
*/
|
||||
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) {
|
||||
|
||||
cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize), (double)Ld.total()/(1 << 16));
|
||||
|
||||
float xneg, xpos, yneg, ypos;
|
||||
float* dst = Lstep.ptr<float>(0);
|
||||
const float* cprv = NULL;
|
||||
const float* ccur = c.ptr<float>(0);
|
||||
const float* cnxt = c.ptr<float>(1);
|
||||
const float* ldprv = NULL;
|
||||
const float* ldcur = Ld.ptr<float>(0);
|
||||
const float* ldnxt = Ld.ptr<float>(1);
|
||||
for (int j = 1; j < Lstep.cols - 1; j++) {
|
||||
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
|
||||
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
|
||||
ypos = (ccur[j] + cnxt[j]) * (ldnxt[j] - ldcur[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos);
|
||||
}
|
||||
|
||||
dst = Lstep.ptr<float>(Lstep.rows - 1);
|
||||
ccur = c.ptr<float>(Lstep.rows - 1);
|
||||
cprv = c.ptr<float>(Lstep.rows - 2);
|
||||
ldcur = Ld.ptr<float>(Lstep.rows - 1);
|
||||
ldprv = Ld.ptr<float>(Lstep.rows - 2);
|
||||
|
||||
for (int j = 1; j < Lstep.cols - 1; j++) {
|
||||
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
|
||||
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
|
||||
yneg = (cprv[j] + ccur[j]) * (ldcur[j] - ldprv[j]);
|
||||
dst[j] = 0.5f*stepsize*(xpos - xneg - yneg);
|
||||
}
|
||||
|
||||
ccur = c.ptr<float>(1);
|
||||
ldcur = Ld.ptr<float>(1);
|
||||
cprv = c.ptr<float>(0);
|
||||
ldprv = Ld.ptr<float>(0);
|
||||
|
||||
int r0 = Lstep.cols - 1;
|
||||
int r1 = Lstep.cols - 2;
|
||||
|
||||
for (int i = 1; i < Lstep.rows - 1; i++) {
|
||||
cnxt = c.ptr<float>(i + 1);
|
||||
ldnxt = Ld.ptr<float>(i + 1);
|
||||
dst = Lstep.ptr<float>(i);
|
||||
|
||||
xpos = (ccur[0] + ccur[1]) * (ldcur[1] - ldcur[0]);
|
||||
ypos = (ccur[0] + cnxt[0]) * (ldnxt[0] - ldcur[0]);
|
||||
yneg = (cprv[0] + ccur[0]) * (ldcur[0] - ldprv[0]);
|
||||
dst[0] = 0.5f*stepsize*(xpos + ypos - yneg);
|
||||
|
||||
xneg = (ccur[r1] + ccur[r0]) * (ldcur[r0] - ldcur[r1]);
|
||||
ypos = (ccur[r0] + cnxt[r0]) * (ldnxt[r0] - ldcur[r0]);
|
||||
yneg = (cprv[r0] + ccur[r0]) * (ldcur[r0] - ldprv[r0]);
|
||||
dst[r0] = 0.5f*stepsize*(-xneg + ypos - yneg);
|
||||
|
||||
cprv = ccur;
|
||||
ccur = cnxt;
|
||||
ldprv = ldcur;
|
||||
ldcur = ldnxt;
|
||||
}
|
||||
Ld += Lstep;
|
||||
}
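
// A minimal sketch (not part of the original patch) of the explicit diffusion loop this step
// function serves: Lstep is a scratch buffer and Ld is updated in place on every call. The
// step sizes (taus) are assumed to come from the caller's time discretization (e.g. FED).
static void example_diffusion(cv::Mat& Ld, const cv::Mat& c, const std::vector<float>& taus)
{
    cv::Mat Lstep = cv::Mat::zeros(Ld.size(), CV_32F);
    for (size_t n = 0; n < taus.size(); n++)
        nld_step_scalar(Ld, c, Lstep, taus[n]);  // one forward Euler step per call
}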
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function downsamples the input image using OpenCV resize
|
||||
* @param src Input image to be downsampled
|
||||
* @param dst Output image with half of the resolution of the input image
|
||||
*/
|
||||
void halfsample_image(const cv::Mat& src, cv::Mat& dst) {
|
||||
|
||||
// Make sure the destination image is of the right size
|
||||
CV_Assert(src.cols / 2 == dst.cols);
|
||||
CV_Assert(src.rows / 2 == dst.rows);
|
||||
resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA);
|
||||
}
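
// A minimal sketch (not part of the original patch): the destination must already have half
// of the source resolution, otherwise the CV_Assert checks above fail.
static void example_halfsample(const cv::Mat& src, cv::Mat& dst)
{
    dst.create(src.rows / 2, src.cols / 2, src.type());
    halfsample_image(src, dst);
}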
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* @brief This function checks if a given pixel is a maximum in a local neighbourhood
|
||||
* @param img Input image where we will perform the maximum search
|
||||
* @param dsize Half size of the neighbourhood
|
||||
* @param value Response value at (x,y) position
|
||||
* @param row Image row coordinate
|
||||
* @param col Image column coordinate
|
||||
* @param same_img Flag to indicate if the image value at (x,y) is in the input image
|
||||
* @return 1->is maximum, 0->otherwise
|
||||
*/
|
||||
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) {
|
||||
|
||||
bool response = true;
|
||||
|
||||
for (int i = row - dsize; i <= row + dsize; i++) {
|
||||
for (int j = col - dsize; j <= col + dsize; j++) {
|
||||
if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) {
|
||||
if (same_img == true) {
|
||||
if (i != row || j != col) {
|
||||
if ((*(img.ptr<float>(i)+j)) > value) {
|
||||
response = false;
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if ((*(img.ptr<float>(i)+j)) > value) {
|
||||
response = false;
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return response;
|
||||
}
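
// A minimal sketch (not part of the original patch): testing whether a detector response at
// (row, col) is not exceeded by any value in its (2*dsize+1) x (2*dsize+1) neighbourhood of
// the same response map (same_img = true skips the centre pixel itself).
static bool example_is_local_max(const cv::Mat& response, int row, int col)
{
    float value = response.at<float>(row, col);
    return check_maximum_neighbourhood(response, 1, value, row, col, true);
}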
|
||||
|
||||
}
|
||||
|
@ -11,43 +11,37 @@
|
||||
#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
|
||||
#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Includes
|
||||
#include "../precomp.hpp"
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Declaration of functions
|
||||
|
||||
namespace cv {
|
||||
namespace details {
|
||||
namespace kaze {
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// Gaussian 2D convolution
|
||||
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
|
||||
// Gaussian 2D convolution
|
||||
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
|
||||
|
||||
// Diffusivity functions
|
||||
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
// Diffusivity functions
|
||||
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
|
||||
|
||||
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
|
||||
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
|
||||
|
||||
// Image derivatives
|
||||
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
|
||||
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
|
||||
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
|
||||
// Image derivatives
|
||||
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
|
||||
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
|
||||
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
|
||||
|
||||
// Nonlinear diffusion filtering scalar step
|
||||
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
|
||||
// Nonlinear diffusion filtering scalar step
|
||||
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
|
||||
|
||||
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
|
||||
|
||||
// Image downsampling
|
||||
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
|
||||
|
||||
// Image downsampling
|
||||
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -664,19 +664,11 @@ public:
|
||||
int defaultNorm() const;
|
||||
|
||||
// Compute the ORB_Impl features and descriptors on an image
|
||||
void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
|
||||
|
||||
// Compute the ORB_Impl features and descriptors on an image
|
||||
void operator()( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors, bool useProvidedKeypoints=false ) const;
|
||||
|
||||
AlgorithmInfo* info() const;
|
||||
void detectAndCompute( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray descriptors, bool useProvidedKeypoints=false );
|
||||
|
||||
protected:
|
||||
|
||||
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
|
||||
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
|
||||
|
||||
int nfeatures;
|
||||
double scaleFactor;
|
||||
int nlevels;
|
||||
@ -703,17 +695,6 @@ int ORB_Impl::defaultNorm() const
|
||||
return NORM_HAMMING;
|
||||
}
|
||||
|
||||
/** Compute the ORB_Impl features and descriptors on an image
|
||||
* @param img the image to compute the features and descriptors on
|
||||
* @param mask the mask to apply
|
||||
* @param keypoints the resulting keypoints
|
||||
*/
|
||||
void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
|
||||
{
|
||||
(*this)(image, mask, keypoints, noArray(), false);
|
||||
}
|
||||
|
||||
|
||||
static void uploadORBKeypoints(const std::vector<KeyPoint>& src, std::vector<Vec3i>& buf, OutputArray dst)
|
||||
{
|
||||
size_t i, n = src.size();
|
||||
@ -813,8 +794,10 @@ static void computeKeyPoints(const Mat& imagePyramid,
|
||||
Mat mask = maskPyramid.empty() ? Mat() : maskPyramid(layerInfo[level]);
|
||||
|
||||
// Detect FAST features, 20 is a good threshold
|
||||
FastFeatureDetector fd(fastThreshold, true);
|
||||
fd.detect(img, keypoints, mask);
|
||||
{
|
||||
Ptr<FastFeatureDetector> fd = FastFeatureDetector::create(fastThreshold, true);
|
||||
fd->detect(img, keypoints, mask);
|
||||
}
|
||||
|
||||
// Remove keypoints very close to the border
|
||||
KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold);
|
||||
@ -928,8 +911,9 @@ static void computeKeyPoints(const Mat& imagePyramid,
|
||||
* @param do_keypoints if true, the keypoints are computed, otherwise used as an input
|
||||
* @param do_descriptors if true, also computes the descriptors
|
||||
*/
|
||||
void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
|
||||
OutputArray _descriptors, bool useProvidedKeypoints ) const
|
||||
void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask,
|
||||
std::vector<KeyPoint>& keypoints,
|
||||
OutputArray _descriptors, bool useProvidedKeypoints )
|
||||
{
|
||||
CV_Assert(patchSize >= 2);
|
||||
|
||||
@ -1159,15 +1143,11 @@ void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyP
|
||||
}
|
||||
}
|
||||
|
||||
void ORB_Impl::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
|
||||
Ptr<ORB> ORB::create(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold,
|
||||
int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold)
|
||||
{
|
||||
(*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
|
||||
return makePtr<ORB_Impl>(nfeatures, scaleFactor, nlevels, edgeThreshold,
|
||||
firstLevel, WTA_K, scoreType, patchSize, fastThreshold);
|
||||
}
|
||||
|
||||
void ORB_Impl::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
|
||||
{
|
||||
(*this)(image, Mat(), keypoints, descriptors, true);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -314,31 +314,34 @@ private:
|
||||
|
||||
TEST( Features2d_DescriptorExtractor_BRISK, regression )
|
||||
{
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk", (CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
|
||||
DescriptorExtractor::create("BRISK") );
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
|
||||
(CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
|
||||
BRISK::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_DescriptorExtractor_ORB, regression )
|
||||
{
|
||||
// TODO adjust the parameters below
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
|
||||
DescriptorExtractor::create("ORB") );
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb",
|
||||
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
|
||||
ORB::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_DescriptorExtractor_KAZE, regression )
|
||||
{
|
||||
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
|
||||
DescriptorExtractor::create("KAZE"),
|
||||
L2<float>(), FeatureDetector::create("KAZE"));
|
||||
KAZE::create(),
|
||||
L2<float>() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_DescriptorExtractor_AKAZE, regression )
|
||||
{
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
|
||||
DescriptorExtractor::create("AKAZE"),
|
||||
Hamming(), FeatureDetector::create("AKAZE"));
|
||||
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze",
|
||||
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
|
||||
AKAZE::create(),
|
||||
Hamming(), AKAZE::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
@ -249,48 +249,48 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
|
||||
|
||||
TEST( Features2d_Detector_BRISK, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-brisk", FeatureDetector::create("BRISK") );
|
||||
CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_FAST, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-fast", FeatureDetector::create("FAST") );
|
||||
CV_FeatureDetectorTest test( "detector-fast", FastFeatureDetector::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_GFTT, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-gftt", FeatureDetector::create("GFTT") );
|
||||
CV_FeatureDetectorTest test( "detector-gftt", GFTTDetector::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_Harris, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-harris", FeatureDetector::create("HARRIS") );
|
||||
CV_FeatureDetectorTest test( "detector-harris", GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_MSER, DISABLED_regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
|
||||
CV_FeatureDetectorTest test( "detector-mser", MSER::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_ORB, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
|
||||
CV_FeatureDetectorTest test( "detector-orb", ORB::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_KAZE, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") );
|
||||
CV_FeatureDetectorTest test( "detector-kaze", KAZE::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST( Features2d_Detector_AKAZE, regression )
|
||||
{
|
||||
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
|
||||
CV_FeatureDetectorTest test( "detector-akaze", AKAZE::create() );
|
||||
test.safe_run();
|
||||
}
|
||||
|
@ -61,7 +61,6 @@ public:
|
||||
protected:
|
||||
virtual void run(int)
|
||||
{
|
||||
cv::initModule_features2d();
|
||||
CV_Assert(detector);
|
||||
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
|
||||
|
||||
@ -121,51 +120,51 @@ protected:
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_BRISK, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"));
|
||||
CV_FeatureDetectorKeypointsTest test(BRISK::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_FAST, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.FAST"));
|
||||
CV_FeatureDetectorKeypointsTest test(FastFeatureDetector::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_HARRIS, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.HARRIS"));
|
||||
CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_GFTT, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.GFTT"));
|
||||
CV_FeatureDetectorKeypointsTest test(GFTTDetector::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_MSER, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.MSER"));
|
||||
CV_FeatureDetectorKeypointsTest test(MSER::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_ORB, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"));
|
||||
CV_FeatureDetectorKeypointsTest test(ORB::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_KAZE, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"));
|
||||
CV_FeatureDetectorKeypointsTest test(KAZE::create());
|
||||
test.safe_run();
|
||||
}
|
||||
|
||||
TEST(Features2d_Detector_Keypoints_AKAZE, validation)
|
||||
{
|
||||
CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_KAZE)));
|
||||
CV_FeatureDetectorKeypointsTest test_kaze(AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
|
||||
test_kaze.safe_run();
|
||||
|
||||
CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_MLDB)));
|
||||
CV_FeatureDetectorKeypointsTest test_mldb(AKAZE::create(AKAZE::DESCRIPTOR_MLDB));
|
||||
test_mldb.safe_run();
|
||||
}
|
||||
|
@ -43,6 +43,8 @@
|
||||
#include "test_precomp.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
|
||||
#if 0
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
using namespace std;
|
||||
@ -205,3 +207,5 @@ void CV_MserTest::run(int)
|
||||
}
|
||||
|
||||
TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
|
||||
|
||||
#endif
|
||||
|
@ -47,10 +47,10 @@ using namespace cv;
|
||||
|
||||
TEST(Features2D_ORB, _1996)
|
||||
{
|
||||
Ptr<FeatureDetector> fd = FeatureDetector::create("ORB");
|
||||
Ptr<FeatureDetector> fd = ORB::create();
|
||||
fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible
|
||||
fd->set("fastThreshold", 20);//more features than the default
|
||||
Ptr<DescriptorExtractor> de = DescriptorExtractor::create("ORB");
|
||||
Ptr<DescriptorExtractor> de = fd;
|
||||
|
||||
Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png");
|
||||
ASSERT_FALSE(image.empty());
|
||||
|
@ -367,7 +367,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
|
||||
else
|
||||
{
|
||||
UMat descriptors;
|
||||
(*surf)(gray_image, Mat(), features.keypoints, descriptors);
|
||||
surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
|
||||
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
|
||||
}
|
||||
}
|
||||
@ -375,7 +375,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
|
||||
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
|
||||
{
|
||||
grid_size = _grid_size;
|
||||
orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
|
||||
orb = ORB::create(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
|
||||
}
|
||||
|
||||
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
|
||||
@ -395,7 +395,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
|
||||
}
|
||||
|
||||
if (grid_size.area() == 1)
|
||||
(*orb)(gray_image, Mat(), features.keypoints, features.descriptors);
|
||||
orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
|
||||
else
|
||||
{
|
||||
features.keypoints.clear();
|
||||
@ -425,7 +425,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
|
||||
// << " gray_image_part.dims=" << gray_image_part.dims << ", "
|
||||
// << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
|
||||
|
||||
(*orb)(gray_image_part, UMat(), points, descriptors);
|
||||
orb->detectAndCompute(gray_image_part, UMat(), points, descriptors);
|
||||
|
||||
features.keypoints.reserve(features.keypoints.size() + points.size());
|
||||
for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
|
||||
|
@ -671,7 +671,7 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
|
||||
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
|
||||
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
|
||||
{
|
||||
setDetector(makePtr<GoodFeaturesToTrackDetector>());
|
||||
setDetector(GFTTDetector::create());
|
||||
setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
|
||||
setOutlierRejector(makePtr<NullOutlierRejector>());
|
||||
}
|
||||
|