Conflicts:
	modules/features2d/include/opencv2/features2d.hpp
	modules/features2d/src/freak.cpp
	modules/features2d/src/stardetector.cpp
sprice
2014-03-06 15:39:06 -08:00
900 changed files with 535873 additions and 671868 deletions


@@ -44,7 +44,7 @@
namespace cv
{
BOWTrainer::BOWTrainer()
BOWTrainer::BOWTrainer() : size(0)
{}
BOWTrainer::~BOWTrainer()
@@ -72,7 +72,7 @@ const std::vector<Mat>& BOWTrainer::getDescriptors() const
return descriptors;
}
int BOWTrainer::descripotorsCount() const
int BOWTrainer::descriptorsCount() const
{
return descriptors.empty() ? 0 : size;
}
@@ -121,6 +121,10 @@ BOWImgDescriptorExtractor::BOWImgDescriptorExtractor( const Ptr<DescriptorExtrac
dextractor(_dextractor), dmatcher(_dmatcher)
{}
BOWImgDescriptorExtractor::BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& _dmatcher ) :
dmatcher(_dmatcher)
{}
BOWImgDescriptorExtractor::~BOWImgDescriptorExtractor()
{}
@@ -136,50 +140,23 @@ const Mat& BOWImgDescriptorExtractor::getVocabulary() const
return vocabulary;
}
void BOWImgDescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* _descriptors )
void BOWImgDescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* descriptors )
{
imgDescriptor.release();
if( keypoints.empty() )
return;
int clusterCount = descriptorSize(); // = vocabulary.rows
// Compute descriptors for the image.
Mat descriptors;
dextractor->compute( image, keypoints, descriptors );
Mat _descriptors;
dextractor->compute( image, keypoints, _descriptors );
// Match keypoint descriptors to cluster center (to vocabulary)
std::vector<DMatch> matches;
dmatcher->match( descriptors, matches );
// Compute image descriptor
if( pointIdxsOfClusters )
{
pointIdxsOfClusters->clear();
pointIdxsOfClusters->resize(clusterCount);
}
imgDescriptor = Mat( 1, clusterCount, descriptorType(), Scalar::all(0.0) );
float *dptr = (float*)imgDescriptor.data;
for( size_t i = 0; i < matches.size(); i++ )
{
int queryIdx = matches[i].queryIdx;
int trainIdx = matches[i].trainIdx; // cluster index
CV_Assert( queryIdx == (int)i );
dptr[trainIdx] = dptr[trainIdx] + 1.f;
if( pointIdxsOfClusters )
(*pointIdxsOfClusters)[trainIdx].push_back( queryIdx );
}
// Normalize image descriptor.
imgDescriptor /= descriptors.rows;
compute( _descriptors, imgDescriptor, pointIdxsOfClusters );
// Add the descriptors of image keypoints
if (_descriptors) {
*_descriptors = descriptors.clone();
if (descriptors) {
*descriptors = _descriptors.clone();
}
}
@@ -193,4 +170,42 @@ int BOWImgDescriptorExtractor::descriptorType() const
return CV_32FC1;
}
void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputArray _imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters )
{
CV_Assert( !vocabulary.empty() );
int clusterCount = descriptorSize(); // = vocabulary.rows
// Match keypoint descriptors to cluster center (to vocabulary)
std::vector<DMatch> matches;
dmatcher->match( keypointDescriptors, matches );
// Compute image descriptor
if( pointIdxsOfClusters )
{
pointIdxsOfClusters->clear();
pointIdxsOfClusters->resize(clusterCount);
}
_imgDescriptor.create(1, clusterCount, descriptorType());
_imgDescriptor.setTo(Scalar::all(0));
Mat imgDescriptor = _imgDescriptor.getMat();
float *dptr = (float*)imgDescriptor.data;
for( size_t i = 0; i < matches.size(); i++ )
{
int queryIdx = matches[i].queryIdx;
int trainIdx = matches[i].trainIdx; // cluster index
CV_Assert( queryIdx == (int)i );
dptr[trainIdx] = dptr[trainIdx] + 1.f;
if( pointIdxsOfClusters )
(*pointIdxsOfClusters)[trainIdx].push_back( queryIdx );
}
// Normalize image descriptor.
imgDescriptor /= keypointDescriptors.size().height;
}
}
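A minimal usage sketch (not part of the patch) of the matcher-only constructor and the descriptor-only compute overload added above; the vocabulary size, matcher name ("BruteForce") and random stand-in descriptors are arbitrary illustrative choices.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    // Pretend vocabulary: 100 clusters of 32-dimensional float descriptors.
    Mat vocabulary(100, 32, CV_32F);
    randu(vocabulary, Scalar::all(0), Scalar::all(1));

    // The new constructor takes only a matcher, since no extractor is needed
    // when the keypoint descriptors are supplied directly.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    BOWImgDescriptorExtractor bow(matcher);
    bow.setVocabulary(vocabulary);

    // Keypoint descriptors computed elsewhere (here: random stand-ins).
    Mat keypointDescriptors(500, 32, CV_32F);
    randu(keypointDescriptors, Scalar::all(0), Scalar::all(1));

    Mat imgDescriptor;
    bow.compute(keypointDescriptors, imgDescriptor); // 1 x 100 normalized histogram
    return 0;
}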


@@ -163,8 +163,9 @@ void SimpleBlobDetector::write( cv::FileStorage& fs ) const
params.write(fs);
}
void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage, std::vector<Center> &centers) const
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
(void)image;
centers.clear();
@@ -276,7 +277,7 @@ void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryIm
#endif
}
void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const
{
//TODO: support mask
keypoints.clear();
@@ -284,7 +285,7 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
if (image.channels() == 3)
cvtColor(image, grayscaleImage, COLOR_BGR2GRAY);
else
grayscaleImage = image;
grayscaleImage = image.getMat();
std::vector < std::vector<Center> > centers;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
@@ -292,20 +293,11 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
Mat binarizedImage;
threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);
#ifdef DEBUG_BLOB_DETECTOR
// Mat keypointsImage;
// cvtColor( binarizedImage, keypointsImage, CV_GRAY2RGB );
#endif
std::vector < Center > curCenters;
findBlobs(grayscaleImage, binarizedImage, curCenters);
std::vector < std::vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++)
{
#ifdef DEBUG_BLOB_DETECTOR
// circle(keypointsImage, curCenters[i].location, curCenters[i].radius, Scalar(0,0,255),-1);
#endif
bool isNew = true;
for (size_t j = 0; j < centers.size(); j++)
{
@@ -327,17 +319,9 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
}
}
if (isNew)
{
newCenters.push_back(std::vector<Center> (1, curCenters[i]));
//centers.push_back(std::vector<Center> (1, curCenters[i]));
}
}
std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
#ifdef DEBUG_BLOB_DETECTOR
// imshow("binarized", keypointsImage );
//waitKey();
#endif
}
for (size_t i = 0; i < centers.size(); i++)
@@ -352,19 +336,7 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
normalizer += centers[i][j].confidence;
}
sumPoint *= (1. / normalizer);
KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius) * 2.0f);
keypoints.push_back(kpt);
}
#ifdef DEBUG_BLOB_DETECTOR
namedWindow("keypoints", CV_WINDOW_NORMAL);
Mat outImg = image.clone();
for(size_t i=0; i<keypoints.size(); i++)
{
circle(outImg, keypoints[i].pt, keypoints[i].size, Scalar(255, 0, 255), -1);
}
//drawKeypoints(image, keypoints, outImg);
imshow("keypoints", outImg);
waitKey();
#endif
}
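A hedged sketch (not from the patch) illustrating the behavioural change above: the KeyPoint::size reported by SimpleBlobDetector is now the blob diameter (2 * radius). The blob geometry and parameter values are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <cstdio>

using namespace cv;

int main()
{
    // White image with one dark blob of radius 20 px.
    Mat img(480, 640, CV_8UC1, Scalar(255));
    circle(img, Point(320, 240), 20, Scalar(0), -1);

    SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 100;
    SimpleBlobDetector detector(params);

    std::vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);
    for (size_t i = 0; i < keypoints.size(); i++)
        printf("blob at (%.1f, %.1f), diameter %.1f\n",
               keypoints[i].pt.x, keypoints[i].pt.y, keypoints[i].size);
    return 0;
}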


@@ -61,8 +61,9 @@ inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests16(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@@ -71,8 +72,9 @@ static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints,
}
}
static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests32(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@@ -82,8 +84,9 @@ static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints,
}
}
static void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests64(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@@ -125,6 +128,11 @@ int BriefDescriptorExtractor::descriptorType() const
return CV_8UC1;
}
int BriefDescriptorExtractor::defaultNorm() const
{
return NORM_HAMMING;
}
void BriefDescriptorExtractor::read( const FileNode& fn)
{
int dSize = fn["descriptorSize"];
@@ -150,12 +158,12 @@ void BriefDescriptorExtractor::write( FileStorage& fs) const
fs << "descriptorSize" << bytes_;
}
void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
// Construct integral image for fast smoothing (box filter)
Mat sum;
Mat grayImage = image;
Mat grayImage = image.getMat();
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
///TODO allow the user to pass in a precomputed integral image
@@ -168,7 +176,8 @@ void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoin
//Remove keypoints very close to the border
KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
descriptors = Mat::zeros((int)keypoints.size(), bytes_, CV_8U);
descriptors.create((int)keypoints.size(), bytes_, CV_8U);
descriptors.setTo(Scalar::all(0));
test_fn_(sum, keypoints, descriptors);
}
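A small sketch (an assumption, not part of the patch) of BRIEF used through the DescriptorExtractor interface, with the defaultNorm() method added above picking the matching norm; the synthetic image and FAST threshold are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat img(480, 640, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));

    std::vector<KeyPoint> keypoints;
    FastFeatureDetector(40, true).detect(img, keypoints);

    BriefDescriptorExtractor brief(32);          // 32-byte descriptors
    Mat descriptors;
    brief.compute(img, keypoints, descriptors);  // goes through computeImpl(InputArray, ...)

    BFMatcher matcher(brief.defaultNorm());      // NORM_HAMMING for BRIEF
    std::vector<DMatch> matches;
    matcher.match(descriptors, descriptors, matches);
    return 0;
}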


@@ -224,6 +224,8 @@ BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float
std::vector<int> indexChange)
{
generateKernel(radiusList, numberList, dMax, dMin, indexChange);
threshold = 20;
octaves = 3;
}
void
@@ -712,6 +714,12 @@ BRISK::descriptorType() const
return CV_8U;
}
int
BRISK::defaultNorm() const
{
return NORM_HAMMING;
}
BRISK::~BRISK()
{
delete[] patternPoints_;
@@ -745,13 +753,13 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v
void
BRISK::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
BRISK::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
(*this)(image, mask, keypoints);
(*this)(image.getMat(), mask.getMat(), keypoints);
}
void
BRISK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
BRISK::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}
@@ -2223,7 +2231,7 @@ BriskLayer::halfsample(const cv::Mat& srcimg, cv::Mat& dstimg)
CV_Assert(srcimg.cols / 2 == dstimg.cols);
CV_Assert(srcimg.rows / 2 == dstimg.rows);
// handle non-SSE case
// handle non-SSE case
resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
}
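A hedged sketch (not part of the patch) of BRISK through the detector/extractor interfaces touched above; thresholds and image content are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat img(480, 640, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));

    BRISK brisk(30, 3);                       // threshold, octaves
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    brisk.detect(img, keypoints);             // detectImpl(InputArray, ...)
    brisk.compute(img, keypoints, descriptors);

    BFMatcher matcher(brisk.defaultNorm());   // NORM_HAMMING, per the method added above
    std::vector<DMatch> matches;
    matcher.match(descriptors, descriptors, matches);
    return 0;
}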


@@ -54,7 +54,7 @@ namespace cv
DescriptorExtractor::~DescriptorExtractor()
{}
void DescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
if( image.empty() || keypoints.empty() )
{
@@ -68,8 +68,11 @@ void DescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keyp
computeImpl( image, keypoints, descriptors );
}
void DescriptorExtractor::compute( const std::vector<Mat>& imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, std::vector<Mat>& descCollection ) const
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
std::vector<Mat> imageCollection, descCollection;
_imageCollection.getMatVector(imageCollection);
_descCollection.getMatVector(descCollection);
CV_Assert( imageCollection.size() == pointCollection.size() );
descCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
@@ -106,7 +109,7 @@ Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExt
}
CV_WRAP void Feature2D::compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const
CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
DescriptorExtractor::compute(image, keypoints, descriptors);
}
@@ -157,8 +160,9 @@ struct KP_LessThan
const std::vector<KeyPoint>* kp;
};
void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
void OpponentColorDescriptorExtractor::computeImpl( InputArray _bgrImage, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
Mat bgrImage = _bgrImage.getMat();
std::vector<Mat> opponentChannels;
convertBGRImageToOpponentColorSpace( bgrImage, opponentChannels );
@@ -247,6 +251,11 @@ int OpponentColorDescriptorExtractor::descriptorType() const
return descriptorExtractor->descriptorType();
}
int OpponentColorDescriptorExtractor::defaultNorm() const
{
return descriptorExtractor->defaultNorm();
}
bool OpponentColorDescriptorExtractor::empty() const
{
return !descriptorExtractor || descriptorExtractor->empty();
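A hedged sketch (not part of the patch) of OpponentColorDescriptorExtractor wrapping another extractor; the new defaultNorm() simply forwards to the wrapped extractor. The choice of BRIEF and FAST here is illustrative only.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat bgr(480, 640, CV_8UC3);
    randu(bgr, Scalar::all(0), Scalar::all(255));

    Ptr<DescriptorExtractor> brief = DescriptorExtractor::create("BRIEF");
    OpponentColorDescriptorExtractor opponent(brief);

    std::vector<KeyPoint> keypoints;
    FastFeatureDetector(40, true).detect(bgr, keypoints);

    Mat descriptors;
    opponent.compute(bgr, keypoints, descriptors);     // InputArray/OutputArray path
    CV_Assert(opponent.defaultNorm() == brief->defaultNorm());
    return 0;
}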


@@ -51,7 +51,7 @@ namespace cv
FeatureDetector::~FeatureDetector()
{}
void FeatureDetector::detect( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void FeatureDetector::detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask ) const
{
keypoints.clear();
@@ -63,11 +63,29 @@ void FeatureDetector::detect( const Mat& image, std::vector<KeyPoint>& keypoints
detectImpl( image, keypoints, mask );
}
void FeatureDetector::detect(const std::vector<Mat>& imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, const std::vector<Mat>& masks ) const
void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection,
InputArrayOfArrays _masks ) const
{
if (_imageCollection.isUMatVector())
{
std::vector<UMat> uimageCollection, umasks;
_imageCollection.getUMatVector(uimageCollection);
_masks.getUMatVector(umasks);
pointCollection.resize( uimageCollection.size() );
for( size_t i = 0; i < uimageCollection.size(); i++ )
detect( uimageCollection[i], pointCollection[i], umasks.empty() ? noArray() : umasks[i] );
return;
}
std::vector<Mat> imageCollection, masks;
_imageCollection.getMatVector(imageCollection);
_masks.getMatVector(masks);
pointCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
detect( imageCollection[i], pointCollection[i], masks.empty() ? Mat() : masks[i] );
detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] );
}
/*void FeatureDetector::read( const FileNode& )
@@ -125,21 +143,37 @@ GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
{
}
void GFTTDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
std::vector<Point2f> corners;
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, mask,
blockSize, useHarrisDetector, k );
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
keypoints.resize(corners.size());
std::vector<Point2f>::const_iterator corner_it = corners.begin();
std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
{
*keypoint_it = KeyPoint( *corner_it, (float)blockSize );
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -157,8 +191,10 @@ DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featur
{}
void DenseFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void DenseFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
float curScale = static_cast<float>(initFeatureScale);
int curStep = initXyStep;
int curBound = initImgBound;
@@ -271,9 +307,9 @@ public:
};
} // namepace
void GridAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void GridAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
if (image.empty() || maxTotalKeypoints < gridRows * gridCols)
if (_image.empty() || maxTotalKeypoints < gridRows * gridCols)
{
keypoints.clear();
return;
@@ -281,6 +317,8 @@ void GridAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPo
keypoints.reserve(maxTotalKeypoints);
int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);
Mat image = _image.getMat(), mask = _mask.getMat();
cv::Mutex kptLock;
cv::parallel_for_(cv::Range(0, gridRows * gridCols),
GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
@@ -298,8 +336,9 @@ bool PyramidAdaptedFeatureDetector::empty() const
return !detector || detector->empty();
}
void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void PyramidAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
Mat src = image;
Mat src_mask = mask;
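A hedged sketch (not part of the patch) of the collection overload of FeatureDetector::detect reworked above; a plain std::vector<Mat> binds to InputArrayOfArrays, while a std::vector<UMat> would take the new isUMatVector() branch. Image sizes and GFTT parameters are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    std::vector<Mat> images(3);
    for (size_t i = 0; i < images.size(); i++)
    {
        images[i].create(240, 320, CV_8UC1);
        randu(images[i], Scalar::all(0), Scalar::all(255));
    }

    GFTTDetector detector(500, 0.01, 1.0);   // maxCorners, qualityLevel, minDistance
    std::vector<std::vector<KeyPoint> > keypoints;
    detector.detect(images, keypoints);      // masks left at their empty default
    return 0;
}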


@@ -50,7 +50,7 @@ namespace cv
/*
* Functions to draw keypoints and matches.
*/
static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& color, int flags )
static inline void _drawKeypoint( InputOutputArray img, const KeyPoint& p, const Scalar& color, int flags )
{
CV_Assert( !img.empty() );
Point center( cvRound(p.pt.x * draw_multiplier), cvRound(p.pt.y * draw_multiplier) );
@@ -88,7 +88,7 @@ static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& col
}
}
void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, Mat& outImage,
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& _color, int flags )
{
if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
@@ -120,25 +120,29 @@ void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, Ma
}
}
static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
Mat& outImg, Mat& outImg1, Mat& outImg2,
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
const Scalar& singlePointColor, int flags )
{
Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
Mat outImg;
Size img1size = img1.size(), img2size = img2.size();
Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
{
outImg = _outImg.getMat();
if( size.width > outImg.cols || size.height > outImg.rows )
CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
}
else
{
outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
_outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
outImg = _outImg.getMat();
outImg = Scalar::all(0);
outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
if( img1.type() == CV_8U )
cvtColor( img1, outImg1, COLOR_GRAY2BGR );
@@ -154,15 +158,15 @@ static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyP
// draw keypoints
if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
{
Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
}
}
static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
static inline void _drawMatch( InputOutputArray outImg, InputOutputArray outImg1, InputOutputArray outImg2 ,
const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, int flags )
{
RNG& rng = theRNG();
@@ -174,7 +178,7 @@ static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
Point2f pt1 = kp1.pt,
pt2 = kp2.pt,
dpt2 = Point2f( std::min(pt2.x+outImg1.cols, float(outImg.cols-1)), pt2.y );
dpt2 = Point2f( std::min(pt2.x+outImg1.size().width, float(outImg.size().width-1)), pt2.y );
line( outImg,
Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
@@ -182,9 +186,9 @@ static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
color, 1, LINE_AA, draw_shift_bits );
}
void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, Mat& outImg,
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<char>& matchesMask, int flags )
{
@@ -211,9 +215,9 @@ void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
}
}
void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, Mat& outImg,
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<std::vector<char> >& matchesMask, int flags )
{
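A hedged sketch (not part of the patch) of drawMatches with the InputArray/InputOutputArray signatures above; the detector, extractor and image content are arbitrary illustrative choices.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat img1(240, 320, CV_8UC1), img2(240, 320, CV_8UC1);
    randu(img1, Scalar::all(0), Scalar::all(255));
    randu(img2, Scalar::all(0), Scalar::all(255));

    std::vector<KeyPoint> kp1, kp2;
    FastFeatureDetector(40, true).detect(img1, kp1);
    FastFeatureDetector(40, true).detect(img2, kp2);

    BriefDescriptorExtractor brief;
    Mat d1, d2;
    brief.compute(img1, kp1, d1);
    brief.compute(img2, kp2, d2);

    std::vector<DMatch> matches;
    BFMatcher(NORM_HAMMING).match(d1, d2, matches);

    Mat outImg;                                           // passed as InputOutputArray
    drawMatches(img1, kp1, img2, kp2, matches, outImg);
    return 0;
}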


@@ -54,8 +54,10 @@ bool DynamicAdaptedFeatureDetector::empty() const
return !adjuster_ || adjuster_->empty();
}
void DynamicAdaptedFeatureDetector::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
void DynamicAdaptedFeatureDetector::detectImpl(InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
//for oscillation testing
bool down = false;
bool up = false;
@@ -98,7 +100,7 @@ FastAdjuster::FastAdjuster( int init_thresh, bool nonmax, int min_thresh, int ma
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void FastAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
void FastAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
}
@@ -133,7 +135,7 @@ StarAdjuster::StarAdjuster(double initial_thresh, double min_thresh, double max_
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void StarAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
void StarAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
detector_tmp.detect(image, keypoints, mask);
@@ -167,7 +169,7 @@ SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void SurfAdjuster::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const cv::Mat& mask) const
void SurfAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
surf->set("hessianThreshold", thresh_);


@@ -283,10 +283,11 @@ FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppressio
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}
void FastFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
FAST( grayImage, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
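A hedged sketch (not part of the patch) of FAST on a color image with a mask; per the change above, the InputArray-based detectImpl converts to grayscale before detection and then applies the pixel mask. Mask geometry and threshold are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat img(480, 640, CV_8UC3);
    randu(img, Scalar::all(0), Scalar::all(255));

    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    mask(Rect(0, 0, 320, 480)) = 255;         // keep only the left half

    std::vector<KeyPoint> keypoints;
    FastFeatureDetector detector(40, true);
    detector.detect(img, keypoints, mask);
    return 0;
}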


@@ -58,19 +58,19 @@ Ptr<Feature2D> Feature2D::create( const String& feature2DType )
CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK",
obj.info()->addParam(obj, "thres", obj.threshold);
obj.info()->addParam(obj, "octaves", obj.octaves));
obj.info()->addParam(obj, "octaves", obj.octaves))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
obj.info()->addParam(obj, "bytes", obj.bytes_));
obj.info()->addParam(obj, "bytes", obj.bytes_))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
obj.info()->addParam(obj, "type", obj.type));
obj.info()->addParam(obj, "type", obj.type))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -79,7 +79,7 @@ CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
obj.info()->addParam(obj, "responseThreshold", obj.responseThreshold);
obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize));
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -92,7 +92,7 @@ CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution);
obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold);
obj.info()->addParam(obj, "minMargin", obj.minMargin);
obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -104,7 +104,7 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
obj.info()->addParam(obj, "patchSize", obj.patchSize);
obj.info()->addParam(obj, "WTA_K", obj.WTA_K);
obj.info()->addParam(obj, "scoreType", obj.scoreType));
obj.info()->addParam(obj, "scoreType", obj.scoreType))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -112,7 +112,7 @@ CV_INIT_ALGORITHM(FREAK, "Feature2D.FREAK",
obj.info()->addParam(obj, "orientationNormalized", obj.orientationNormalized);
obj.info()->addParam(obj, "scaleNormalized", obj.scaleNormalized);
obj.info()->addParam(obj, "patternScale", obj.patternScale);
obj.info()->addParam(obj, "nbOctave", obj.nOctaves));
obj.info()->addParam(obj, "nbOctave", obj.nOctaves))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -121,7 +121,7 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
obj.info()->addParam(obj, "minDistance", obj.minDistance);
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
obj.info()->addParam(obj, "k", obj.k));
obj.info()->addParam(obj, "k", obj.k))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -141,7 +141,7 @@ CV_INIT_ALGORITHM(SimpleBlobDetector, "Feature2D.SimpleBlob",
obj.info()->addParam(obj, "maxInertiaRatio", obj.params.maxInertiaRatio);
obj.info()->addParam(obj, "filterByConvexity", obj.params.filterByConvexity);
obj.info()->addParam(obj, "maxConvexity", obj.params.maxConvexity);
);
)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -162,7 +162,7 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
obj.info()->addParam(obj, "minDistance", obj.minDistance);
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
obj.info()->addParam(obj, "k", obj.k));
obj.info()->addParam(obj, "k", obj.k))
////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -173,21 +173,21 @@ CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "initXyStep", obj.initXyStep);
obj.info()->addParam(obj, "initImgBound", obj.initImgBound);
obj.info()->addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale));
obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale))
CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
obj.info()->addParam<FeatureDetector>(obj, "detector", obj.detector, false, 0, 0); // Extra params added to avoid VS2013 fatal error in opencv2/core.hpp (decl. of addParam)
obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
obj.info()->addParam(obj, "gridRows", obj.gridRows);
obj.info()->addParam(obj, "gridCols", obj.gridCols));
obj.info()->addParam(obj, "gridCols", obj.gridCols))
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher",
obj.info()->addParam(obj, "normType", obj.normType);
obj.info()->addParam(obj, "crossCheck", obj.crossCheck));
obj.info()->addParam(obj, "crossCheck", obj.crossCheck))
CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",);
CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
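A hedged sketch (not part of the patch) of what the CV_INIT_ALGORITHM registrations above enable: creating a detector by name and tuning the registered parameters through the Algorithm get/set interface. Parameter values are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <iostream>

using namespace cv;

int main()
{
    initModule_features2d();                 // make sure the algorithms are registered

    Ptr<FeatureDetector> fast = FeatureDetector::create("FAST");
    fast->set("threshold", 30);              // parameters declared via addParam above
    fast->set("nonmaxSuppression", true);

    Mat img(240, 320, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));
    std::vector<KeyPoint> keypoints;
    fast->detect(img, keypoints);
    std::cout << "detected " << keypoints.size() << " keypoints" << std::endl;
    return 0;
}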


@@ -54,8 +54,9 @@ static const int FREAK_NB_SCALES = FREAK::NB_SCALES;
static const int FREAK_NB_PAIRS = FREAK::NB_PAIRS;
static const int FREAK_NB_ORIENPAIRS = FREAK::NB_ORIENPAIRS;
// default pairs
static const int FREAK_DEF_PAIRS[FREAK::NB_PAIRS] =
{ // default pairs
{
404,431,818,511,181,52,311,874,774,543,719,230,417,205,11,
560,149,265,39,306,165,857,250,8,61,15,55,717,44,412,
592,134,761,695,660,782,625,487,549,516,271,665,762,392,178,
@@ -92,15 +93,17 @@ static const int FREAK_DEF_PAIRS[FREAK::NB_PAIRS] =
670,249,36,581,389,605,331,518,442,822
};
// used to sort pairs during pairs selection
struct PairStat
{ // used to sort pairs during pairs selection
{
double mean;
int idx;
};
struct sortMean
{
bool operator()( const PairStat& a, const PairStat& b ) const {
bool operator()( const PairStat& a, const PairStat& b ) const
{
return a.mean < b.mean;
}
};
@@ -130,17 +133,21 @@ void FREAK::buildPattern()
radius[6]/2.0, radius[6]/2.0
};
// fill the lookup table
for( int scaleIdx=0; scaleIdx < FREAK_NB_SCALES; ++scaleIdx ) {
for( int scaleIdx=0; scaleIdx < FREAK_NB_SCALES; ++scaleIdx )
{
patternSizes[scaleIdx] = 0; // proper initialization
scalingFactor = std::pow(scaleStep,scaleIdx); //scale of the pattern, scaleStep ^ scaleIdx
for( int orientationIdx = 0; orientationIdx < FREAK_NB_ORIENTATION; ++orientationIdx ) {
for( int orientationIdx = 0; orientationIdx < FREAK_NB_ORIENTATION; ++orientationIdx )
{
theta = double(orientationIdx)* 2*CV_PI/double(FREAK_NB_ORIENTATION); // orientation of the pattern
int pointIdx = 0;
PatternPoint* patternLookupPtr = &patternLookup[0];
for( size_t i = 0; i < 8; ++i ) {
for( int k = 0 ; k < n[i]; ++k ) {
for( size_t i = 0; i < 8; ++i )
{
for( int k = 0 ; k < n[i]; ++k )
{
beta = CV_PI/n[i] * (i%2); // orientation offset so that groups of points on each circles are staggered
alpha = double(k)* 2*CV_PI/double(n[i])+beta+theta;
@@ -182,7 +189,8 @@ void FREAK::buildPattern()
orientationPairs[39].i=30; orientationPairs[39].j=33; orientationPairs[40].i=31; orientationPairs[40].j=34; orientationPairs[41].i=32; orientationPairs[41].j=35;
orientationPairs[42].i=36; orientationPairs[42].j=39; orientationPairs[43].i=37; orientationPairs[43].j=40; orientationPairs[44].i=38; orientationPairs[44].j=41;
for( unsigned m = FREAK_NB_ORIENPAIRS; m--; ) {
for( unsigned m = FREAK_NB_ORIENPAIRS; m--; )
{
const float dx = patternLookup[orientationPairs[m].i].x-patternLookup[orientationPairs[m].j].x;
const float dy = patternLookup[orientationPairs[m].i].y-patternLookup[orientationPairs[m].j].y;
const float norm_sq = (dx*dx+dy*dy);
@@ -192,31 +200,38 @@ void FREAK::buildPattern()
// build the list of description pairs
std::vector<DescriptionPair> allPairs;
for( unsigned int i = 1; i < (unsigned int)FREAK_NB_POINTS; ++i ) {
for( unsigned int i = 1; i < (unsigned int)FREAK_NB_POINTS; ++i )
{
// (generate all the pairs)
for( unsigned int j = 0; (unsigned int)j < i; ++j ) {
for( unsigned int j = 0; (unsigned int)j < i; ++j )
{
DescriptionPair pair = {(uchar)i,(uchar)j};
allPairs.push_back(pair);
}
}
// Input vector provided
if( !selectedPairs0.empty() ) {
if( (int)selectedPairs0.size() == FREAK_NB_PAIRS ) {
if( !selectedPairs0.empty() )
{
if( (int)selectedPairs0.size() == FREAK_NB_PAIRS )
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[selectedPairs0.at(i)];
}
else {
else
{
CV_Error(Error::StsVecLengthErr, "Input vector does not match the required size");
}
}
else { // default selected pairs
else // default selected pairs
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[FREAK_DEF_PAIRS[i]];
}
}
void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const {
void FREAK::computeImpl( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const
{
Mat image = _image.getMat();
if( image.empty() )
return;
if( keypoints.empty() )
@@ -235,21 +250,21 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
{
// Create the integral image appropriate for our type & usage
if (image.depth() == CV_8U)
computeDescriptors<uchar, int>(grayImage, keypoints, descriptors);
computeDescriptors<uchar, int>(grayImage, keypoints, _descriptors);
else if (image.depth() == CV_8S)
computeDescriptors<char, int>(grayImage, keypoints, descriptors);
computeDescriptors<char, int>(grayImage, keypoints, _descriptors);
else
CV_Error( Error::StsUnsupportedFormat, "" );
} else {
// Create the integral image appropriate for our type & usage
if ( image.depth() == CV_8U )
computeDescriptors<uchar, double>(grayImage, keypoints, descriptors);
computeDescriptors<uchar, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_8S )
computeDescriptors<char, double>(grayImage, keypoints, descriptors);
computeDescriptors<char, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_16U )
computeDescriptors<ushort, double>(grayImage, keypoints, descriptors);
computeDescriptors<ushort, double>(grayImage, keypoints, _descriptors);
else if ( image.depth() == CV_16S )
computeDescriptors<short, double>(grayImage, keypoints, descriptors);
computeDescriptors<short, double>(grayImage, keypoints, _descriptors);
else
CV_Error( Error::StsUnsupportedFormat, "" );
}
@@ -337,8 +352,9 @@ void FREAK::extractDescriptor(uchar *pointsValue, void ** ptr) const
#endif
template <typename srcMatType, typename iiMatType>
void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const {
void FREAK::computeDescriptors( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const {
Mat image = _image.getMat();
Mat imgIntegral;
integral(image, imgIntegral, DataType<iiMatType>::type);
std::vector<int> kpScaleIdx(keypoints.size()); // used to save pattern scale index corresponding to each keypoints
@@ -351,8 +367,10 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
int direction1;
// compute the scale index corresponding to the keypoint size and remove keypoints close to the border
if( scaleNormalized ) {
for( size_t k = keypoints.size(); k--; ) {
if( scaleNormalized )
{
for( size_t k = keypoints.size(); k--; )
{
//Is k non-zero? If so, decrement it and continue"
kpScaleIdx[k] = std::max( (int)(std::log(keypoints[k].size/FREAK_SMALLEST_KP_SIZE)*sizeCst+0.5) ,0);
if( kpScaleIdx[k] >= FREAK_NB_SCALES )
@@ -362,24 +380,29 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
) {
)
{
keypoints.erase(kpBegin+k);
kpScaleIdx.erase(ScaleIdxBegin+k);
}
}
}
else {
else
{
const int scIdx = std::max( (int)(1.0986122886681*sizeCst+0.5) ,0);
for( size_t k = keypoints.size(); k--; ) {
for( size_t k = keypoints.size(); k--; )
{
kpScaleIdx[k] = scIdx; // equivalent to the formule when the scale is normalized with a constant size of keypoints[k].size=3*SMALLEST_KP_SIZE
if( kpScaleIdx[k] >= FREAK_NB_SCALES ) {
if( kpScaleIdx[k] >= FREAK_NB_SCALES )
{
kpScaleIdx[k] = FREAK_NB_SCALES-1;
}
if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
) {
)
{
keypoints.erase(kpBegin+k);
kpScaleIdx.erase(ScaleIdxBegin+k);
}
@@ -387,18 +410,24 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
}
// allocate descriptor memory, estimate orientations, extract descriptors
if( !extAll ) {
if( !extAll )
{
// extract the best comparisons only
descriptors = cv::Mat::zeros((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
_descriptors.create((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
void *ptr = descriptors.data+(keypoints.size()-1)*descriptors.step[0];
for( size_t k = keypoints.size(); k--; ) {
// estimate orientation (gradient)
if( !orientationNormalized ) {
if( !orientationNormalized )
{
thetaIdx = 0; // assign 0° to all keypoints
keypoints[k].angle = 0.0;
}
else {
else
{
// get the points intensity value in the un-rotated pattern
for( int i = FREAK_NB_POINTS; i--; ) {
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
@@ -407,7 +436,8 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
}
direction0 = 0;
direction1 = 0;
for( int m = 45; m--; ) {
for( int m = 45; m--; )
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
direction0 += delta*(orientationPairs[m].weight_dx)/2048;
@@ -433,17 +463,23 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
extractDescriptor<srcMatType>(pointsValue, &ptr);
}
}
else { // extract all possible comparisons for selection
descriptors = cv::Mat::zeros((int)keypoints.size(), 128, CV_8U);
else // extract all possible comparisons for selection
{
_descriptors.create((int)keypoints.size(), 128, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
for( size_t k = keypoints.size(); k--; ) {
for( size_t k = keypoints.size(); k--; )
{
//estimate orientation (gradient)
if( !orientationNormalized ) {
if( !orientationNormalized )
{
thetaIdx = 0;//assign 0° to all keypoints
keypoints[k].angle = 0.0;
}
else {
else
{
//get the points intensity value in the un-rotated pattern
for( int i = FREAK_NB_POINTS;i--; )
pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
@@ -452,7 +488,8 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
direction0 = 0;
direction1 = 0;
for( int m = 45; m--; ) {
for( int m = 45; m--; )
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
direction0 += delta*(orientationPairs[m].weight_dx)/2048;
@@ -476,9 +513,11 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
}
int cnt(0);
for( int i = 1; i < FREAK_NB_POINTS; ++i ) {
for( int i = 1; i < FREAK_NB_POINTS; ++i )
{
//(generate all the pairs)
for( int j = 0; j < i; ++j ) {
for( int j = 0; j < i; ++j )
{
ptr->set(cnt, pointsValue[i] >= pointsValue[j] );
++cnt;
}
@@ -490,12 +529,13 @@ void FREAK::computeDescriptors( const Mat& image, std::vector<KeyPoint>& keypoin
// simply take average on a square patch, not even gaussian approx
template <typename imgType, typename iiType>
imgType FREAK::meanIntensity( const cv::Mat& image, const cv::Mat& integral,
imgType FREAK::meanIntensity( InputArray _image, InputArray _integral,
const float kp_x,
const float kp_y,
const unsigned int scale,
const unsigned int rot,
const unsigned int point) const {
Mat image = _image.getMat(), integral = _integral.getMat();
// get point position in image
const PatternPoint& FreakPoint = patternLookup[scale*FREAK_NB_ORIENTATION*FREAK_NB_POINTS + rot*FREAK_NB_POINTS + point];
const float xf = FreakPoint.x+kp_x;
@@ -507,7 +547,8 @@ imgType FREAK::meanIntensity( const cv::Mat& image, const cv::Mat& integral,
const float radius = FreakPoint.sigma;
// calculate output:
if( radius < 0.5 ) {
if( radius < 0.5 )
{
// interpolation multipliers:
const int r_x = static_cast<int>((xf-x)*1024);
const int r_y = static_cast<int>((yf-y)*1024);
@@ -555,7 +596,8 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
if( verbose )
std::cout << "Number of images: " << images.size() << std::endl;
for( size_t i = 0;i < images.size(); ++i ) {
for( size_t i = 0;i < images.size(); ++i )
{
Mat descriptorsTmp;
computeImpl(images[i],keypoints[i],descriptorsTmp);
descriptors.push_back(descriptorsTmp);
@@ -568,8 +610,10 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
Mat descriptorsFloat = Mat::zeros(descriptors.rows, 903, CV_32F);
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(descriptors.rows-1)*descriptors.step[0]);
for( int m = descriptors.rows; m--; ) {
for( int n = 903; n--; ) {
for( int m = descriptors.rows; m--; )
{
for( int n = 903; n--; )
{
if( ptr->test(n) == true )
descriptorsFloat.at<float>(m,n)=1.0f;
}
@@ -577,7 +621,8 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
}
std::vector<PairStat> pairStat;
for( int n = 903; n--; ) {
for( int n = 903; n--; )
{
// the higher the variance, the better --> mean = 0.5
PairStat tmp = { fabs( mean(descriptorsFloat.col(n))[0]-0.5 ) ,n};
pairStat.push_back(tmp);
@@ -586,19 +631,22 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
std::sort( pairStat.begin(),pairStat.end(), sortMean() );
std::vector<PairStat> bestPairs;
for( int m = 0; m < 903; ++m ) {
for( int m = 0; m < 903; ++m )
{
if( verbose )
std::cout << m << ":" << bestPairs.size() << " " << std::flush;
double corrMax(0);
for( size_t n = 0; n < bestPairs.size(); ++n ) {
for( size_t n = 0; n < bestPairs.size(); ++n )
{
int idxA = bestPairs[n].idx;
int idxB = pairStat[m].idx;
double corr(0);
// compute correlation between 2 pairs
corr = fabs(compareHist(descriptorsFloat.col(idxA), descriptorsFloat.col(idxB), HISTCMP_CORREL));
if( corr > corrMax ) {
if( corr > corrMax )
{
corrMax = corr;
if( corrMax >= corrTresh )
break;
@@ -608,7 +656,8 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
if( corrMax < corrTresh/*0.7*/ )
bestPairs.push_back(pairStat[m]);
if( bestPairs.size() >= 512 ) {
if( bestPairs.size() >= 512 )
{
if( verbose )
std::cout << m << std::endl;
break;
@@ -616,11 +665,13 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
}
std::vector<int> idxBestPairs;
if( (int)bestPairs.size() >= FREAK_NB_PAIRS ) {
if( (int)bestPairs.size() >= FREAK_NB_PAIRS )
{
for( int i = 0; i < FREAK_NB_PAIRS; ++i )
idxBestPairs.push_back(bestPairs[i].idx);
}
else {
else
{
if( verbose )
std::cout << "correlation threshold too small (restrictive)" << std::endl;
CV_Error(Error::StsError, "correlation threshold too small (restrictive)");
@@ -631,11 +682,13 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
/*
// create an image showing the brisk pattern
void FREAKImpl::drawPattern()
{ // create an image showing the brisk pattern
{
Mat pattern = Mat::zeros(1000, 1000, CV_8UC3) + Scalar(255,255,255);
int sFac = 500 / patternScale;
for( int n = 0; n < kNB_POINTS; ++n ) {
for( int n = 0; n < kNB_POINTS; ++n )
{
PatternPoint& pt = patternLookup[n];
circle(pattern, Point( pt.x*sFac,pt.y*sFac)+Point(500,500), pt.sigma*sFac, Scalar(0,0,255),2);
// rectangle(pattern, Point( (pt.x-pt.sigma)*sFac,(pt.y-pt.sigma)*sFac)+Point(500,500), Point( (pt.x+pt.sigma)*sFac,(pt.y+pt.sigma)*sFac)+Point(500,500), Scalar(0,0,255),2);
@@ -663,12 +716,19 @@ FREAK::~FREAK()
{
}
int FREAK::descriptorSize() const {
int FREAK::descriptorSize() const
{
return FREAK_NB_PAIRS / 8; // descriptor length in bytes
}
int FREAK::descriptorType() const {
int FREAK::descriptorType() const
{
return CV_8U;
}
int FREAK::defaultNorm() const
{
return NORM_HAMMING;
}
} // END NAMESPACE CV
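A hedged sketch (not part of the patch) of FREAK through the reworked InputArray/OutputArray path, matched with the norm reported by the defaultNorm() method added above; the BRISK detector and its threshold are arbitrary choices.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

using namespace cv;

int main()
{
    Mat img(480, 640, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));

    std::vector<KeyPoint> keypoints;
    BRISK(30, 3).detect(img, keypoints);

    FREAK freak;                                  // orientation/scale normalization on
    Mat descriptors;
    freak.compute(img, keypoints, descriptors);   // 64 bytes per keypoint (512 pairs / 8)

    BFMatcher matcher(freak.defaultNorm());       // NORM_HAMMING
    std::vector<DMatch> matches;
    matcher.match(descriptors, descriptors, matches);
    return 0;
}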

File diff suppressed because it is too large


@@ -1266,11 +1266,11 @@ MSER::MSER( int _delta, int _min_area, int _max_area,
{
}
void MSER::operator()( const Mat& image, std::vector<std::vector<Point> >& dstcontours, const Mat& mask ) const
void MSER::operator()( InputArray image, std::vector<std::vector<Point> >& dstcontours, InputArray mask ) const
{
CvMat _image = image, _mask, *pmask = 0;
if( mask.data )
pmask = &(_mask = mask);
CvMat _image = image.getMat(), _mask, *pmask = 0;
if( !mask.empty() )
pmask = &(_mask = mask.getMat());
MemStorage storage(cvCreateMemStorage(0));
Seq<CvSeq*> contours;
extractMSER( &_image, pmask, &contours.seq, storage,
@@ -1284,8 +1284,9 @@ void MSER::operator()( const Mat& image, std::vector<std::vector<Point> >& dstco
}
void MserFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void MserFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
std::vector<std::vector<Point> > msers;
(*this)(image, msers, mask);
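A hedged sketch (not part of the patch) of the InputArray-based MSER::operator(); an empty Mat is accepted as the "no mask" case handled by the !mask.empty() check above. The region geometry is arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <cstdio>

using namespace cv;

int main()
{
    // White image with one dark rectangle, a stable extremal region.
    Mat img(480, 640, CV_8UC1, Scalar(255));
    rectangle(img, Rect(100, 100, 100, 80), Scalar(0), -1);

    MSER mser;                                   // default parameters
    std::vector<std::vector<Point> > regions;
    mser(img, regions, Mat());                   // empty mask means "no mask"
    printf("found %d MSER regions\n", (int)regions.size());
    return 0;
}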


@@ -0,0 +1,789 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Nathan, liujun@multicorewareinc.com
// Peng Xiao, pengxiao@outlook.com
// Baichuan Su, baichuan@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics:enable
#define MAX_FLOAT 3.40282e+038f
#ifndef T
#define T float
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef MAX_DESC_LEN
#define MAX_DESC_LEN 64
#endif
#ifndef DIST_TYPE
#define DIST_TYPE 2
#endif
// dirty fix for non-template support
#if (DIST_TYPE == 2) // L1Dist
# ifdef T_FLOAT
# define DIST(x, y) fabs((x) - (y))
typedef float value_type;
typedef float result_type;
# else
# define DIST(x, y) abs((x) - (y))
typedef int value_type;
typedef int result_type;
# endif
#define DIST_RES(x) (x)
#elif (DIST_TYPE == 4) // L2Dist
#define DIST(x, y) (((x) - (y)) * ((x) - (y)))
typedef float value_type;
typedef float result_type;
#define DIST_RES(x) sqrt(x)
#elif (DIST_TYPE == 6) // Hamming
//http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
inline int bit1Count(int v)
{
v = v - ((v >> 1) & 0x55555555); // reuse input as temporary
v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // temp
return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; // count
}
#define DIST(x, y) bit1Count( (x) ^ (y) )
typedef int value_type;
typedef int result_type;
#define DIST_RES(x) (x)
#endif
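// Worked example (an assumption, not part of the kernel): with DIST_TYPE == 6,
// DIST(0x0F, 0xF0) == bit1Count(0x0F ^ 0xF0) == bit1Count(0xFF) == 8, i.e. the
// Hamming distance between two descriptor words; DIST_RES is the identity here.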
inline result_type reduce_block(
__local value_type *s_query,
__local value_type *s_train,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
result += DIST(
s_query[lidy * BLOCK_SIZE + j],
s_train[j * BLOCK_SIZE + lidx]);
}
return DIST_RES(result);
}
inline result_type reduce_block_match(
__local value_type *s_query,
__local value_type *s_train,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
result += DIST(
s_query[lidy * BLOCK_SIZE + j],
s_train[j * BLOCK_SIZE + lidx]);
}
return (result);
}
inline result_type reduce_multi_block(
__local value_type *s_query,
__local value_type *s_train,
int block_index,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
result += DIST(
s_query[lidy * MAX_DESC_LEN + block_index * BLOCK_SIZE + j],
s_train[j * BLOCK_SIZE + lidx]);
}
return result;
}
/* 2dim launch, global size: dim0 is (query rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, dim1 is BLOCK_SIZE
local size: dim0 is BLOCK_SIZE, dim1 is BLOCK_SIZE.
*/
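// Example (an assumption, not from the kernel): for query_rows == 1000 and
// BLOCK_SIZE == 16, the host would launch global size (63 * 16, 16) == (1008, 16)
// with local size (16, 16), i.e. 63 work-groups of 16x16 threads, each group
// handling up to 16 query descriptors.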
__kernel void BruteForceMatch_UnrollMatch(
__global T *query,
__global T *train,
//__global float *mask,
__global int *bestTrainIdx,
__global float *bestDistance,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * MAX_DESC_LEN;
int queryIdx = groupidx * BLOCK_SIZE + lidy;
// load the query into local memory.
#pragma unroll
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE; i ++)
{
int loadx = lidx + i * BLOCK_SIZE;
s_query[lidy * MAX_DESC_LEN + loadx] = loadx < query_cols ? query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx] : 0;
}
float myBestDistance = MAX_FLOAT;
int myBestTrainIdx = -1;
// loopUnrolledCached to find the best trainIdx and best distance.
for (int t = 0, endt = (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; t++)
{
result_type result = 0;
#pragma unroll
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE ; i++)
{
//load a BLOCK_SIZE * BLOCK_SIZE block into local train.
const int loadx = lidx + i * BLOCK_SIZE;
s_train[lidx * BLOCK_SIZE + lidy] = loadx < train_cols ? train[min(t * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx] : 0;
//synchronize to make sure each elem for reduceIteration in share memory is written already.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_multi_block(s_query, s_train, i, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
result = DIST_RES(result);
int trainIdx = t * BLOCK_SIZE + lidx;
if (queryIdx < query_rows && trainIdx < train_rows && result < myBestDistance/* && mask(queryIdx, trainIdx)*/)
{
myBestDistance = result;
myBestTrainIdx = trainIdx;
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float*)(sharebuffer);
__local int* s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE * BLOCK_SIZE);
//find BestMatch
s_distance += lidy * BLOCK_SIZE;
s_trainIdx += lidy * BLOCK_SIZE;
s_distance[lidx] = myBestDistance;
s_trainIdx[lidx] = myBestTrainIdx;
barrier(CLK_LOCAL_MEM_FENCE);
//reduce -- now all reduce implement in each threads.
#pragma unroll
for (int k = 0 ; k < BLOCK_SIZE; k++)
{
if (myBestDistance > s_distance[k])
{
myBestDistance = s_distance[k];
myBestTrainIdx = s_trainIdx[k];
}
}
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
__kernel void BruteForceMatch_Match(
__global T *query,
__global T *train,
//__global float *mask,
__global int *bestTrainIdx,
__global float *bestDistance,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int queryIdx = groupidx * BLOCK_SIZE + lidy;
float myBestDistance = MAX_FLOAT;
int myBestTrainIdx = -1;
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * BLOCK_SIZE;
// loop
for (int t = 0 ; t < (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE ; t++)
{
result_type result = 0;
for (int i = 0 ; i < (query_cols + BLOCK_SIZE - 1) / BLOCK_SIZE ; i++)
{
const int loadx = lidx + i * BLOCK_SIZE;
//load query and train into local memory
s_query[lidy * BLOCK_SIZE + lidx] = 0;
s_train[lidx * BLOCK_SIZE + lidy] = 0;
if (loadx < query_cols)
{
s_query[lidy * BLOCK_SIZE + lidx] = query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx];
s_train[lidx * BLOCK_SIZE + lidy] = train[min(t * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx];
}
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block_match(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
result = DIST_RES(result);
const int trainIdx = t * BLOCK_SIZE + lidx;
if (queryIdx < query_rows && trainIdx < train_rows && result < myBestDistance /*&& mask(queryIdx, trainIdx)*/)
{
myBestDistance = result;
myBestTrainIdx = trainIdx;
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float *)sharebuffer;
__local int *s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE * BLOCK_SIZE);
// find the best match across the work-group row
s_distance += lidy * BLOCK_SIZE;
s_trainIdx += lidy * BLOCK_SIZE;
s_distance[lidx] = myBestDistance;
s_trainIdx[lidx] = myBestTrainIdx;
barrier(CLK_LOCAL_MEM_FENCE);
// reduce -- every thread redundantly scans the full row, so each ends up with the row's best match.
for (int k = 0 ; k < BLOCK_SIZE; k++)
{
if (myBestDistance > s_distance[k])
{
myBestDistance = s_distance[k];
myBestTrainIdx = s_trainIdx[k];
}
}
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
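A hedged host-side sketch of how a kernel with this signature could be enqueued: one BLOCK_SIZE x BLOCK_SIZE work-group handles BLOCK_SIZE query rows, the __local sharebuffer needs room for the two BLOCK_SIZE x BLOCK_SIZE tiles, and step is the row pitch of the descriptor matrices in bytes. Everything below is illustrative plumbing, not code from this commit:

// Hedged sketch: set the arguments in the order of the kernel signature above
// (the commented-out mask is skipped) and launch one work-group per BLOCK_SIZE queries.
#include <CL/cl.h>

cl_int enqueueMatch(cl_command_queue queue, cl_kernel kernel,
                    cl_mem query, cl_mem train,
                    cl_mem bestTrainIdx, cl_mem bestDistance,
                    cl_int query_rows, cl_int query_cols,
                    cl_int train_rows, cl_int train_cols,
                    cl_int step, size_t blockSize)
{
    cl_uint i = 0;
    size_t localBytes = 2 * blockSize * blockSize * sizeof(cl_float); // s_query + s_train tiles
    clSetKernelArg(kernel, i++, sizeof(cl_mem), &query);
    clSetKernelArg(kernel, i++, sizeof(cl_mem), &train);
    clSetKernelArg(kernel, i++, sizeof(cl_mem), &bestTrainIdx);
    clSetKernelArg(kernel, i++, sizeof(cl_mem), &bestDistance);
    clSetKernelArg(kernel, i++, localBytes, NULL);               // __local sharebuffer
    clSetKernelArg(kernel, i++, sizeof(cl_int), &query_rows);
    clSetKernelArg(kernel, i++, sizeof(cl_int), &query_cols);
    clSetKernelArg(kernel, i++, sizeof(cl_int), &train_rows);
    clSetKernelArg(kernel, i++, sizeof(cl_int), &train_cols);
    clSetKernelArg(kernel, i++, sizeof(cl_int), &step);

    size_t local[2]  = { blockSize, blockSize };
    size_t groups    = ((size_t)query_rows + blockSize - 1) / blockSize;
    size_t global[2] = { groups * blockSize, blockSize };
    return clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global, local, 0, NULL, NULL);
}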
// radius match, unrolled (fixed MAX_DESC_LEN) variant
__kernel void BruteForceMatch_RadiusUnrollMatch(
__global T *query,
__global T *train,
float maxDistance,
//__global float *mask,
__global int *bestTrainIdx,
__global float *bestDistance,
__global int *nMatches,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int bestTrainIdx_cols,
int step,
int ostep
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int groupidy = get_group_id(1);
const int queryIdx = groupidy * BLOCK_SIZE + lidy;
const int trainIdx = groupidx * BLOCK_SIZE + lidx;
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * BLOCK_SIZE;
result_type result = 0;
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE ; ++i)
{
// load BLOCK_SIZE x BLOCK_SIZE tiles of query and train into local memory.
const int loadx = lidx + i * BLOCK_SIZE;
s_query[lidy * BLOCK_SIZE + lidx] = loadx < query_cols ? query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx] : 0;
s_train[lidx * BLOCK_SIZE + lidy] = loadx < query_cols ? train[min(groupidx * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx] : 0;
// synchronize to make sure every element needed by this reduce iteration has been written to local memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
if (queryIdx < query_rows && trainIdx < train_rows &&
convert_float(result) < maxDistance/* && mask(queryIdx, trainIdx)*/)
{
int ind = atom_inc(nMatches + queryIdx);
if(ind < bestTrainIdx_cols)
{
bestTrainIdx[queryIdx * (ostep / sizeof(int)) + ind] = trainIdx;
bestDistance[queryIdx * (ostep / sizeof(float)) + ind] = result;
}
}
}
// radius match, generic variant
__kernel void BruteForceMatch_RadiusMatch(
__global T *query,
__global T *train,
float maxDistance,
//__global float *mask,
__global int *bestTrainIdx,
__global float *bestDistance,
__global int *nMatches,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int bestTrainIdx_cols,
int step,
int ostep
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int groupidy = get_group_id(1);
const int queryIdx = groupidy * BLOCK_SIZE + lidy;
const int trainIdx = groupidx * BLOCK_SIZE + lidx;
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * BLOCK_SIZE;
result_type result = 0;
for (int i = 0 ; i < (query_cols + BLOCK_SIZE - 1) / BLOCK_SIZE ; ++i)
{
// load BLOCK_SIZE x BLOCK_SIZE tiles of query and train into local memory.
const int loadx = lidx + i * BLOCK_SIZE;
s_query[lidy * BLOCK_SIZE + lidx] = loadx < query_cols ? query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx] : 0;
s_train[lidx * BLOCK_SIZE + lidy] = loadx < query_cols ? train[min(groupidx * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx] : 0;
// synchronize to make sure every element needed by this reduce iteration has been written to local memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
if (queryIdx < query_rows && trainIdx < train_rows &&
convert_float(result) < maxDistance/* && mask(queryIdx, trainIdx)*/)
{
int ind = atom_inc(nMatches + queryIdx);
if(ind < bestTrainIdx_cols)
{
bestTrainIdx[queryIdx * (ostep / sizeof(int)) + ind] = trainIdx;
bestDistance[queryIdx * (ostep / sizeof(float)) + ind] = result;
}
}
}
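Both radius kernels append matches with atom_inc: nMatches[queryIdx] counts every candidate under maxDistance, but only the first bestTrainIdx_cols of them are actually stored per query row, so the host has to clamp the counter when reading the results back. A hedged sketch of that read-back step (buffer names and layout are assumptions for illustration):

// Hedged sketch: convert the raw radius-match output into per-query match lists.
// trainIdxHost / distanceHost are assumed to be the bestTrainIdx / bestDistance
// buffers read back row-major with bestTrainIdx_cols entries per query; entries
// that did not fit were dropped by the kernel, so the counter is clamped here.
#include <opencv2/features2d.hpp>
#include <algorithm>
#include <vector>

std::vector<std::vector<cv::DMatch> > collectRadiusMatches(
        const std::vector<int>&   nMatchesHost,
        const std::vector<int>&   trainIdxHost,
        const std::vector<float>& distanceHost,
        int query_rows, int bestTrainIdx_cols)
{
    std::vector<std::vector<cv::DMatch> > matches(query_rows);
    for (int q = 0; q < query_rows; q++)
    {
        int count = std::min(nMatchesHost[q], bestTrainIdx_cols); // kernel may have counted more than it stored
        for (int j = 0; j < count; j++)
        {
            int idx = q * bestTrainIdx_cols + j;
            matches[q].push_back(cv::DMatch(q, trainIdxHost[idx], distanceHost[idx]));
        }
    }
    return matches;
}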
__kernel void BruteForceMatch_knnUnrollMatch(
__global T *query,
__global T *train,
//__global float *mask,
__global int2 *bestTrainIdx,
__global float2 *bestDistance,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int queryIdx = groupidx * BLOCK_SIZE + lidy;
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * MAX_DESC_LEN;
// load the query into local memory.
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE; i ++)
{
int loadx = lidx + i * BLOCK_SIZE;
s_query[lidy * MAX_DESC_LEN + loadx] = loadx < query_cols ? query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx] : 0;
}
float myBestDistance1 = MAX_FLOAT;
float myBestDistance2 = MAX_FLOAT;
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
// unrolled loop over train tiles (query cached in local memory)
for (int t = 0 ; t < (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE ; t++)
{
result_type result = 0;
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE ; i++)
{
//load a BLOCK_SIZE * BLOCK_SIZE block into local train.
const int loadx = lidx + i * BLOCK_SIZE;
s_train[lidx * BLOCK_SIZE + lidy] = loadx < train_cols ? train[min(t * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx] : 0;
// synchronize to make sure every element needed by this reduce iteration has been written to local memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_multi_block(s_query, s_train, i, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
result = DIST_RES(result);
const int trainIdx = t * BLOCK_SIZE + lidx;
if (queryIdx < query_rows && trainIdx < train_rows)
{
if (result < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestDistance1 = result;
myBestTrainIdx1 = trainIdx;
}
else if (result < myBestDistance2)
{
myBestDistance2 = result;
myBestTrainIdx2 = trainIdx;
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float *)sharebuffer;
__local int *s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE * BLOCK_SIZE);
// find the best and second-best matches across the work-group row
s_distance += lidy * BLOCK_SIZE;
s_trainIdx += lidy * BLOCK_SIZE;
s_distance[lidx] = myBestDistance1;
s_trainIdx[lidx] = myBestTrainIdx1;
float bestDistance1 = MAX_FLOAT;
float bestDistance2 = MAX_FLOAT;
int bestTrainIdx1 = -1;
int bestTrainIdx2 = -1;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance1)
{
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestDistance1 = val;
bestTrainIdx1 = s_trainIdx[i];
}
else if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
s_distance[lidx] = myBestDistance2;
s_trainIdx[lidx] = myBestTrainIdx2;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
myBestDistance1 = bestDistance1;
myBestDistance2 = bestDistance2;
myBestTrainIdx1 = bestTrainIdx1;
myBestTrainIdx2 = bestTrainIdx2;
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = (int2)(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = (float2)(myBestDistance1, myBestDistance2);
}
}
__kernel void BruteForceMatch_knnMatch(
__global T *query,
__global T *train,
//__global float *mask,
__global int2 *bestTrainIdx,
__global float2 *bestDistance,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int queryIdx = groupidx * BLOCK_SIZE + lidy;
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * BLOCK_SIZE;
float myBestDistance1 = MAX_FLOAT;
float myBestDistance2 = MAX_FLOAT;
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
//loop
for (int t = 0 ; t < (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE ; t++)
{
result_type result = 0.0f;
for (int i = 0 ; i < (query_cols + BLOCK_SIZE -1) / BLOCK_SIZE ; i++)
{
const int loadx = lidx + i * BLOCK_SIZE;
//load query and train into local memory
s_query[lidy * BLOCK_SIZE + lidx] = 0;
s_train[lidx * BLOCK_SIZE + lidy] = 0;
if (loadx < query_cols)
{
s_query[lidy * BLOCK_SIZE + lidx] = query[min(queryIdx, query_rows - 1) * (step / sizeof(float)) + loadx];
s_train[lidx * BLOCK_SIZE + lidy] = train[min(t * BLOCK_SIZE + lidy, train_rows - 1) * (step / sizeof(float)) + loadx];
}
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block_match(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
result = DIST_RES(result);
const int trainIdx = t * BLOCK_SIZE + lidx;
if (queryIdx < query_rows && trainIdx < train_rows /*&& mask(queryIdx, trainIdx)*/)
{
if (result < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestDistance1 = result;
myBestTrainIdx1 = trainIdx;
}
else if (result < myBestDistance2)
{
myBestDistance2 = result;
myBestTrainIdx2 = trainIdx;
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float *)sharebuffer;
__local int *s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE * BLOCK_SIZE);
// find the best and second-best matches across the work-group row
s_distance += lidy * BLOCK_SIZE;
s_trainIdx += lidy * BLOCK_SIZE;
s_distance[lidx] = myBestDistance1;
s_trainIdx[lidx] = myBestTrainIdx1;
float bestDistance1 = MAX_FLOAT;
float bestDistance2 = MAX_FLOAT;
int bestTrainIdx1 = -1;
int bestTrainIdx2 = -1;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance1)
{
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestDistance1 = val;
bestTrainIdx1 = s_trainIdx[i];
}
else if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
s_distance[lidx] = myBestDistance2;
s_trainIdx[lidx] = myBestTrainIdx2;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
myBestDistance1 = bestDistance1;
myBestDistance2 = bestDistance2;
myBestTrainIdx1 = bestTrainIdx1;
myBestTrainIdx2 = bestTrainIdx2;
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = (int2)(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = (float2)(myBestDistance1, myBestDistance2);
}
}
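The two knn kernels keep the smallest and second-smallest distance per query, which is exactly the input Lowe's ratio test needs. At the OpenCV API level the same result is obtained with knnMatch(k = 2); a hedged sketch follows (the 0.8 ratio is an illustrative choice, not something defined by this code):

// Hedged sketch: apply Lowe's ratio test to a 2-NN match result obtained through
// the public BFMatcher API rather than the raw int2/float2 buffers above.
#include <opencv2/features2d.hpp>
#include <vector>

std::vector<cv::DMatch> ratioTest(const cv::Mat& queryDesc, const cv::Mat& trainDesc,
                                  int normType, float ratio = 0.8f)
{
    cv::BFMatcher matcher(normType);
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(queryDesc, trainDesc, knn, 2);      // two best matches per query row

    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); i++)
        if (knn[i].size() == 2 && knn[i][0].distance < ratio * knn[i][1].distance)
            good.push_back(knn[i][0]);
    return good;
}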
__kernel void BruteForceMatch_calcDistanceUnrolled(
__global T *query,
__global T *train,
//__global float *mask,
__global float *allDist,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step)
{
/* Todo */
}
__kernel void BruteForceMatch_calcDistance(
__global T *query,
__global T *train,
//__global float *mask,
__global float *allDist,
__local float *sharebuffer,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step)
{
/* Todo */
}
__kernel void BruteForceMatch_findBestMatch(
__global float *allDist,
__global int *bestTrainIdx,
__global float *bestDistance,
int k
)
{
/* Todo */
}
View File
@@ -575,6 +575,11 @@ int ORB::descriptorType() const
return CV_8U;
}
int ORB::defaultNorm() const
{
return NORM_HAMMING;
}
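A hedged usage sketch for the new defaultNorm() hook: callers can take the matching norm from the descriptor implementation instead of hard-coding NORM_HAMMING (the images and helper function below are illustrative, not part of this commit):

// Hedged sketch: pair the detector with a BFMatcher whose norm comes from defaultNorm().
#include <opencv2/features2d.hpp>
#include <vector>

std::vector<cv::DMatch> matchWithDefaultNorm(const cv::Mat& img1, const cv::Mat& img2)
{
    cv::ORB orb;
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat d1, d2;
    orb(img1, cv::noArray(), kp1, d1);
    orb(img2, cv::noArray(), kp2, d2);

    cv::BFMatcher matcher(orb.defaultNorm());   // NORM_HAMMING, as returned above
    std::vector<cv::DMatch> matches;
    matcher.match(d1, d2, matches);
    return matches;
}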
/** Compute the ORB features and descriptors on an image
* @param img the image to compute the features and descriptors on
* @param mask the mask to apply
@@ -938,12 +943,12 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>
}
}
void ORB::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
void ORB::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
(*this)(image, mask, keypoints, noArray(), false);
(*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
}
void ORB::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void ORB::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}
View File
@@ -48,6 +48,7 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/core/ocl.hpp"
#include <algorithm>
View File
@@ -441,9 +441,9 @@ StarDetector::StarDetector(int _maxSize, int _responseThreshold,
{}
void StarDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
void StarDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat grayImage = image;
Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
if( image.channels() > 1 ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
(*this)(grayImage, keypoints);