Merge pull request #360 from vpisarev:sift_fixes

Andrey Kamaev 2013-01-29 20:39:13 +04:00 committed by OpenCV Buildbot
commit daa02aaa98
5 changed files with 207 additions and 61 deletions

View File

@@ -3,4 +3,4 @@ if(BUILD_ANDROID_PACKAGE)
 endif()

 set(the_description "Functionality with possible limitations on the use")
-ocv_define_module(nonfree opencv_imgproc opencv_features2d)
+ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d)

View File

@@ -162,8 +162,24 @@ static const float SIFT_DESCR_MAG_THR = 0.2f;
 // factor used to convert floating-point descriptor to unsigned char
 static const float SIFT_INT_DESCR_FCTR = 512.f;

+#if 0
+// intermediate type used for DoG pyramids
+typedef short sift_wt;
 static const int SIFT_FIXPT_SCALE = 48;
+#else
+// intermediate type used for DoG pyramids
+typedef float sift_wt;
+static const int SIFT_FIXPT_SCALE = 1;
+#endif
+
+static inline void
+unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
+{
+    octave = kpt.octave & 255;
+    layer = (kpt.octave >> 8) & 255;
+    octave = octave < 128 ? octave : (-128 | octave);
+    scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
+}

 static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
 {
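
For reference, the new unpackOctave() helper reverses the packing used elsewhere in this file: detection stores kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16), and when the pyramid starts from a doubled image only the low byte is later re-encoded (see the firstOctave block further down), so values >= 128 have to be sign-extended back to a negative octave. A minimal standalone sketch of that round trip; plain ints stand in for cv::KeyPoint::octave and a manual rounding shortcut stands in for cvRound (both are assumptions for illustration):

    #include <cstdio>

    // Mirrors the unpackOctave() logic from the patch, but on a plain int.
    static void unpackOctaveField(int packed, int& octave, int& layer, float& scale)
    {
        octave = packed & 255;
        layer = (packed >> 8) & 255;
        octave = octave < 128 ? octave : (-128 | octave);
        scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
    }

    int main()
    {
        int octv = 0, layer = 2;     // values as produced during detection
        float xi = 0.2f;             // interpolated sub-layer offset
        int packed = octv + (layer << 8) + ((int)((xi + 0.5f)*255 + 0.5f) << 16);

        int firstOctave = -1;        // the image was upscaled 2x before the pyramid
        packed = (packed & ~255) | ((packed + firstOctave) & 255);

        int o, l; float s;
        unpackOctaveField(packed, o, l, s);
        printf("octave=%d layer=%d scale=%g\n", o, l, s);  // prints: octave=-1 layer=2 scale=2
        return 0;
    }
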
@@ -172,7 +188,7 @@ static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma
         cvtColor(img, gray, COLOR_BGR2GRAY);
     else
         img.copyTo(gray);
-    gray.convertTo(gray_fpt, CV_16S, SIFT_FIXPT_SCALE, 0);
+    gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);

     float sig_diff;
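
The convertTo() call (and the subtract() change in the next hunk) now take their destination depth from DataType<sift_wt>::type, so the single typedef controls the whole pyramid: it resolves to CV_32F with the active #else branch and would fall back to CV_16S if the fixed-point branch were re-enabled. A small sketch of that mapping (only core OpenCV is assumed):

    #include <opencv2/core/core.hpp>
    #include <cstdio>

    typedef float sift_wt;   // as in the #else branch above

    int main()
    {
        // DataType<T>::type maps a C++ element type to the matching Mat depth.
        printf("float -> CV_32F? %d\n", cv::DataType<float>::type == CV_32F);
        printf("short -> CV_16S? %d\n", cv::DataType<short>::type == CV_16S);

        cv::Mat gray(4, 4, CV_8U, cv::Scalar(10)), gray_fpt;
        gray.convertTo(gray_fpt, cv::DataType<sift_wt>::type, 1, 0);  // scale 1 = SIFT_FIXPT_SCALE
        printf("pyramid depth is CV_32F? %d\n", gray_fpt.depth() == CV_32F);
        return 0;
    }
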
@@ -245,7 +261,7 @@ void SIFT::buildDoGPyramid( const vector<Mat>& gpyr, vector<Mat>& dogpyr ) const
             const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
             const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
             Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
-            subtract(src2, src1, dst, noArray(), CV_16S);
+            subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
         }
     }
 }
@@ -276,8 +292,8 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius,
             if( x <= 0 || x >= img.cols - 1 )
                 continue;
-            float dx = (float)(img.at<short>(y, x+1) - img.at<short>(y, x-1));
-            float dy = (float)(img.at<short>(y-1, x) - img.at<short>(y+1, x));
+            float dx = (float)(img.at<sift_wt>(y, x+1) - img.at<sift_wt>(y, x-1));
+            float dy = (float)(img.at<sift_wt>(y-1, x) - img.at<sift_wt>(y+1, x));

             X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale;
             k++;
@@ -323,7 +339,7 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius,
 //
 // Interpolates a scale-space extremum's location and scale to subpixel
 // accuracy to form an image feature. Rejects features with low contrast.
 // Based on Section 4 of Lowe's paper.
 static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
                                 int& layer, int& r, int& c, int nOctaveLayers,
@@ -334,7 +350,7 @@ static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int o
     const float second_deriv_scale = img_scale;
     const float cross_deriv_scale = img_scale*0.25f;

-    float xi=0, xr=0, xc=0, contr;
+    float xi=0, xr=0, xc=0, contr=0;
     int i = 0;

     for( ; i < SIFT_MAX_INTERP_STEPS; i++ )
@@ -344,20 +360,20 @@ static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int o
         const Mat& prev = dog_pyr[idx-1];
         const Mat& next = dog_pyr[idx+1];

-        Vec3f dD((img.at<short>(r, c+1) - img.at<short>(r, c-1))*deriv_scale,
-                 (img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
-                 (next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
+        Vec3f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
+                 (img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
+                 (next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);

-        float v2 = (float)img.at<short>(r, c)*2;
-        float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
-        float dyy = (img.at<short>(r+1, c) + img.at<short>(r-1, c) - v2)*second_deriv_scale;
-        float dss = (next.at<short>(r, c) + prev.at<short>(r, c) - v2)*second_deriv_scale;
-        float dxy = (img.at<short>(r+1, c+1) - img.at<short>(r+1, c-1) -
-                     img.at<short>(r-1, c+1) + img.at<short>(r-1, c-1))*cross_deriv_scale;
-        float dxs = (next.at<short>(r, c+1) - next.at<short>(r, c-1) -
-                     prev.at<short>(r, c+1) + prev.at<short>(r, c-1))*cross_deriv_scale;
-        float dys = (next.at<short>(r+1, c) - next.at<short>(r-1, c) -
-                     prev.at<short>(r+1, c) + prev.at<short>(r-1, c))*cross_deriv_scale;
+        float v2 = (float)img.at<sift_wt>(r, c)*2;
+        float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
+        float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
+        float dss = (next.at<sift_wt>(r, c) + prev.at<sift_wt>(r, c) - v2)*second_deriv_scale;
+        float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
+                     img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1))*cross_deriv_scale;
+        float dxs = (next.at<sift_wt>(r, c+1) - next.at<sift_wt>(r, c-1) -
+                     prev.at<sift_wt>(r, c+1) + prev.at<sift_wt>(r, c-1))*cross_deriv_scale;
+        float dys = (next.at<sift_wt>(r+1, c) - next.at<sift_wt>(r-1, c) -
+                     prev.at<sift_wt>(r+1, c) + prev.at<sift_wt>(r-1, c))*cross_deriv_scale;

         Matx33f H(dxx, dxy, dxs,
                   dxy, dyy, dys,
@@ -369,20 +385,25 @@ static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int o
         xr = -X[1];
         xc = -X[0];

-        if( std::abs( xi ) < 0.5f && std::abs( xr ) < 0.5f && std::abs( xc ) < 0.5f )
+        if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f )
             break;

-        c += cvRound( xc );
-        r += cvRound( xr );
-        layer += cvRound( xi );
+        if( std::abs(xi) > (float)(INT_MAX/3) ||
+            std::abs(xr) > (float)(INT_MAX/3) ||
+            std::abs(xc) > (float)(INT_MAX/3) )
+            return false;
+
+        c += cvRound(xc);
+        r += cvRound(xr);
+        layer += cvRound(xi);

         if( layer < 1 || layer > nOctaveLayers ||
             c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER ||
             r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER )
             return false;
     }

-    /* ensure convergence of interpolation */
+    // ensure convergence of interpolation
     if( i >= SIFT_MAX_INTERP_STEPS )
         return false;
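
The added INT_MAX/3 test guards the case where the 3x3 solve diverges (e.g. a near-singular Hessian), leaving xc/xr/xi enormous; passing such a value to cvRound() can overflow the int pixel coordinates before the border check runs, so the candidate is rejected instead. A small sketch of the pattern with a hypothetical diverged offset:

    #include <opencv2/core/core.hpp>
    #include <climits>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        float xc = 3.9e9f;                       // hypothetical diverged offset
        if( std::abs(xc) > (float)(INT_MAX/3) )  // same guard as in the patch
        {
            printf("offset too large, candidate rejected\n");
            return 0;
        }
        int c = 100 + cvRound(xc);               // only safe once the guard has passed
        printf("c = %d\n", c);
        return 0;
    }
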
@@ -391,21 +412,21 @@ static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int o
         const Mat& img = dog_pyr[idx];
         const Mat& prev = dog_pyr[idx-1];
         const Mat& next = dog_pyr[idx+1];
-        Matx31f dD((img.at<short>(r, c+1) - img.at<short>(r, c-1))*deriv_scale,
-                   (img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
-                   (next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
+        Matx31f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
+                   (img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
+                   (next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);
         float t = dD.dot(Matx31f(xc, xr, xi));

-        contr = img.at<short>(r, c)*img_scale + t * 0.5f;
+        contr = img.at<sift_wt>(r, c)*img_scale + t * 0.5f;
         if( std::abs( contr ) * nOctaveLayers < contrastThreshold )
             return false;

-        /* principal curvatures are computed using the trace and det of Hessian */
-        float v2 = img.at<short>(r, c)*2.f;
-        float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
-        float dyy = (img.at<short>(r+1, c) + img.at<short>(r-1, c) - v2)*second_deriv_scale;
-        float dxy = (img.at<short>(r+1, c+1) - img.at<short>(r+1, c-1) -
-                     img.at<short>(r-1, c+1) + img.at<short>(r-1, c-1)) * cross_deriv_scale;
+        // principal curvatures are computed using the trace and det of Hessian
+        float v2 = img.at<sift_wt>(r, c)*2.f;
+        float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
+        float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
+        float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
+                     img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1)) * cross_deriv_scale;
         float tr = dxx + dyy;
         float det = dxx * dyy - dxy * dxy;
@@ -417,6 +438,7 @@ static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int o
     kpt.pt.y = (r + xr) * (1 << octv);
     kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
     kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
+    kpt.response = std::abs(contr);

     return true;
 }
@@ -448,13 +470,13 @@ void SIFT::findScaleSpaceExtrema( const vector<Mat>& gauss_pyr, const vector<Mat
             for( int r = SIFT_IMG_BORDER; r < rows-SIFT_IMG_BORDER; r++)
             {
-                const short* currptr = img.ptr<short>(r);
-                const short* prevptr = prev.ptr<short>(r);
-                const short* nextptr = next.ptr<short>(r);
+                const sift_wt* currptr = img.ptr<sift_wt>(r);
+                const sift_wt* prevptr = prev.ptr<sift_wt>(r);
+                const sift_wt* nextptr = next.ptr<sift_wt>(r);

                 for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++)
                 {
-                    int val = currptr[c];
+                    sift_wt val = currptr[c];

                     // find local extrema with pixel accuracy
                     if( std::abs(val) > threshold &&
@@ -541,11 +563,9 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
     for( i = -radius, k = 0; i <= radius; i++ )
         for( j = -radius; j <= radius; j++ )
         {
-            /*
-             Calculate sample's histogram array coords rotated relative to ori.
-             Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
-             r_rot = 1.5) have full weight placed in row 1 after interpolation.
-             */
+            // Calculate sample's histogram array coords rotated relative to ori.
+            // Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
+            // r_rot = 1.5) have full weight placed in row 1 after interpolation.
             float c_rot = j * cos_t - i * sin_t;
             float r_rot = j * sin_t + i * cos_t;
             float rbin = r_rot + d/2 - 0.5f;
@@ -553,10 +573,10 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
             int r = pt.y + i, c = pt.x + j;

            if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
                r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
            {
-                float dx = (float)(img.at<short>(r, c+1) - img.at<short>(r, c-1));
-                float dy = (float)(img.at<short>(r-1, c) - img.at<short>(r+1, c));
+                float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
+                float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
                 X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
                 W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
                 k++;
@@ -632,29 +652,46 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
         nrm2 += val*val;
     }
     nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON);
+
+#if 1
     for( k = 0; k < len; k++ )
     {
         dst[k] = saturate_cast<uchar>(dst[k]*nrm2);
     }
+#else
+    float nrm1 = 0;
+    for( k = 0; k < len; k++ )
+    {
+        dst[k] *= nrm2;
+        nrm1 += dst[k];
+    }
+    nrm1 = 1.f/std::max(nrm1, FLT_EPSILON);
+    for( k = 0; k < len; k++ )
+    {
+        dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast<uchar>(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR);
+    }
+#endif
 }

 static void calcDescriptors(const vector<Mat>& gpyr, const vector<KeyPoint>& keypoints,
-                            Mat& descriptors, int nOctaveLayers )
+                            Mat& descriptors, int nOctaveLayers, int firstOctave )
 {
     int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;

     for( size_t i = 0; i < keypoints.size(); i++ )
     {
         KeyPoint kpt = keypoints[i];
-        int octv=kpt.octave & 255, layer=(kpt.octave >> 8) & 255;
-        float scale = 1.f/(1 << octv);
+        int octave, layer;
+        float scale;
+        unpackOctave(kpt, octave, layer, scale);
+        CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
         float size=kpt.size*scale;
         Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
-        const Mat& img = gpyr[octv*(nOctaveLayers + 3) + layer];
+        const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];

         float angle = 360.f - kpt.angle;
         if(std::abs(angle - 360.f) < FLT_EPSILON)
             angle = 0.f;
         calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
     }
 }
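
The #if 1 / #else pair leaves two normalizations side by side: the active branch is the classic scheme (the histogram is clipped at 0.2 of its L2 norm earlier, then scaled by 512 into uchar range), while the disabled branch L1-normalizes and takes a per-bin square root, which resembles the "RootSIFT" variant where Euclidean distance on the result approximates a Hellinger comparison of the originals. A toy sketch of the disabled branch's core idea on a plain array (the 8-bin vector is made up and the preceding nrm2 scaling is omitted):

    #include <algorithm>
    #include <cfloat>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        float dst[8] = { 4, 1, 0, 9, 2, 2, 0, 7 };   // toy "descriptor"
        const int len = 8;

        float nrm1 = 0;
        for( int k = 0; k < len; k++ )
            nrm1 += dst[k];
        nrm1 = 1.f/std::max(nrm1, FLT_EPSILON);

        for( int k = 0; k < len; k++ )
            dst[k] = std::sqrt(dst[k]*nrm1);          // sqrt of the L1-normalized bins

        float sumsq = 0;
        for( int k = 0; k < len; k++ )
            sumsq += dst[k]*dst[k];
        printf("sum of squares after root-normalization: %g\n", sumsq);  // ~1
        return 0;
    }
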
@@ -691,6 +728,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
                       OutputArray _descriptors,
                       bool useProvidedKeypoints) const
 {
+    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
     Mat image = _image.getMat(), mask = _mask.getMat();

     if( image.empty() || image.depth() != CV_8U )
@@ -699,9 +737,28 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
     if( !mask.empty() && mask.type() != CV_8UC1 )
         CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

-    Mat base = createInitialImage(image, false, (float)sigma);
+    if( useProvidedKeypoints )
+    {
+        firstOctave = 0;
+        int maxOctave = INT_MIN;
+        for( size_t i = 0; i < keypoints.size(); i++ )
+        {
+            int octave, layer;
+            float scale;
+            unpackOctave(keypoints[i], octave, layer, scale);
+            firstOctave = std::min(firstOctave, octave);
+            maxOctave = std::max(maxOctave, octave);
+            actualNLayers = std::max(actualNLayers, layer-2);
+        }
+
+        firstOctave = std::min(firstOctave, 0);
+        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
+        actualNOctaves = maxOctave - firstOctave + 1;
+    }
+
+    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
     vector<Mat> gpyr, dogpyr;
-    int nOctaves = cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2);
+    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2) - firstOctave;

     //double t, tf = getTickFrequency();
     //t = (double)getTickCount();
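
When keypoints are supplied by the caller, the pyramid geometry is now derived from them instead of being recomputed from the image size alone. A hypothetical walk-through of that branch (the octave values are made up; the arithmetic matches the loop above):

    #include <algorithm>
    #include <climits>
    #include <cstdio>

    int main()
    {
        // assumed unpackOctave() results for five provided keypoints
        int octaves[] = { -1, 0, 0, 2, 3 };
        int firstOctave = 0, maxOctave = INT_MIN;
        for( int i = 0; i < 5; i++ )
        {
            firstOctave = std::min(firstOctave, octaves[i]);
            maxOctave = std::max(maxOctave, octaves[i]);
        }
        firstOctave = std::min(firstOctave, 0);
        int actualNOctaves = maxOctave - firstOctave + 1;
        // prints -1 and 5: the pyramid is rebuilt from the doubled image over 5 octaves
        printf("firstOctave=%d nOctaves=%d\n", firstOctave, actualNOctaves);
        return 0;
    }
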
@@ -724,6 +781,16 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
             KeyPointsFilter::retainBest(keypoints, nfeatures);
         //t = (double)getTickCount() - t;
         //printf("keypoint detection time: %g\n", t*1000./tf);
+
+        if( firstOctave < 0 )
+            for( size_t i = 0; i < keypoints.size(); i++ )
+            {
+                KeyPoint& kpt = keypoints[i];
+                float scale = 1.f/(float)(1 << -firstOctave);
+                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
+                kpt.pt *= scale;
+                kpt.size *= scale;
+            }
     }
     else
     {
@@ -738,7 +805,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
         _descriptors.create((int)keypoints.size(), dsize, CV_32F);
         Mat descriptors = _descriptors.getMat();

-        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers);
+        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
         //t = (double)getTickCount() - t;
         //printf("descriptor extraction time: %g\n", t*1000./tf);
     }

View File

@@ -40,6 +40,7 @@
 //M*/

 #include "test_precomp.hpp"
+#include "opencv2/calib3d/calib3d.hpp"

 using namespace std;
 using namespace cv;

@@ -1085,4 +1086,59 @@ TEST(Features2d_BruteForceDescriptorMatcher_knnMatch, regression)
     Ptr<DescriptorExtractor> s = DescriptorExtractor::create("SURF");
     ASSERT_STREQ(s->paramHelp("extended").c_str(), "");
 }
 */
+class CV_DetectPlanarTest : public cvtest::BaseTest
+{
+public:
+    CV_DetectPlanarTest(const string& _fname, int _min_ninliers) : fname(_fname), min_ninliers(_min_ninliers) {}
+protected:
+    void run(int)
+    {
+        Ptr<Feature2D> f = Algorithm::create<Feature2D>("Feature2D." + fname);
+        if(f.empty())
+            return;
+        string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/";
+        string imgname1 = path + "box.png";
+        string imgname2 = path + "box_in_scene.png";
+        Mat img1 = imread(imgname1, 0);
+        Mat img2 = imread(imgname2, 0);
+        if( img1.empty() || img2.empty() )
+        {
+            ts->printf( cvtest::TS::LOG, "missing %s and/or %s\n", imgname1.c_str(), imgname2.c_str());
+            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
+            return;
+        }
+        vector<KeyPoint> kpt1, kpt2;
+        Mat d1, d2;
+        f->operator()(img1, Mat(), kpt1, d1);
+        f->operator()(img2, Mat(), kpt2, d2);
+        vector<DMatch> matches;
+        BFMatcher(NORM_L2, true).match(d1, d2, matches);
+        vector<Point2f> pt1, pt2;
+        for( size_t i = 0; i < matches.size(); i++ ) {
+            pt1.push_back(kpt1[matches[i].queryIdx].pt);
+            pt2.push_back(kpt2[matches[i].trainIdx].pt);
+        }
+        Mat inliers, H = findHomography(pt1, pt2, RANSAC, 10, inliers);
+        int ninliers = countNonZero(inliers);
+        if( ninliers < min_ninliers )
+        {
+            ts->printf( cvtest::TS::LOG, "too little inliers (%d) vs expected %d\n", ninliers, min_ninliers);
+            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
+            return;
+        }
+    }
+
+    string fname;
+    int min_ninliers;
+};
+
+TEST(Features2d_SIFTHomographyTest, regression) { CV_DetectPlanarTest test("SIFT", 80); test.safe_run(); }
+//TEST(Features2d_SURFHomographyTest, regression) { CV_DetectPlanarTest test("SURF", 80); test.safe_run(); }
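
The new regression test exercises the full pipeline: detect and describe on both views of a planar scene, brute-force matching with cross-check, then a RANSAC homography whose inlier count is the pass criterion. A rough standalone equivalent of the same pattern with the 2.4-era API is sketched below; the image paths and the reprojection threshold are placeholders:

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/nonfree/nonfree.hpp>
    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>
    #include <cstdio>

    int main()
    {
        cv::Mat img1 = cv::imread("box.png", 0);            // placeholder paths
        cv::Mat img2 = cv::imread("box_in_scene.png", 0);
        if( img1.empty() || img2.empty() ) return 1;

        cv::SIFT sift;
        std::vector<cv::KeyPoint> kpt1, kpt2;
        cv::Mat d1, d2;
        sift(img1, cv::Mat(), kpt1, d1);                    // detect + describe
        sift(img2, cv::Mat(), kpt2, d2);

        std::vector<cv::DMatch> matches;
        cv::BFMatcher(cv::NORM_L2, true).match(d1, d2, matches);  // cross-check enabled

        std::vector<cv::Point2f> pt1, pt2;
        for( size_t i = 0; i < matches.size(); i++ )
        {
            pt1.push_back(kpt1[matches[i].queryIdx].pt);
            pt2.push_back(kpt2[matches[i].trainIdx].pt);
        }
        cv::Mat inliers;
        cv::Mat H = cv::findHomography(pt1, pt2, cv::RANSAC, 10, inliers);
        printf("RANSAC inliers: %d of %d matches\n",
               cv::countNonZero(inliers), (int)matches.size());
        return H.empty() ? 1 : 0;
    }
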

View File

@@ -186,6 +186,20 @@ void matchKeyPoints(const vector<KeyPoint>& keypoints0, const Mat& H,
     }
 }

+static void removeVerySmallKeypoints(vector<KeyPoint>& keypoints)
+{
+    size_t i, j = 0, n = keypoints.size();
+    for( i = 0; i < n; i++ )
+    {
+        if( (keypoints[i].octave & 128) != 0 )
+            ;
+        else
+            keypoints[j++] = keypoints[i];
+    }
+    keypoints.resize(j);
+}
+
 class DetectorRotationInvarianceTest : public cvtest::BaseTest
 {
 public:
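
A note on the new filter: (keypoints[i].octave & 128) != 0 is true exactly when the packed octave's low byte is >= 128, i.e. when unpackOctave() would report a negative octave, which marks a keypoint detected on the 2x upsampled image; those very small features are presumably the least stable under the rotation/scale perturbations, so the tests drop them before matching. A tiny sketch of the predicate on made-up octave fields:

    #include <cstdio>

    int main()
    {
        // packed octave fields encoding octaves 0, 1, -1, -2 (layer bits where present)
        int packed[] = { 0, 1 + (3 << 8), 255 + (2 << 8), 254 };
        for( int i = 0; i < 4; i++ )
        {
            int lowByte = packed[i] & 255;
            int octave = lowByte < 128 ? lowByte : (-128 | lowByte);
            bool dropped = (packed[i] & 128) != 0;   // same test as removeVerySmallKeypoints()
            printf("octave %d -> %s\n", octave, dropped ? "dropped" : "kept");
        }
        return 0;
    }
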
@@ -216,6 +230,7 @@ protected:
         vector<KeyPoint> keypoints0;
         featureDetector->detect(image0, keypoints0);
+        removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
             CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
@@ -226,6 +241,7 @@ protected:
         vector<KeyPoint> keypoints1;
         featureDetector->detect(image1, keypoints1, mask1);
+        removeVerySmallKeypoints(keypoints1);

         vector<DMatch> matches;
         matchKeyPoints(keypoints0, H, keypoints1, matches);
@@ -329,6 +345,7 @@ protected:
         vector<KeyPoint> keypoints0;
         Mat descriptors0;
         featureDetector->detect(image0, keypoints0);
+        removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
             CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
         descriptorExtractor->compute(image0, keypoints0, descriptors0);
@@ -382,6 +399,7 @@ protected:
     float minDescInliersRatio;
 };

+
 class DetectorScaleInvarianceTest : public cvtest::BaseTest
 {
 public:
@@ -412,6 +430,7 @@ protected:
         vector<KeyPoint> keypoints0;
         featureDetector->detect(image0, keypoints0);
+        removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
             CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
@@ -423,6 +442,7 @@ protected:
         vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
         featureDetector->detect(image1, keypoints1);
+        removeVerySmallKeypoints(keypoints1);
         if(keypoints1.size() < 15)
             CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
@@ -531,6 +551,7 @@ protected:
         vector<KeyPoint> keypoints0;
         featureDetector->detect(image0, keypoints0);
+        removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
             CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");

         Mat descriptors0;
@@ -603,8 +624,8 @@ TEST(Features2d_RotationInvariance_Detector_SURF, regression)
 TEST(Features2d_RotationInvariance_Detector_SIFT, regression)
 {
     DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
-                                        0.75f,
-                                        0.76f);
+                                        0.45f,
+                                        0.70f);
     test.safe_run();
 }
@@ -665,7 +686,7 @@ TEST(Features2d_ScaleInvariance_Descriptor_SIFT, regression)
     DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
                                        Algorithm::create<DescriptorExtractor>("Feature2D.SIFT"),
                                        NORM_L1,
-                                       0.87f);
+                                       0.78f);
     test.safe_run();
 }

View File

@@ -221,6 +221,8 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
         drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
                      DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
 #endif
+
+        printf("Number of inliers: %d\n", countNonZero(matchesMask));
     }
     else
         drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );