Merge branch 'master' of https://github.com/Itseez/opencv
@@ -5,7 +5,7 @@
 SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
     opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
-    opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
+    opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
     opencv_stitching opencv_videostab opencv_shape)

 ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
File diff suppressed because it is too large
@@ -1,81 +0,0 @@
/*
 * FGBGTest.cpp
 *
 *  Created on: May 7, 2012
 *      Author: Andrew B. Godbehere
 */

#include "opencv2/video.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>

using namespace cv;

static void help()
{
    std::cout <<
    "\nA program demonstrating the use and capabilities of a particular BackgroundSubtraction\n"
    "algorithm described in A. Godbehere, A. Matsukawa, K. Goldberg, \n"
    "\"Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive\n"
    "Audio Art Installation\", American Control Conference, 2012, used in an interactive\n"
    "installation at the Contemporary Jewish Museum in San Francisco, CA from March 31 through\n"
    "July 31, 2011.\n"
    "Call:\n"
    "./BackgroundSubtractorGMG_sample\n"
    "Using OpenCV version " << CV_VERSION << "\n" << std::endl;
}

int main(int argc, char** argv)
{
    help();

    initModule_video();
    setUseOptimized(true);
    setNumThreads(8);

    Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
    if (!fgbg)
    {
        std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
        return -1;
    }

    VideoCapture cap;
    if (argc > 1)
        cap.open(argv[1]);
    else
        cap.open(0);

    if (!cap.isOpened())
    {
        std::cerr << "Cannot read video. Try moving video file to sample directory." << std::endl;
        return -1;
    }

    Mat frame, fgmask, segm;

    namedWindow("FG Segmentation", WINDOW_NORMAL);

    for (;;)
    {
        cap >> frame;

        if (frame.empty())
            break;

        fgbg->apply(frame, fgmask);

        frame.convertTo(segm, CV_8U, 0.5);
        add(frame, Scalar(100, 100, 0), segm, fgmask);

        imshow("FG Segmentation", segm);

        int c = waitKey(30);
        if (c == 'q' || c == 'Q' || (c & 255) == 27)
            break;
    }

    return 0;
}
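Note on the deletion above: this sample depended on createBackgroundSubtractorGMG, which is gone from the main modules; the GMG algorithm (Godbehere-Matsukawa-Goldberg) lives on in opencv_contrib's bgsegm module. A minimal sketch of the same loop against that module, assuming an OpenCV 3.x build with opencv_contrib installed:

    // Sketch only: assumes the opencv_contrib bgsegm module is available.
    #include "opencv2/bgsegm.hpp"
    #include "opencv2/videoio.hpp"
    #include "opencv2/highgui.hpp"

    int main()
    {
        cv::VideoCapture cap(0);
        if (!cap.isOpened())
            return -1;

        // Same tuning as the removed sample: 20 initialization frames, 0.7 decision threshold.
        cv::Ptr<cv::BackgroundSubtractor> fgbg =
            cv::bgsegm::createBackgroundSubtractorGMG(20, 0.7);

        cv::Mat frame, fgmask;
        for (;;)
        {
            cap >> frame;
            if (frame.empty())
                break;
            fgbg->apply(frame, fgmask);         // update the model, get the FG mask
            cv::imshow("FG Segmentation", fgmask);
            if ((cv::waitKey(30) & 255) == 27)  // ESC quits
                break;
        }
        return 0;
    }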
@@ -21,6 +21,8 @@ static void help()
 const char* keys =
 {
     "{c camera | | use camera or not}"
+    "{m method |mog2 | method (knn or mog2) }"
     "{s smooth | | smooth the mask }"
     "{fn file_name|tree.avi | movie file }"
 };

@@ -31,7 +33,9 @@ int main(int argc, const char** argv)

     CommandLineParser parser(argc, argv, keys);
     bool useCamera = parser.has("camera");
     bool smoothMask = parser.has("smooth");
     string file = parser.get<string>("file_name");
+    string method = parser.get<string>("method");
     VideoCapture cap;
     bool update_bg_model = true;

@@ -53,24 +57,31 @@ int main(int argc, const char** argv)
     namedWindow("foreground image", WINDOW_NORMAL);
     namedWindow("mean background image", WINDOW_NORMAL);

-    Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
+    Ptr<BackgroundSubtractor> bg_model = method == "knn" ?
+            createBackgroundSubtractorKNN().dynamicCast<BackgroundSubtractor>() :
+            createBackgroundSubtractorMOG2().dynamicCast<BackgroundSubtractor>();

-    Mat img, fgmask, fgimg;
+    Mat img0, img, fgmask, fgimg;

     for(;;)
     {
-        cap >> img;
+        cap >> img0;

-        if( img.empty() )
+        if( img0.empty() )
             break;

         //cvtColor(_img, img, COLOR_BGR2GRAY);
+        resize(img0, img, Size(640, 640*img0.rows/img0.cols), INTER_LINEAR);

         if( fgimg.empty() )
             fgimg.create(img.size(), img.type());

         //update the model
         bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
         if( smoothMask )
         {
             GaussianBlur(fgmask, fgmask, Size(11, 11), 3.5, 3.5);
             threshold(fgmask, fgmask, 10, 255, THRESH_BINARY);
         }

         fgimg = Scalar::all(0);
         img.copyTo(fgimg, fgmask);
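Note on the bg_model hunk above: the two factories return different Ptr<> specializations (Ptr<BackgroundSubtractorKNN> vs. Ptr<BackgroundSubtractorMOG2>), so the conditional operator needs dynamicCast to give both branches the common Ptr<BackgroundSubtractor> type. A compilable sketch; the useKNN flag is a hypothetical stand-in for the parsed "method" option:

    #include "opencv2/video.hpp"

    int main()
    {
        bool useKNN = true;  // stand-in for: parser.get<string>("method") == "knn"
        cv::Ptr<cv::BackgroundSubtractor> bg = useKNN
            ? cv::createBackgroundSubtractorKNN().dynamicCast<cv::BackgroundSubtractor>()
            : cv::createBackgroundSubtractorMOG2().dynamicCast<cv::BackgroundSubtractor>();
        return bg.empty() ? 1 : 0;
    }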
@@ -1,305 +0,0 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"

#include <iostream>

using namespace cv;
using namespace std;

static void help(char** argv)
{
    cout << "\nThis program demonstrates keypoint finding and matching between 2 images using features2d framework.\n"
         << "  In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
         << "\n"
         << "Case1: second image is obtained from the first (given) image using random generated homography matrix\n"
         << argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image] [evaluate(0 or 1)]\n"
         << "Example of case1:\n"
         << "./descriptor_extractor_matcher SURF SURF FlannBased NoneFilter cola.jpg 0\n"
         << "\n"
         << "Case2: both images are given. If ransacReprojThreshold>=0 then the homography matrix is calculated\n"
         << argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image1] [image2] [ransacReprojThreshold]\n"
         << "\n"
         << "Matches are filtered using homography matrix in case1 and case2 (if ransacReprojThreshold>=0)\n"
         << "Example of case2:\n"
         << "./descriptor_extractor_matcher SURF SURF BruteForce CrossCheckFilter cola1.jpg cola2.jpg 3\n"
         << "\n"
         << "Possible detectorType values: see in documentation on createFeatureDetector().\n"
         << "Possible descriptorType values: see in documentation on createDescriptorExtractor().\n"
         << "Possible matcherType values: see in documentation on createDescriptorMatcher().\n"
         << "Possible matcherFilterType values: NoneFilter, CrossCheckFilter." << endl;
}

#define DRAW_RICH_KEYPOINTS_MODE 0
#define DRAW_OUTLIERS_MODE 0

const string winName = "correspondences";

enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };

static int getMatcherFilterType( const string& str )
{
    if( str == "NoneFilter" )
        return NONE_FILTER;
    if( str == "CrossCheckFilter" )
        return CROSS_CHECK_FILTER;
    CV_Error(Error::StsBadArg, "Invalid filter name");
    return -1;
}

static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
                            const Mat& descriptors1, const Mat& descriptors2,
                            vector<DMatch>& matches12 )
{
    descriptorMatcher->match( descriptors1, descriptors2, matches12 );
}

static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
                                const Mat& descriptors1, const Mat& descriptors2,
                                vector<DMatch>& filteredMatches12, int knn=1 )
{
    filteredMatches12.clear();
    vector<vector<DMatch> > matches12, matches21;
    descriptorMatcher->knnMatch( descriptors1, descriptors2, matches12, knn );
    descriptorMatcher->knnMatch( descriptors2, descriptors1, matches21, knn );
    for( size_t m = 0; m < matches12.size(); m++ )
    {
        bool findCrossCheck = false;
        for( size_t fk = 0; fk < matches12[m].size(); fk++ )
        {
            DMatch forward = matches12[m][fk];

            for( size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++ )
            {
                DMatch backward = matches21[forward.trainIdx][bk];
                if( backward.trainIdx == forward.queryIdx )
                {
                    filteredMatches12.push_back(forward);
                    findCrossCheck = true;
                    break;
                }
            }
            if( findCrossCheck ) break;
        }
    }
}

static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
    H.create(3, 3, CV_32FC1);
    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
    H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
    H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);

    warpPerspective( src, dst, H, src.size() );
}

static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
                         vector<KeyPoint>& keypoints1, const Mat& descriptors1,
                         Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
                         Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
                         double ransacReprojThreshold, RNG& rng )
{
    CV_Assert( !img1.empty() );
    Mat H12;
    if( isWarpPerspective )
        warpPerspectiveRand(img1, img2, H12, rng );
    else
        CV_Assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );

    cout << endl << "< Extracting keypoints from second image..." << endl;
    vector<KeyPoint> keypoints2;
    detector->detect( img2, keypoints2 );
    cout << keypoints2.size() << " points" << endl << ">" << endl;

    if( !H12.empty() && eval )
    {
        cout << "< Evaluate feature detector..." << endl;
        float repeatability;
        int correspCount;
        evaluateFeatureDetector( img1, img2, H12, &keypoints1, &keypoints2, repeatability, correspCount );
        cout << "repeatability = " << repeatability << endl;
        cout << "correspCount = " << correspCount << endl;
        cout << ">" << endl;
    }

    cout << "< Computing descriptors for keypoints from second image..." << endl;
    Mat descriptors2;
    descriptorExtractor->compute( img2, keypoints2, descriptors2 );
    cout << ">" << endl;

    cout << "< Matching descriptors..." << endl;
    vector<DMatch> filteredMatches;
    switch( matcherFilter )
    {
    case CROSS_CHECK_FILTER :
        crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
        break;
    default :
        simpleMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches );
    }
    cout << ">" << endl;

    if( !H12.empty() && eval )
    {
        cout << "< Evaluate descriptor matcher..." << endl;
        vector<Point2f> curve;
        Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
        evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );

        Point2f firstPoint = *curve.begin();
        Point2f lastPoint = *curve.rbegin();
        int prevPointIndex = -1;
        cout << "1-precision = " << firstPoint.x << "; recall = " << firstPoint.y << endl;
        for( float l_p = 0; l_p <= 1 + FLT_EPSILON; l_p+=0.05f )
        {
            int nearest = getNearestPoint( curve, l_p );
            if( nearest >= 0 )
            {
                Point2f curPoint = curve[nearest];
                if( curPoint.x > firstPoint.x && curPoint.x < lastPoint.x && nearest != prevPointIndex )
                {
                    cout << "1-precision = " << curPoint.x << "; recall = " << curPoint.y << endl;
                    prevPointIndex = nearest;
                }
            }
        }
        cout << "1-precision = " << lastPoint.x << "; recall = " << lastPoint.y << endl;
        cout << ">" << endl;
    }

    vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
    for( size_t i = 0; i < filteredMatches.size(); i++ )
    {
        queryIdxs[i] = filteredMatches[i].queryIdx;
        trainIdxs[i] = filteredMatches[i].trainIdx;
    }

    if( !isWarpPerspective && ransacReprojThreshold >= 0 )
    {
        cout << "< Computing homography (RANSAC)..." << endl;
        vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
        vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
        H12 = findHomography( Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold );
        cout << ">" << endl;
    }

    Mat drawImg;
    if( !H12.empty() ) // filter outliers
    {
        vector<char> matchesMask( filteredMatches.size(), 0 );
        vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
        vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
        Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

        double maxInlierDist = ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;
        for( size_t i1 = 0; i1 < points1.size(); i1++ )
        {
            if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                matchesMask[i1] = 1;
        }
        // draw inliers
        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
                     , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
                   );

#if DRAW_OUTLIERS_MODE
        // draw outliers
        for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
            matchesMask[i1] = !matchesMask[i1];
        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
                     DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif

        cout << "Number of inliers: " << countNonZero(matchesMask) << endl;
    }
    else
        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );

    imshow( winName, drawImg );
}


int main(int argc, char** argv)
{
    if( argc != 7 && argc != 8 )
    {
        help(argv);
        return -1;
    }

    cv::initModule_nonfree();

    bool isWarpPerspective = argc == 7;
    double ransacReprojThreshold = -1;
    if( !isWarpPerspective )
        ransacReprojThreshold = atof(argv[7]);

    cout << "< Creating detector, descriptor extractor and descriptor matcher ..." << endl;
    Ptr<FeatureDetector> detector = FeatureDetector::create( argv[1] );
    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( argv[2] );
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( argv[3] );
    int matcherFilterType = getMatcherFilterType( argv[4] );
    bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
    cout << ">" << endl;
    if( !detector || !descriptorExtractor || !descriptorMatcher )
    {
        cout << "Cannot create detector or descriptor extractor or descriptor matcher of given types" << endl;
        return -1;
    }

    cout << "< Reading the images..." << endl;
    Mat img1 = imread( argv[5] ), img2;
    if( !isWarpPerspective )
        img2 = imread( argv[6] );
    cout << ">" << endl;
    if( img1.empty() || (!isWarpPerspective && img2.empty()) )
    {
        cout << "Cannot read images" << endl;
        return -1;
    }

    cout << endl << "< Extracting keypoints from first image..." << endl;
    vector<KeyPoint> keypoints1;
    detector->detect( img1, keypoints1 );
    cout << keypoints1.size() << " points" << endl << ">" << endl;

    cout << "< Computing descriptors for keypoints from first image..." << endl;
    Mat descriptors1;
    descriptorExtractor->compute( img1, keypoints1, descriptors1 );
    cout << ">" << endl;

    namedWindow(winName, 1);
    RNG rng = theRNG();
    doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
                 detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
                 ransacReprojThreshold, rng );
    for(;;)
    {
        char c = (char)waitKey(0);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            break;
        }
        else if( isWarpPerspective )
        {
            doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
                         detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
                         ransacReprojThreshold, rng );
        }
    }
    return 0;
}
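Note on the deletion above: besides the nonfree dependency, this sample leaned on the string-based factories (FeatureDetector::create("SURF") and friends), which were dropped in OpenCV 3.x in favour of per-algorithm static create() methods. A rough sketch of the detect/compute/cross-check pipeline with plain 3.x APIs, using ORB instead of SURF (BFMatcher's crossCheck flag plays the role of crossCheckMatching with knn = 1):

    #include "opencv2/features2d.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include <vector>

    int main(int argc, char** argv)
    {
        if (argc != 3) return -1;
        cv::Mat img1 = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
        cv::Mat img2 = cv::imread(argv[2], cv::IMREAD_GRAYSCALE);
        if (img1.empty() || img2.empty()) return -1;

        cv::Ptr<cv::Feature2D> orb = cv::ORB::create();  // detector and extractor in one object
        std::vector<cv::KeyPoint> k1, k2;
        cv::Mat d1, d2;
        orb->detectAndCompute(img1, cv::noArray(), k1, d1);
        orb->detectAndCompute(img2, cv::noArray(), k2, d2);

        cv::BFMatcher matcher(cv::NORM_HAMMING, /*crossCheck=*/true);
        std::vector<cv::DMatch> matches;
        matcher.match(d1, d2, matches);
        return matches.empty() ? 1 : 0;
    }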
@@ -37,7 +37,8 @@ int main(int, char**)
     if( !cap.isOpened() )
         return -1;

-    Mat prevgray, gray, flow, cflow, frame;
+    Mat flow, cflow, frame;
+    UMat gray, prevgray, uflow;
     namedWindow("flow", 1);

     for(;;)

@@ -45,10 +46,11 @@ int main(int, char**)
         cap >> frame;
         cvtColor(frame, gray, COLOR_BGR2GRAY);

-        if( prevgray.data )
+        if( !prevgray.empty() )
         {
-            calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+            calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3, 15, 3, 5, 1.2, 0);
             cvtColor(prevgray, cflow, COLOR_GRAY2BGR);
+            uflow.copyTo(flow);
             drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
             imshow("flow", cflow);
         }
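Note on the fback.cpp hunks above: moving gray, prevgray and uflow to UMat routes calcOpticalFlowFarneback through the T-API, so it can run on OpenCL when a device is available; the result is downloaded into a plain Mat only where per-element access is needed (drawOptFlowMap). A minimal sketch of that pattern, assuming a default camera:

    #include "opencv2/video/tracking.hpp"
    #include "opencv2/imgproc.hpp"
    #include "opencv2/videoio.hpp"

    int main()
    {
        cv::VideoCapture cap(0);
        if (!cap.isOpened()) return -1;

        cv::Mat frame, flow;
        cv::UMat gray, prevgray, uflow;  // UMat arguments let the T-API dispatch to OpenCL
        for (;;)
        {
            cap >> frame;
            if (frame.empty()) break;
            cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
            if (!prevgray.empty())
            {
                cv::calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3, 15, 3, 5, 1.2, 0);
                uflow.copyTo(flow);      // download only for CPU-side per-pixel access
            }
            cv::swap(prevgray, gray);
        }
        return 0;
    }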
@@ -1,204 +0,0 @@
#include "opencv2/video/tracking_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <time.h>
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>  /* malloc */
#include <string.h>  /* strlen, memset */
#include <math.h>    /* cos, sin */

static void help(void)
{
    printf(
        "\nThis program demonstrates the use of motion templates -- basically using the gradients\n"
        "of thresholded layers of decaying frame differencing. New movements are stamped on top with floating system\n"
        "time code and motions too old are thresholded away. This is the 'motion history image'. The program reads from the camera of your choice or from\n"
        "a file. Gradients of motion history are used to detect direction of motion etc\n"
        "Usage :\n"
        "./motempl [camera number 0-n or file name, default is camera 0]\n"
        );
}
// various tracking parameters (in seconds)
const double MHI_DURATION = 1;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
const int N = 4;

// ring image buffer
IplImage **buf = 0;
int last = 0;

// temporary images
IplImage *mhi = 0; // MHI
IplImage *orient = 0; // orientation
IplImage *mask = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage

// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}


int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    help();

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    if( capture )
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image = cvQueryFrame( capture );
            if( !image )
                break;

            if( !motion )
            {
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin;
            }

            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            if( cvWaitKey(10) >= 0 )
                break;
        }
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}

#ifdef _EiC
main(1,"motempl.c");
#endif
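Note on the deletion above: this was a pure C-API sample (IplImage, cvUpdateMotionHistory, ...). The motion-template functions later resurfaced with C++ signatures in opencv_contrib's optflow module under cv::motempl; the sketch below assumes that module is available and mirrors the two central calls of update_mhi():

    // Sketch only: assumes the opencv_contrib optflow module (cv::motempl).
    #include "opencv2/optflow.hpp"

    void updateMHI(const cv::Mat& silhouette, cv::Mat& mhi, double timestamp)
    {
        const double MHI_DURATION = 1, MAX_TIME_DELTA = 0.5, MIN_TIME_DELTA = 0.05;
        // mhi must be CV_32FC1 and the same size as the binary silhouette
        cv::motempl::updateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION);

        cv::Mat mask, orient;
        cv::motempl::calcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    }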
@@ -88,8 +88,8 @@ int main(int argc, char** argv)
     namedWindow("video", 1);
     namedWindow("segmented", 1);

-    Ptr<BackgroundSubtractorMOG> bgsubtractor=createBackgroundSubtractorMOG();
-    bgsubtractor->setNoiseSigma(10);
+    Ptr<BackgroundSubtractorMOG2> bgsubtractor=createBackgroundSubtractorMOG2();
+    bgsubtractor->setVarThreshold(10);

     for(;;)
     {
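Note on the hunk above: setVarThreshold is the MOG2 counterpart of the old tuning knob; the original MOG model with its noiseSigma parameter was not deleted outright but moved to opencv_contrib's bgsegm module. A sketch, assuming that module:

    #include "opencv2/bgsegm.hpp"

    int main()
    {
        cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> mog =
            cv::bgsegm::createBackgroundSubtractorMOG();
        mog->setNoiseSigma(10);  // the parameter the deleted line used to set
        return mog.empty() ? 1 : 0;
    }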
@@ -1,75 +0,0 @@
/*
 * shape_context.cpp -- Shape context demo for shape matching
 */

#include "opencv2/shape.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <string>

using namespace std;
using namespace cv;

static void help()
{
    printf("\nThis program demonstrates how to use common interface for shape transformers\n"
           "Call\n"
           "shape_transformation [image1] [image2]\n");
}

int main(int argc, char** argv)
{
    help();
    if(argc < 3)  // check arguments before dereferencing argv[1]/argv[2]
    {
        printf("Not enough parameters\n");
        return -1;
    }
    Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
    Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
    if(img1.empty() || img2.empty())
    {
        printf("Can't read one of the images\n");
        return -1;
    }

    // detecting keypoints
    SurfFeatureDetector detector(5000);
    vector<KeyPoint> keypoints1, keypoints2;
    detector.detect(img1, keypoints1);
    detector.detect(img2, keypoints2);

    // computing descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);

    // matching descriptors
    BFMatcher matcher(extractor.defaultNorm());
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    // drawing the results
    namedWindow("matches", 1);
    Mat img_matches;
    drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
    imshow("matches", img_matches);

    // extract points
    vector<Point2f> pts1, pts2;
    for (size_t ii=0; ii<keypoints1.size(); ii++)
        pts1.push_back( keypoints1[ii].pt );
    for (size_t ii=0; ii<keypoints2.size(); ii++)
        pts2.push_back( keypoints2[ii].pt );

    // Apply TPS
    Ptr<ThinPlateSplineShapeTransformer> mytps = createThinPlateSplineShapeTransformer(25000); // TPS with a relaxed constraint
    mytps->estimateTransformation(pts1, pts2, matches);
    mytps->warpImage(img2, img2);

    imshow("Transformed", img2);
    waitKey(0);

    return 0;
}
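Note on the deletion above: the sample was removed because it pulled in SURF from nonfree; the ThinPlateSplineShapeTransformer it demonstrated stayed in the shape module. A sketch of the same TPS flow with plain OpenCV 3.x, substituting ORB for SURF:

    #include "opencv2/shape.hpp"
    #include "opencv2/features2d.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include <vector>

    int main(int argc, char** argv)
    {
        if (argc < 3) return -1;
        cv::Mat img1 = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
        cv::Mat img2 = cv::imread(argv[2], cv::IMREAD_GRAYSCALE);
        if (img1.empty() || img2.empty()) return -1;

        cv::Ptr<cv::ORB> orb = cv::ORB::create(5000);
        std::vector<cv::KeyPoint> k1, k2;
        cv::Mat d1, d2;
        orb->detectAndCompute(img1, cv::noArray(), k1, d1);
        orb->detectAndCompute(img2, cv::noArray(), k2, d2);

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(d1, d2, matches);

        std::vector<cv::Point2f> pts1, pts2;
        for (size_t i = 0; i < k1.size(); i++) pts1.push_back(k1[i].pt);
        for (size_t i = 0; i < k2.size(); i++) pts2.push_back(k2[i].pt);

        cv::Ptr<cv::ThinPlateSplineShapeTransformer> tps =
            cv::createThinPlateSplineShapeTransformer(25000);  // relaxed constraint, as before
        tps->estimateTransformation(pts1, pts2, matches);
        tps->warpImage(img2, img2);
        return 0;
    }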
@@ -1,221 +0,0 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <cstdio>
#include <iostream>

using namespace cv;
using namespace std;

#define APP_NAME "simpleflow_demo : "

static void help()
{
    // print a welcome message, and the OpenCV version
    printf("This is a demo of SimpleFlow optical flow algorithm,\n"
           "Using OpenCV version %s\n\n", CV_VERSION);

    printf("Usage: simpleflow_demo frame1 frame2 output_flow"
           "\nApplication will write estimated flow "
           "\nbetween 'frame1' and 'frame2' in binary format"
           "\ninto file 'output_flow'"
           "\nThen one can use code from http://vision.middlebury.edu/flow/data/"
           "\nto convert flow in binary file to image\n");
}

// binary file format for flow data specified here:
// http://vision.middlebury.edu/flow/data/
static void writeOpticalFlowToFile(const Mat& flow, FILE* file) {
    int cols = flow.cols;
    int rows = flow.rows;

    fprintf(file, "PIEH");

    if (fwrite(&cols, sizeof(int), 1, file) != 1 ||
        fwrite(&rows, sizeof(int), 1, file) != 1) {
        printf(APP_NAME "writeOpticalFlowToFile : problem writing header\n");
        exit(1);
    }

    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);

            if (fwrite(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
                fwrite(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
                printf(APP_NAME "writeOpticalFlowToFile : problem writing data\n");
                exit(1);
            }
        }
    }
}

static void run(int argc, char** argv) {
    if (argc < 3) {
        printf(APP_NAME "Wrong number of command line arguments for mode `run`: %d (expected %d)\n",
               argc, 3);
        exit(1);
    }

    Mat frame1 = imread(argv[0]);
    Mat frame2 = imread(argv[1]);

    if (frame1.empty()) {
        printf(APP_NAME "Image #1 : %s cannot be read\n", argv[0]);
        exit(1);
    }

    if (frame2.empty()) {
        printf(APP_NAME "Image #2 : %s cannot be read\n", argv[1]);
        exit(1);
    }

    if (frame1.rows != frame2.rows || frame1.cols != frame2.cols) {  // was &&: both dimensions must match
        printf(APP_NAME "Images should be of equal sizes\n");
        exit(1);
    }

    if (frame1.type() != CV_8UC3 || frame2.type() != CV_8UC3) {  // was the magic number 16
        printf(APP_NAME "Images should be of equal type CV_8UC3\n");
        exit(1);
    }

    printf(APP_NAME "Read two images of size [rows = %d, cols = %d]\n",
           frame1.rows, frame1.cols);

    Mat flow;

    float start = (float)getTickCount();
    calcOpticalFlowSF(frame1, frame2,
                      flow,
                      3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10);
    printf(APP_NAME "calcOpticalFlowSF : %lf sec\n", (getTickCount() - start) / getTickFrequency());

    FILE* file = fopen(argv[2], "wb");
    if (file == NULL) {
        printf(APP_NAME "Unable to open file '%s' for writing\n", argv[2]);
        exit(1);
    }
    printf(APP_NAME "Writing to file\n");
    writeOpticalFlowToFile(flow, file);
    fclose(file);
}

static bool readOpticalFlowFromFile(FILE* file, Mat& flow) {
    char header[5];
    if (fread(header, 1, 4, file) != 4) {  // was `< 4 && ...`, which could accept a short read
        return false;
    }
    header[4] = '\0';
    if ((string)header != "PIEH") {
        return false;
    }

    int cols, rows;
    if (fread(&cols, sizeof(int), 1, file) != 1 ||
        fread(&rows, sizeof(int), 1, file) != 1) {
        return false;
    }

    flow = Mat::zeros(rows, cols, CV_32FC2);

    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            Vec2f flow_at_point;
            if (fread(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
                fread(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
                return false;
            }
            flow.at<Vec2f>(i, j) = flow_at_point;
        }
    }

    return true;
}

static bool isFlowCorrect(float u) {
    return !cvIsNaN(u) && (fabs(u) < 1e9);
}

static float calc_rmse(Mat flow1, Mat flow2) {
    float sum = 0;
    int counter = 0;
    const int rows = flow1.rows;
    const int cols = flow1.cols;

    for (int y = 0; y < rows; ++y) {
        for (int x = 0; x < cols; ++x) {
            Vec2f flow1_at_point = flow1.at<Vec2f>(y, x);
            Vec2f flow2_at_point = flow2.at<Vec2f>(y, x);

            float u1 = flow1_at_point[0];
            float v1 = flow1_at_point[1];
            float u2 = flow2_at_point[0];
            float v2 = flow2_at_point[1];

            if (isFlowCorrect(u1) && isFlowCorrect(u2) && isFlowCorrect(v1) && isFlowCorrect(v2)) {
                sum += (u1-u2)*(u1-u2) + (v1-v2)*(v1-v2);
                counter++;
            }
        }
    }
    return (float)sqrt(sum / (1e-9 + counter));
}

static void eval(int argc, char** argv) {
    if (argc < 2) {
        printf(APP_NAME "Wrong number of command line arguments for mode `eval` : %d (expected %d)\n",
               argc, 2);
        exit(1);
    }

    Mat flow1, flow2;

    FILE* flow_file_1 = fopen(argv[0], "rb");
    if (flow_file_1 == NULL) {
        printf(APP_NAME "Cannot open file with first flow : %s\n", argv[0]);
        exit(1);
    }
    if (!readOpticalFlowFromFile(flow_file_1, flow1)) {
        printf(APP_NAME "Cannot read flow data from file %s\n", argv[0]);
        exit(1);
    }
    fclose(flow_file_1);

    FILE* flow_file_2 = fopen(argv[1], "rb");
    if (flow_file_2 == NULL) {
        printf(APP_NAME "Cannot open file with second flow : %s\n", argv[1]);
        exit(1);
    }
    if (!readOpticalFlowFromFile(flow_file_2, flow2)) {
        printf(APP_NAME "Cannot read flow data from file %s\n", argv[1]);
        exit(1);
    }
    fclose(flow_file_2);

    float rmse = calc_rmse(flow1, flow2);
    printf("%f\n", rmse);
}

int main(int argc, char** argv) {
    if (argc < 2) {
        printf(APP_NAME "Mode is not specified\n");
        help();
        exit(1);
    }
    string mode = (string)argv[1];
    int new_argc = argc - 2;
    char** new_argv = &argv[2];

    if ("run" == mode) {
        run(new_argc, new_argv);
    } else if ("eval" == mode) {
        eval(new_argc, new_argv);
    } else if ("help" == mode)
        help();
    else {
        printf(APP_NAME "Unknown mode : %s\n", argv[1]);
        help();
    }

    return 0;
}
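Note on the deletion above: calcOpticalFlowSF (SimpleFlow) moved to opencv_contrib's optflow module. A sketch using the short overload, assuming that module; the three trailing numbers are the same leading parameters (layers, averaging block size, max flow) as the removed sample's long call:

    #include "opencv2/optflow.hpp"
    #include "opencv2/imgcodecs.hpp"

    int main(int argc, char** argv)
    {
        if (argc < 3) return 1;
        cv::Mat f1 = cv::imread(argv[1]), f2 = cv::imread(argv[2]), flow;
        if (f1.empty() || f2.empty()) return 1;
        cv::optflow::calcOpticalFlowSF(f1, f2, flow, 3, 2, 4);
        return 0;
    }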
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,102 +0,0 @@
/**
 * @file SURF_FlannMatcher
 * @brief SURF detector + descriptor + FLANN Matcher
 * @author A. Huaman
 */

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"

using namespace std;
using namespace cv;

void readme();

/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }

    Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
    Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );

    if( !img_1.data || !img_2.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_1, keypoints_2;

    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_1, descriptors_2;

    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_1.rows; i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
    //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
    //-- small)
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_1.rows; i++ )
    { if( matches[i].distance <= max(2*min_dist, 0.02) )
      { good_matches.push_back( matches[i]); }
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Show detected matches
    imshow( "Good Matches", img_matches );

    for( int i = 0; i < (int)good_matches.size(); i++ )
    { printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

    waitKey(0);

    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
@@ -1,126 +0,0 @@
/**
 * @file SURF_Homography
 * @brief SURF detector + descriptor + FLANN Matcher + FindHomography
 * @author A. Huaman
 */

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"

using namespace std;
using namespace cv;

void readme();

/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }

    Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
    Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );

    if( !img_object.data || !img_scene.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_object.rows; i++ )
    { if( matches[i].distance < 3*min_dist )
      { good_matches.push_back( matches[i]); }
    }

    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


    //-- Localize the object from img_1 in img_2
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    Mat H = findHomography( obj, scene, RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
    obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);


    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    Point2f offset( (float)img_object.cols, 0);
    line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );

    waitKey(0);

    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_Homography <img1> <img2>" << std::endl; }
@@ -1,73 +0,0 @@
/**
 * @file SURF_descriptor
 * @brief SURF detector + descriptor + BruteForce Matcher + drawing matches with OpenCV functions
 * @author A. Huaman
 */

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"

using namespace cv;

void readme();

/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { return -1; }

    Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
    Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );

    if( !img_1.data || !img_2.data )
    { return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_1, keypoints_2;

    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_1, descriptors_2;

    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors with a brute force matcher
    BFMatcher matcher(extractor.defaultNorm());
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );

    //-- Draw matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );

    //-- Show detected matches
    imshow("Matches", img_matches );

    waitKey(0);

    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
@@ -1,63 +0,0 @@
/**
 * @file SURF_detector
 * @brief SURF keypoint detection + keypoint drawing with OpenCV functions
 * @author A. Huaman
 */

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"

using namespace cv;

void readme();

/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }

    Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
    Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );

    if( !img_1.data || !img_2.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_1, keypoints_2;

    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );

    //-- Draw keypoints
    Mat img_keypoints_1; Mat img_keypoints_2;

    drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    //-- Show detected (drawn) keypoints
    imshow("Keypoints 1", img_keypoints_1 );
    imshow("Keypoints 2", img_keypoints_2 );

    waitKey(0);

    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }
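Note on the four SURF sample deletions above: SURF itself was not dropped, it moved to opencv_contrib's xfeatures2d module, where SurfFeatureDetector/SurfDescriptorExtractor collapse into one SURF class. A sketch of the shared detect + compute + FLANN-match skeleton, assuming that module:

    #include "opencv2/xfeatures2d.hpp"
    #include "opencv2/features2d.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include <vector>

    int main(int argc, char** argv)
    {
        if (argc != 3) return -1;
        cv::Mat img1 = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
        cv::Mat img2 = cv::imread(argv[2], cv::IMREAD_GRAYSCALE);
        if (img1.empty() || img2.empty()) return -1;

        int minHessian = 400;  // same threshold as the removed samples
        cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(minHessian);

        std::vector<cv::KeyPoint> k1, k2;
        cv::Mat d1, d2;
        surf->detectAndCompute(img1, cv::noArray(), k1, d1);
        surf->detectAndCompute(img2, cv::noArray(), k2, d2);

        cv::FlannBasedMatcher matcher;  // SURF descriptors are float, so FLANN applies directly
        std::vector<cv::DMatch> matches;
        matcher.match(d1, d2, matches);
        return 0;
    }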
@@ -20,9 +20,7 @@ using namespace std;
 // Global variables
 Mat frame; //current frame
-Mat fgMaskMOG; //fg mask generated by MOG method
 Mat fgMaskMOG2; //fg mask generated by MOG2 method
-Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
 Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
 int keyboard; //input from keyboard

@@ -63,11 +61,9 @@ int main(int argc, char* argv[])

     //create GUI windows
     namedWindow("Frame");
-    namedWindow("FG Mask MOG");
     namedWindow("FG Mask MOG 2");

     //create Background Subtractor objects
-    pMOG = createBackgroundSubtractorMOG(); //MOG approach
     pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach

     if(strcmp(argv[1], "-vid") == 0) {

@@ -109,7 +105,6 @@ void processVideo(char* videoFilename) {
         exit(EXIT_FAILURE);
     }
     //update the background model
-    pMOG->apply(frame, fgMaskMOG);
     pMOG2->apply(frame, fgMaskMOG2);
     //get the frame number and write it on the current frame
     stringstream ss;

@@ -121,7 +116,6 @@ void processVideo(char* videoFilename) {
             FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
     //show the current frame and the fg masks
     imshow("Frame", frame);
-    imshow("FG Mask MOG", fgMaskMOG);
     imshow("FG Mask MOG 2", fgMaskMOG2);
     //get the input from the keyboard
     keyboard = waitKey( 30 );

@@ -146,7 +140,6 @@ void processImages(char* fistFrameFilename) {
     //read input data. ESC or 'q' for quitting
     while( (char)keyboard != 'q' && (char)keyboard != 27 ){
         //update the background model
-        pMOG->apply(frame, fgMaskMOG);
         pMOG2->apply(frame, fgMaskMOG2);
         //get the frame number and write it on the current frame
         size_t index = fn.find_last_of("/");

@@ -166,7 +159,6 @@ void processImages(char* fistFrameFilename) {
             FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
     //show the current frame and the fg masks
     imshow("Frame", frame);
-    imshow("FG Mask MOG", fgMaskMOG);
     imshow("FG Mask MOG 2", fgMaskMOG2);
     //get the input from the keyboard
     keyboard = waitKey( 30 );
@@ -1,234 +0,0 @@
/*
 * video_homography.cpp
 *
 *  Created on: Oct 18, 2010
 *      Author: erublee
 */

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <iostream>
#include <list>
#include <vector>

using namespace std;
using namespace cv;

static void help(char **av)
{
    cout << "\nThis program demonstrates the use of features2d with the Fast corner detector and brief descriptors\n"
         << "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl;
    cout << "usage: " << av[0] << " <video device number>\n" << endl;
    cout << "The following keys do stuff:" << endl;
    cout << "  t : grabs a reference frame to match against" << endl;
    cout << "  l : makes the reference frame new every frame" << endl;
    cout << "  q or escape: quit" << endl;
}

namespace
{
    void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
                             std::vector<cv::DMatch>& matches, Mat& img,
                             const vector<unsigned char>& mask = vector<unsigned char> ())
    {
        for (int i = 0; i < (int)matches.size(); i++)
        {
            if (mask.empty() || mask[i])
            {
                Point2f pt_new = query[matches[i].queryIdx].pt;
                Point2f pt_old = train[matches[i].trainIdx].pt;

                cv::line(img, pt_new, pt_old, Scalar(125, 255, 125), 1);
                cv::circle(img, pt_new, 2, Scalar(255, 0, 125), 1);
            }
        }
    }

    //Takes a descriptor and turns it into an xy point
    void keypoints2points(const vector<KeyPoint>& in, vector<Point2f>& out)
    {
        out.clear();
        out.reserve(in.size());
        for (size_t i = 0; i < in.size(); ++i)
        {
            out.push_back(in[i].pt);
        }
    }

    //Takes an xy point and appends that to a keypoint structure
    void points2keypoints(const vector<Point2f>& in, vector<KeyPoint>& out)
    {
        out.clear();
        out.reserve(in.size());
        for (size_t i = 0; i < in.size(); ++i)
        {
            out.push_back(KeyPoint(in[i], 1));
        }
    }

    //Uses computed homography H to warp original input points to new planar position
    void warpKeypoints(const Mat& H, const vector<KeyPoint>& in, vector<KeyPoint>& out)
    {
        vector<Point2f> pts;
        keypoints2points(in, pts);
        vector<Point2f> pts_w(pts.size());
        Mat m_pts_w(pts_w);
        perspectiveTransform(Mat(pts), m_pts_w, H);
        points2keypoints(pts_w, out);
    }

    //Converts matching indices to xy points
    void matches2points(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
                        const std::vector<cv::DMatch>& matches, std::vector<cv::Point2f>& pts_train,
                        std::vector<Point2f>& pts_query)
    {
        pts_train.clear();
        pts_query.clear();
        pts_train.reserve(matches.size());
        pts_query.reserve(matches.size());

        size_t i = 0;

        for (; i < matches.size(); i++)
        {
            const DMatch & dmatch = matches[i];

            pts_query.push_back(query[dmatch.queryIdx].pt);
            pts_train.push_back(train[dmatch.trainIdx].pt);
        }
    }

    void resetH(Mat& H)
    {
        H = Mat::eye(3, 3, CV_32FC1);
    }
}

int main(int ac, char ** av)
{
    if (ac != 2)
    {
        help(av);
        return 1;
    }

    BriefDescriptorExtractor brief(32);

    VideoCapture capture;
    capture.open(atoi(av[1]));
    if (!capture.isOpened())
    {
        help(av);
        cout << "capture device " << atoi(av[1]) << " failed to open!" << endl;
        return 1;
    }

    cout << "following keys do stuff:" << endl;
    cout << "t : grabs a reference frame to match against" << endl;
    cout << "l : makes the reference frame new every frame" << endl;
    cout << "q or escape: quit" << endl;

    Mat frame;

    vector<DMatch> matches;

    BFMatcher desc_matcher(brief.defaultNorm());

    vector<Point2f> train_pts, query_pts;
    vector<KeyPoint> train_kpts, query_kpts;
    vector<unsigned char> match_mask;

    Mat gray;

    bool ref_live = true;

    Mat train_desc, query_desc;
    const int DESIRED_FTRS = 500;
    GridAdaptedFeatureDetector detector(makePtr<FastFeatureDetector>(10, true), DESIRED_FTRS, 4, 4);

    Mat H_prev = Mat::eye(3, 3, CV_32FC1);
    for (;;)
    {
        capture >> frame;
        if (frame.empty())
            break;

        cvtColor(frame, gray, COLOR_RGB2GRAY);

        detector.detect(gray, query_kpts); //Find interest points

        brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location

        if (!train_kpts.empty())
        {
            vector<KeyPoint> test_kpts;
            warpKeypoints(H_prev.inv(), query_kpts, test_kpts);

            Mat mask = windowedMatchingMask(test_kpts, train_kpts, 25, 25);
            desc_matcher.match(query_desc, train_desc, matches, mask);
            drawKeypoints(frame, test_kpts, frame, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG);

            matches2points(train_kpts, query_kpts, matches, train_pts, query_pts);

            if (matches.size() > 5)
            {
                Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
                if (countNonZero(Mat(match_mask)) > 15)
                {
                    H_prev = H;
                }
                else
                    resetH(H_prev);
                drawMatchesRelative(train_kpts, query_kpts, matches, frame, match_mask);
            }
            else
                resetH(H_prev);
        }
        else
        {
            H_prev = Mat::eye(3, 3, CV_32FC1);
            Mat out;
            drawKeypoints(gray, query_kpts, out);
            frame = out;
        }

        imshow("frame", frame);

        if (ref_live)
        {
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
        }
        char key = (char)waitKey(2);
        switch (key)
        {
        case 'l':
            ref_live = true;
            resetH(H_prev);
            break;
        case 't':
            ref_live = false;
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
            resetH(H_prev);
            break;
        case 27:
        case 'q':
            return 0;
            break;
        }
    }
    return 0;
}
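Note on the deletion above: this sample used GridAdaptedFeatureDetector, BriefDescriptorExtractor and windowedMatchingMask, helpers that (to the best of my knowledge) did not survive into plain OpenCV 3.x. A rough stand-in sketch of the track-against-reference-frame loop using ORB, which bundles an oriented-FAST detector with a BRIEF-like descriptor and a feature budget similar to DESIRED_FTRS:

    #include "opencv2/features2d.hpp"
    #include "opencv2/calib3d.hpp"
    #include "opencv2/imgproc.hpp"
    #include "opencv2/videoio.hpp"
    #include <cstdlib>
    #include <vector>

    int main(int argc, char** argv)
    {
        if (argc != 2) return 1;
        cv::VideoCapture cap(std::atoi(argv[1]));
        if (!cap.isOpened()) return 1;

        cv::Ptr<cv::ORB> orb = cv::ORB::create(500);  // ~DESIRED_FTRS from the sample
        cv::BFMatcher matcher(cv::NORM_HAMMING);

        cv::Mat frame, gray, train_desc, query_desc;
        std::vector<cv::KeyPoint> train_kpts, query_kpts;
        std::vector<cv::Point2f> train_pts, query_pts;
        std::vector<cv::DMatch> matches;

        for (;;)
        {
            cap >> frame;
            if (frame.empty()) break;
            cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
            orb->detectAndCompute(gray, cv::noArray(), query_kpts, query_desc);

            if (!train_kpts.empty() && !query_kpts.empty())
            {
                matcher.match(query_desc, train_desc, matches);
                train_pts.clear(); query_pts.clear();
                for (size_t i = 0; i < matches.size(); i++)
                {
                    train_pts.push_back(train_kpts[matches[i].trainIdx].pt);
                    query_pts.push_back(query_kpts[matches[i].queryIdx].pt);
                }
                if (matches.size() > 5)
                {
                    cv::Mat H = cv::findHomography(train_pts, query_pts, cv::RANSAC, 4);
                    (void)H;  // a full port would warp/draw the matches with H
                }
            }
            // "live reference" behaviour of the deleted sample: retrain every frame
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
        }
        return 0;
    }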