Reverted the samples that used the new command-line argument parser; this work will be continued after the OpenCV release.
@@ -1,4 +1,3 @@
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
@@ -28,26 +27,29 @@ const string bowImageDescriptorsDir = "/bowImageDescriptors";
const string svmsDir = "/svms";
const string plotsDir = "/plots";

void help()
void help(char** argv)
{
printf("\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
"It shows how to use detectors, descriptors and recognition methods \n"
"Usage: \n"
"Format:\n"
"./bagofwords_classification \n"
"--voc_path=<Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). \n"
" Note: VOC2007-VOC2010 are supported.> \n"
"--result_directory=<Path to result directory. Following folders will be created in [result directory]: \n"
" bowImageDescriptors - to store image descriptors, \n"
" svms - to store trained svms, \n"
" plots - to store files for plots creating. \n"
"[--feature_detector]=<Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
" Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS> \n"
"[--descriptor_extractor]=<Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
" Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF> \n"
"[--descriptor_matcher]=<Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
" Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT> \n"
"\n");
cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
<< "It shows how to use detectors, descriptors and recognition methods \n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< "Call: \n"
<< "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n"
<< " or: \n"
<< " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n"
<< "\n"
<< "Input parameters: \n"
<< "[VOC path] Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n"
<< "[result directory] Path to result diractory. Following folders will be created in [result directory]: \n"
<< " bowImageDescriptors - to store image descriptors, \n"
<< " svms - to store trained svms, \n"
<< " plots - to store files for plots creating. \n"
<< "[feature detector] Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
<< " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n"
<< "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
<< " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n"
<< "[descriptor matcher] Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
<< " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n"
<< "\n";
}

@@ -2505,24 +2507,16 @@ void computeGnuPlotOutput( const string& resPath, const string& objClassName, Vo
int main(int argc, const char** argv)
int main(int argc, char** argv)
{
help();

CommandLineParser parser(argc, argv);

const string vocPath = parser.get<string>("--voc_path");
const string resPath = parser.get<string>("--result_directory");
const string featureDetectName = parser.get<string>("--feature_detector");
const string descExtName = parser.get<string>("--descriptor_extractor");
const string descMatchName = parser.get<string>("--descriptor_matcher");

if( vocPath.empty() || resPath.empty())
if( argc != 3 && argc != 6 )
{
help();
printf("Cannot find --voc_path=%s or --result_directory=%s\n", vocPath.c_str(), resPath.c_str());
help(argv);
return -1;
}

const string vocPath = argv[1], resPath = argv[2];

// Read or set default parameters
string vocName;
DDMParams ddmParams;
@@ -2540,12 +2534,12 @@ int main(int argc, const char** argv)
else
{
vocName = getVocName(vocPath);
if( featureDetectName.empty() || descExtName.empty() || descMatchName.empty())
if( argc!= 6 )
{
cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl;
return -1;
}
ddmParams = DDMParams( featureDetectName.c_str(), descExtName.c_str(), descMatchName.c_str()); // from command line
ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line
// vocabTrainParams and svmTrainParamsExt is set by defaults
paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE );
if( paramsFS.isOpened() )
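The reverted bagofwords main() above drops CommandLineParser in favour of positional arguments: argv[1] is the VOC path, argv[2] the result directory, and argv[3..5] the optional detector/extractor/matcher names. For reference, a minimal stand-alone sketch of that argument handling (the defaults used below are illustrative placeholders, not the sample's own):

    #include <cstdio>
    #include <string>

    int main(int argc, char** argv)
    {
        // Expect either 2 or 5 positional arguments, as in the reverted sample.
        if (argc != 3 && argc != 6)
        {
            std::printf("usage: %s <VOC path> <result dir> "
                        "[<detector> <extractor> <matcher>]\n", argv[0]);
            return -1;
        }
        const std::string vocPath = argv[1], resPath = argv[2];
        std::string detector = "SURF", extractor = "SURF", matcher = "BruteForce"; // placeholder defaults
        if (argc == 6)
        {
            detector  = argv[3];
            extractor = argv[4];
            matcher   = argv[5];
        }
        std::printf("VOC: %s, results: %s, %s / %s / %s\n",
                    vocPath.c_str(), resPath.c_str(),
                    detector.c_str(), extractor.c_str(), matcher.c_str());
        return 0;
    }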
@@ -1,40 +1,32 @@
#include "opencv2/core/core.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>

using namespace cv;
using namespace std;

void help()
{
printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n"
" Learns the background at the start and then segments.\n"
" Learning is togged by the space key. Will read from file or camera\n"
"Usage: \n"
" ./bgfg_segm [--file_name]=<input file, camera as defautl>\n\n");
"Learns the background at the start and then segments.\n"
"Learning is togged by the space key. Will read from file or camera\n"
"Call:\n"
"./ bgfg_segm [file name -- if no name, read from camera]\n\n");
}

//this is a sample for foreground detection functions
int main(int argc, const char** argv)
int main(int argc, char** argv)
{
help();

CommandLineParser parser(argc, argv);

string fileName = parser.get<string>("file_name", "0");
VideoCapture cap;
bool update_bg_model = true;


if(fileName == "0" )
if( argc < 2 )
cap.open(0);
else
cap.open(fileName.c_str());

cap.open(argv[1]);
help();

if( !cap.isOpened() )
{
help();
printf("can not open camera or video file\n");
return -1;
}
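The reverted bgfg_segm sample switches between camera and file input purely on argc. A stripped-down sketch of that open-camera-or-file pattern, assuming the OpenCV highgui VideoCapture API shown above (the segmentation loop is omitted):

    #include "opencv2/highgui/highgui.hpp"
    #include <cstdio>

    int main(int argc, char** argv)
    {
        cv::VideoCapture cap;
        if (argc < 2)
            cap.open(0);          // no argument: default camera
        else
            cap.open(argv[1]);    // otherwise treat the argument as a file name

        if (!cap.isOpened())
        {
            std::printf("can not open camera or video file\n");
            return -1;
        }

        cv::Mat frame;
        cap >> frame;             // grab one frame to confirm the source works
        std::printf("got frame: %dx%d\n", frame.cols, frame.rows);
        return 0;
    }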
@@ -4,7 +4,6 @@
* Created on: Oct 17, 2010
* Author: ethan
*/
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@@ -12,7 +11,6 @@
#include <vector>
#include <iostream>

using namespace std;
using namespace cv;

using std::cout;
@@ -20,15 +18,13 @@ using std::cerr;
using std::endl;
using std::vector;

void help()
{
printf("\nThis program shows how to use BRIEF descriptor to match points in features2d\n"
"It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
"Usage: \n"
" ./brief_match_test [--first_file]=<first file name, left01.jpg as default> \n"
" [--second_file]=<second file name, left02.jpg as default> \n"
"Example: \n"
"./brief_match_test --first_file=left01.jpg --second_file=left02.jpg \n");
void help(char **av)
{
cerr << "usage: " << av[0] << " im1.jpg im2.jpg"
<< "\n"
<< "This program shows how to use BRIEF descriptor to match points in features2d\n"
<< "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
<< endl;
}

//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
@@ -59,22 +55,16 @@ double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*k

int main(int ac, const char ** av)
int main(int ac, char ** av)
{
help();

CommandLineParser parser(ac, av);

string im1_name, im2_name;
im1_name = parser.get<string>("first_file", "left01.jpg");
im2_name = parser.get<string>("second_file", "left02.jpg");

if (im1_name.empty() || im2_name.empty())
if (ac != 3)
{
help();
printf("\n You have to indicate two files first_file and second_file \n");
return -1;
help(av);
return 1;
}
string im1_name, im2_name;
im1_name = av[1];
im2_name = av[2];

Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE);
Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE);
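The reverted brief_match_test now takes two positional image names. A condensed sketch of the FAST-plus-BRIEF matching flow the sample demonstrates, using the OpenCV 2.x features2d classes; the FAST threshold and 32-byte descriptor size are illustrative, and this is not the sample's own matching code:

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include <iostream>
    #include <vector>
    using namespace cv;
    using namespace std;

    int main(int ac, char** av)
    {
        if (ac != 3)
        {
            cerr << "usage: " << av[0] << " im1.jpg im2.jpg" << endl;
            return 1;
        }
        Mat im1 = imread(av[1], CV_LOAD_IMAGE_GRAYSCALE);
        Mat im2 = imread(av[2], CV_LOAD_IMAGE_GRAYSCALE);
        if (im1.empty() || im2.empty())
        {
            cerr << "could not read one of the images" << endl;
            return 1;
        }
        // FAST keypoints + BRIEF descriptors, matched with a Hamming-distance matcher.
        FastFeatureDetector detector(50);
        BriefDescriptorExtractor extractor(32);   // 32-byte descriptors
        vector<KeyPoint> kpts1, kpts2;
        detector.detect(im1, kpts1);
        detector.detect(im2, kpts2);
        Mat desc1, desc2;
        extractor.compute(im1, kpts1, desc1);
        extractor.compute(im2, kpts2, desc2);
        vector<DMatch> matches;
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
        matcher->match(desc1, desc2, matches);
        cout << "matches: " << matches.size() << endl;
        return 0;
    }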
@@ -9,59 +9,69 @@
using namespace cv;
using namespace std;

const char * usage =
" \nexample command line for calibration from a live feed.\n"
" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe\n"
" \n"
" example command line for calibration from a list of stored images:\n"
" imagelist_creator image_list.xml *.png\n"
" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe image_list.xml\n"
" where image_list.xml is the standard OpenCV XML/YAML\n"
" use imagelist_creator to create the xml or yaml list\n"
" file consisting of the list of strings, e.g.:\n"
" \n"
"<?xml version=\"1.0\"?>\n"
"<opencv_storage>\n"
"<images>\n"
"view000.png\n"
"view001.png\n"
"<!-- view002.png -->\n"
"view003.png\n"
"view010.png\n"
"one_extra_view.jpg\n"
"</images>\n"
"</opencv_storage>\n";

const char* liveCaptureHelp =
"When the live video from camera is used as input, the following hot-keys may be used:\n"
" <ESC>, 'q' - quit the program\n"
" 'g' - start capturing images\n"
" 'u' - switch undistortion on/off\n";

void help()
{
printf( "This is a camera calibration sample.\n"
"Usage: calibration\n"
" -w=<board_width> # the number of inner corners per one of board dimension\n"
" -h=<board_height> # the number of inner corners per another board dimension\n"
" [-pt]=<pattern> # the type of pattern: chessboard or circles' grid\n"
" [-n]=<number_of_frames> # the number of frames to use for calibration\n"
" -w <board_width> # the number of inner corners per one of board dimension\n"
" -h <board_height> # the number of inner corners per another board dimension\n"
" [-pt <pattern>] # the type of pattern: chessboard or circles' grid\n"
" [-n <number_of_frames>] # the number of frames to use for calibration\n"
" # (if not specified, it will be set to the number\n"
" # of board views actually available)\n"
" [-d]=<delay> # a minimum delay in ms between subsequent attempts to capture a next view\n"
" [-d <delay>] # a minimum delay in ms between subsequent attempts to capture a next view\n"
" # (used only for video capturing)\n"
" [-s]=<squareSize> # square size in some user-defined units (1 by default)\n"
" [-o]=<out_camera_params> # the output filename for intrinsic [and extrinsic] parameters\n"
" [-s <squareSize>] # square size in some user-defined units (1 by default)\n"
" [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
" [-op] # write detected feature points\n"
" [-oe] # write extrinsic parameters\n"
" [-zt] # assume zero tangential distortion\n"
" [-a]=<aspectRatio> # fix aspect ratio (fx/fy)\n"
" [-a <aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [-v] # flip the captured images around the horizontal axis\n"
" [-V] # use a video file, and not an image list, uses\n"
" # [input_data] string for the video file name\n"
" [-su] # show undistorted images after calibration\n"
" [-input_data]=<data file> # input data, one of the following:\n"
" [input_data] # input data, one of the following:\n"
" # - text file with a list of the images of the board\n"
" # the text file can be generated with imagelist_creator\n"
" # - name of video file with a video of the board\n"
" [-cameraId]=<camera index># if input_data not specified, a live view from the camera is used\n"
" \nExample command line for calibration from a live feed:\n"
" ./calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n"
" \n"
" Example command line for calibration from a list of stored images:\n"
" imagelist_creator image_list.xml *.png\n"
" ./calibration -w=4 -h-5 -s=0.025 -o=camera.yml -op -oe -input_data=image_list.xml\n"
" where image_list.xml is the standard OpenCV XML/YAML\n"
" use imagelist_creator to create the xml or yaml list\n"
" file consisting of the list of strings, e.g.:\n"
" \n"
"<?xml version=\"1.0\"?>\n"
"<opencv_storage>\n"
"<images>\n"
"view000.png\n"
"view001.png\n"
"<!-- view002.png -->\n"
"view003.png\n"
"view010.png\n"
"one_extra_view.jpg\n"
"</images>\n"
"</opencv_storage>\n"
"\nWhen the live video from camera is used as input, the following hot-keys may be used:\n"
" <ESC>, 'q' - quit the program\n"
" 'g' - start capturing images\n"
" 'u' - switch undistortion on/off\n");
" # if input_data not specified, a live view from the camera is used\n"
"\n" );
printf("\n%s",usage);
printf( "\n%s", liveCaptureHelp );
}

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
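The usage text above describes the imagelist_creator XML/YAML format that the calibration sample can consume. For reference, a minimal sketch of reading such a list with cv::FileStorage; the node name "images" follows the example file shown, and the sample's own readStringList may differ in detail:

    #include "opencv2/core/core.hpp"
    #include <iostream>
    #include <string>
    #include <vector>
    using namespace cv;
    using namespace std;

    // Read the <images> sequence from an XML/YAML file produced by imagelist_creator.
    static bool readImageList(const string& filename, vector<string>& list)
    {
        list.clear();
        FileStorage fs(filename, FileStorage::READ);
        if (!fs.isOpened())
            return false;
        FileNode n = fs["images"];
        if (n.type() != FileNode::SEQ)
            return false;
        for (FileNodeIterator it = n.begin(); it != n.end(); ++it)
            list.push_back((string)*it);
        return true;
    }

    int main(int argc, char** argv)
    {
        vector<string> imageList;
        if (argc < 2 || !readImageList(argv[1], imageList))
        {
            cout << "usage: read_list image_list.xml" << endl;
            return -1;
        }
        cout << imageList.size() << " image names read" << endl;
        return 0;
    }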
@@ -279,74 +289,126 @@ bool runAndSave(const string& outputFilename,
}


int main( int argc, const char** argv )
int main( int argc, char** argv )
{
help();
CommandLineParser parser(argc, argv);

Size boardSize, imageSize;
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s", 1.f);
float aspectRatio = parser.get<float>("a", 1.f);
float squareSize = 1.f, aspectRatio = 1.f;
Mat cameraMatrix, distCoeffs;
string outputFilename = parser.get<string>("o","out_camera_data.yml");
string inputFilename = parser.get<string>("input_data");
int nframes = parser.get<int>("n", 10);
bool writeExtrinsics = parser.get<bool>("oe");
bool writePoints = parser.get<bool>("op");
bool flipVertical = parser.get<bool>("v");
bool showUndistorted = parser.get<bool>("su");
bool videofile = parser.get<bool>("V");
unsigned int delay = parser.get<unsigned int>("d", 1000);
unsigned int cameraId = parser.get<unsigned int>("cameraId",0);
const char* outputFilename = "out_camera_data.yml";
const char* inputFilename = 0;

int i, nframes = 10;
bool writeExtrinsics = false, writePoints = false;
bool undistortImage = false;
int flags = 0;
VideoCapture capture;
bool flipVertical = false;
bool showUndistorted = false;
bool videofile = false;
int delay = 1000;
clock_t prevTimestamp = 0;
int mode = DETECTION;
int cameraId = 0;
vector<vector<Point2f> > imagePoints;
vector<string> imageList;
Pattern pattern = CHESSBOARD;

if( (boardSize.width < 1) || (boardSize.height < 1))
if( argc < 2 )
{
help();
return fprintf( stderr, "Invalid board width or height. It must be more than zero\n" ), -1;
return 0;
}

if(parser.get<string>("pt")=="circles")
pattern = CIRCLES_GRID;
else if(parser.get<string>("pt")=="acircles")
pattern = ASYMMETRIC_CIRCLES_GRID;
if(squareSize <= 0)
for( i = 1; i < argc; i++ )
{
help();
return fprintf( stderr, "Invalid board square width. It must be more than zero.\n" ), -1;
}
if(nframes < 4)
{
help();
return printf("Invalid number of images. It must be more than 3\n" ), -1;
}
if(aspectRatio <= 0)
{
help();
return printf("Invalid aspect ratio. It must be more than zero\n" ), -1;
}
const char* s = argv[i];
if( strcmp( s, "-w" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
return fprintf( stderr, "Invalid board width\n" ), -1;
}
else if( strcmp( s, "-h" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
return fprintf( stderr, "Invalid board height\n" ), -1;
}
else if( strcmp( s, "-pt" ) == 0 )
{
i++;
if( !strcmp( argv[i], "circles" ) )
pattern = CIRCLES_GRID;
else if( !strcmp( argv[i], "acircles" ) )
pattern = ASYMMETRIC_CIRCLES_GRID;
else if( !strcmp( argv[i], "chessboard" ) )
pattern = CHESSBOARD;
else
return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
}
else if( strcmp( s, "-s" ) == 0 )
{
if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
return fprintf( stderr, "Invalid board square width\n" ), -1;
}
else if( strcmp( s, "-n" ) == 0 )
{
if( sscanf( argv[++i], "%u", &nframes ) != 1 || nframes <= 3 )
return printf("Invalid number of images\n" ), -1;
}
else if( strcmp( s, "-a" ) == 0 )
{
if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
flags |= CV_CALIB_FIX_ASPECT_RATIO;
}
else if( strcmp( s, "-d" ) == 0 )
{
if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
return printf("Invalid delay\n" ), -1;
}
else if( strcmp( s, "-op" ) == 0 )
{
writePoints = true;
}
else if( strcmp( s, "-oe" ) == 0 )
{
writeExtrinsics = true;
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-v" ) == 0 )
{
flipVertical = true;
}
else if( strcmp( s, "-V" ) == 0 )
{
videofile = true;
}
else if( strcmp( s, "-o" ) == 0 )
{
outputFilename = argv[++i];
}
else if( strcmp( s, "-su" ) == 0 )
{
showUndistorted = true;
}
else if( s[0] != '-' )
{
if( isdigit(s[0]) )
sscanf(s, "%d", &cameraId);
else
inputFilename = s;
}
else
flags |= CV_CALIB_FIX_ASPECT_RATIO;
if(!delay)
{
help();
return printf("Invalid delay. It must be more than zero.\n" ), -1;
return fprintf( stderr, "Unknown option %s", s ), -1;
}
if(parser.get<bool>("zt"))
flags |= CV_CALIB_ZERO_TANGENT_DIST;
if(parser.get<bool>("p"))
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;

if( !inputFilename.empty() )
if( inputFilename )
{
if( !videofile && readStringList(inputFilename, imageList) )
mode = CAPTURING;
@@ -362,9 +424,11 @@ int main( int argc, const char** argv )
if( !imageList.empty() )
nframes = (int)imageList.size();

if( capture.isOpened() )
printf( "%s", liveCaptureHelp );

namedWindow( "Image View", 1 );

int i;
for(i = 0;;i++)
{
Mat view, viewGray;
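The calibration revert above replaces parser.get<>() lookups with a hand-written option loop built on strcmp and sscanf. A reduced sketch of that pattern, handling only a few of the flags and adding a small guard against a missing option value; option names are taken from the diff, the rest is illustrative:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <cctype>

    // Fetch the value that follows a flag, failing cleanly if it is missing.
    static const char* nextArg(int argc, char** argv, int& i, const char* flag)
    {
        if (++i >= argc)
        {
            std::fprintf(stderr, "Missing value for %s\n", flag);
            std::exit(-1);
        }
        return argv[i];
    }

    int main(int argc, char** argv)
    {
        int boardWidth = 0, boardHeight = 0;
        float squareSize = 1.f;
        const char* inputFilename = 0;
        int cameraId = 0;
        bool writePoints = false;

        for (int i = 1; i < argc; i++)
        {
            const char* s = argv[i];
            if (std::strcmp(s, "-w") == 0)
            {
                if (std::sscanf(nextArg(argc, argv, i, s), "%d", &boardWidth) != 1 || boardWidth <= 0)
                    return std::fprintf(stderr, "Invalid board width\n"), -1;
            }
            else if (std::strcmp(s, "-h") == 0)
            {
                if (std::sscanf(nextArg(argc, argv, i, s), "%d", &boardHeight) != 1 || boardHeight <= 0)
                    return std::fprintf(stderr, "Invalid board height\n"), -1;
            }
            else if (std::strcmp(s, "-s") == 0)
            {
                if (std::sscanf(nextArg(argc, argv, i, s), "%f", &squareSize) != 1 || squareSize <= 0)
                    return std::fprintf(stderr, "Invalid square size\n"), -1;
            }
            else if (std::strcmp(s, "-op") == 0)
                writePoints = true;
            else if (s[0] != '-')
            {
                // A bare argument is either a camera index or an input file/list name.
                if (std::isdigit((unsigned char)s[0]))
                    cameraId = std::atoi(s);
                else
                    inputFilename = s;
            }
            else
                return std::fprintf(stderr, "Unknown option %s\n", s), -1;
        }

        std::printf("board %dx%d, square %g, input %s, camera %d, write points: %d\n",
                    boardWidth, boardHeight, squareSize,
                    inputFilename ? inputFilename : "(none)", cameraId, (int)writePoints);
        return 0;
    }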
@@ -1,9 +1,8 @@
#include "opencv2/core/core.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <stdio.h>
#include <iostream>
#include <ctype.h>

using namespace cv;
@@ -11,17 +10,19 @@ using namespace std;

void help()
{
printf("\nThis is a demo that shows mean-shift based tracking\n"
"You select a color objects such as your face and it tracks it.\n"
"This reads from video camera (0 by default, or the camera number the user enters\n"
"Usage:\n"
"./camshiftdemo [--cameraIndex]=<camera number, zero as default>\n"
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"To initialize tracking, select the object with mouse\n");
cout << "\nThis is a demo that shows mean-shift based tracking\n"
<< "You select a color objects such as your face and it tracks it.\n"
<< "This reads from video camera (0 by default, or the camera number the user enters\n"
<< "Call:\n"
<< "\n./camshiftdemo [camera number]"
<< "\n" << endl;

cout << "\n\nHot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"To initialize tracking, select the object with mouse\n" << endl;
}

Mat image;
@@ -63,13 +64,8 @@ void onMouse( int event, int x, int y, int, void* )



int main( int argc, const char** argv )
int main( int argc, char** argv )
{
help();

CommandLineParser parser(argc, argv);

unsigned int cameraInd = parser.get<unsigned int>("cameraIndex", 0);
VideoCapture cap;
Rect trackWindow;
RotatedRect trackBox;
@@ -77,15 +73,20 @@ int main( int argc, const char** argv )
float hranges[] = {0,180};
const float* phranges = hranges;

cap.open(cameraInd);
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
cap.open(argc == 2 ? argv[1][0] - '0' : 0);
else if( argc == 2 )
cap.open(argv[1]);

if( !cap.isOpened() )
{
help();
printf("***Could not initialize capturing...***\n");
cout << "***Could not initialize capturing...***\n";
return 0;
}

help();

namedWindow( "Histogram", 1 );
namedWindow( "CamShift Demo", 1 );
setMouseCallback( "CamShift Demo", onMouse, 0 );
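In the reverted camshiftdemo, a single-digit argument selects a camera index while anything else is treated as a video file name. A stripped-down sketch of just that capture-selection logic, with the tracking loop omitted:

    #include "opencv2/highgui/highgui.hpp"
    #include <cctype>
    #include <cstring>
    #include <iostream>

    int main(int argc, char** argv)
    {
        cv::VideoCapture cap;
        // A single digit selects a camera index; anything else is treated as a file name.
        if (argc == 1 || (argc == 2 && std::strlen(argv[1]) == 1 && std::isdigit((unsigned char)argv[1][0])))
            cap.open(argc == 2 ? argv[1][0] - '0' : 0);
        else if (argc == 2)
            cap.open(argv[1]);

        if (!cap.isOpened())
        {
            std::cout << "***Could not initialize capturing...***\n";
            return 0;
        }
        std::cout << "capture source opened\n";
        return 0;
    }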
@@ -2,34 +2,33 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"

#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

void help()
{
printf("\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Usage:\n"
"./chamfer [<image edge map, logo_in_clutter.png as default>\n"
"<template edge map, logo.png as default>]\n"
"Example: \n"
" ./chamfer logo_in_clutter.png logo.png\n");
cout <<
"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Call:\n"
"./chamfer [<image edge map> <template edge map>]\n"
"By default\n"
"the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
}
int main( int argc, const char** argv )
int main( int argc, char** argv )
{
help();

CommandLineParser parser(argc, argv);

string image = parser.get<string>("0","logo_in_clutter.png");
string tempLate = parser.get<string>("1","logo.png");
Mat img = imread(image,0);
if( argc != 1 && argc != 3 )
{
help();
return 0;
}
Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
Mat tpl = imread(tempLate,0);

Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);

// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
@@ -42,7 +41,7 @@ int main( int argc, const char** argv )
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
printf("not found;\n");
cout << "not found;\n";
return 0;
}
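The reverted chamfer sample falls back to bundled demo images when no arguments are given. A reduced sketch of that default-argument handling; the actual chamerMatching call (in the contrib module at the time) and the result drawing are left out:

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include <iostream>
    using namespace cv;
    using namespace std;

    int main(int argc, char** argv)
    {
        if (argc != 1 && argc != 3)
        {
            cout << "usage: chamfer [<image edge map> <template edge map>]" << endl;
            return 0;
        }
        // Fall back to the bundled demo images when no arguments are given.
        Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
        Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);
        if (img.empty() || tpl.empty())
        {
            cout << "could not read the input images" << endl;
            return -1;
        }
        Mat cimg;
        cvtColor(img, cimg, CV_GRAY2BGR);   // colour copy for drawing the match later
        cout << "query " << img.cols << "x" << img.rows
             << ", template " << tpl.cols << "x" << tpl.rows << endl;
        return 0;
    }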