some samples updated according to new CommandLineParser class

Author: Kirill Kornyakov
Date: 2011-05-21 14:09:03 +00:00
Parent: 4ba6793568
Commit: aadb1669a7
9 changed files with 454 additions and 431 deletions
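
For reference, the calling pattern the updated samples move to looks like the sketch below. It is assembled from calls visible in the hunks that follow (the parser is constructed from argc/argv and queried with get<T>("name", default)); the standalone program, the option names "input" and "nframes", and the assumption that opencv2/core/core.hpp declares this CommandLineParser are taken or inferred from the diff, not guaranteed by it.

// Minimal sketch of the new pattern, not part of the commit itself.
// Option names "input" and "nframes" come from the bgfg_codebook hunk below;
// the core.hpp include mirrors the includes added throughout this commit.
#include <opencv2/core/core.hpp>
#include <string>
#include <cstdio>

using namespace std;
using namespace cv;

int main(int argc, const char** argv)
{
    // invoked e.g. as: ./sample --input=tree.avi --nframes=300
    CommandLineParser parser(argc, argv);
    string inputName     = parser.get<string>("input", "0");   // "0" = default camera
    int nframesToLearnBG = parser.get<int>("nframes", 300);

    printf("input = %s, nframes = %d\n", inputName.c_str(), nframesToLearnBG);
    return 0;
}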

View File

@@ -35,7 +35,8 @@ void PreprocessArgs(int _argc, const char* _argv[], int& argc, char**& argv)
     else if (find_symbol == 0 || find_symbol == ((int)buffer_string.length() - 1))
     {
         buffer_string.erase(find_symbol, (find_symbol + 1));
-        buffer_vector.push_back(buffer_string);
+        if(!buffer_string.empty())
+            buffer_vector.push_back(buffer_string);
     }
     else
     {
@@ -77,7 +78,14 @@ CommandLineParser::CommandLineParser(int _argc, const char* _argv[])
         {
             data[cur_name].push_back("");
         }
         cur_name=argv[i];
+        while (cur_name.find('-') == 0)
+        {
+            cur_name.erase(0,1);
+        }
         was_pushed=false;
         if (data.find(cur_name) != data.end())
@@ -170,3 +178,4 @@ cv::Size CommandLineParser::fromStringsVec<cv::Size>(const std::vector<std::stri
     return res;
 }
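
Note how the dash-stripping loop added above ties into the sample changes further down: on the command line the user still writes --cascade=... or --nframes=300 (as the updated help strings show), but the parser now stores the key without the leading dashes, so the samples query bare names. For example (lines copied from the facedetect hunk below):

// Taken from the facedetect hunk below: the lookup key no longer carries "--".
string cascadeName = parser.get<string>("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
double scale = parser.get<double>("scale", 1.0);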

View File

@@ -35,27 +35,17 @@
 //M*/
+#include <opencv2/core/core.hpp>
+#include <opencv2/contrib/contrib.hpp>
+#include <opencv2/highgui/highgui.hpp>
 #include <iostream>
 #include <cstdio>
 #include <cstring>
 #include <ctime>
-#include <opencv2/contrib/contrib.hpp>
-#include <opencv2/highgui/highgui.hpp>
-
-void help(char **argv)
-{
-    std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
-        << "Usage: " << std::endl <<
-        argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
-        "Example: " << std::endl <<
-        argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
-        "    iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
-        "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
-        "Please note: Background should not contain large surfaces with skin tone." <<
-        "\n\n ESC will stop\n"
-        "Using OpenCV version %s\n" << CV_VERSION << "\n"
-        << std::endl;
-}
+
+using namespace std;
+using namespace cv;

 class ASDFrameHolder
 {
@@ -159,7 +149,6 @@ void ASDFrameHolder::setImage(IplImage *sourceImage)
 //-------------------- ASDFrameSequencer -----------------------//

 ASDFrameSequencer::~ASDFrameSequencer()
 {
     close();
@@ -215,7 +204,6 @@ bool ASDCVFrameSequencer::isOpen()
 //-------------------- ASDFrameSequencerWebCam -----------------------//

 bool ASDFrameSequencerWebCam::open(int cameraIndex)
 {
     close();
@@ -335,19 +323,41 @@ void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gVa
     }
 };

-int main(int argc, char** argv )
+void help(const char *exe_name)
 {
+    std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
+        << "Usage: " << std::endl <<
+        exe_name << " --fileMask --firstFrame --lastFrame" << std::endl << std::endl <<
+        "Example: " << std::endl <<
+        exe_name << " --fileMask=C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg --firstFrame=0 --lastFrame=1000" << std::endl <<
+        "    iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
+        "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
+        "Please note: Background should not contain large surfaces with skin tone." <<
+        "\n\n ESC will stop\n"
+        "Using OpenCV version %s\n" << CV_VERSION << "\n"
+        << std::endl;
+}
+
+int main(int argc, const char** argv )
+{
+    help(argv[0]);
+
+    CommandLineParser parser(argc, argv);
+    string fileMask = parser.get<string>("fileMask");
+    int firstFrame = parser.get<int>("firstFrame", 0);
+    int lastFrame = parser.get<int>("lastFrame", 0);
+
     IplImage *img, *filterMask = NULL;
     CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
     ASDFrameSequencer *sequencer;
     CvFont base_font;
     char caption[2048], s[256], windowName[256];
     long int clockTotal = 0, numFrames = 0;
     std::clock_t clock;

     if (argc < 4)
     {
-        help(argv);
         sequencer = new ASDFrameSequencerWebCam();
         (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);
@@ -358,8 +368,9 @@ int main(int argc, char** argv )
     }
     else
     {
+        // A sequence of images captured from video source, is stored here
         sequencer = new ASDFrameSequencerImageFile();
-        (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here
+        (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(fileMask.c_str(), firstFrame, lastFrame );
     }

     std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");
@@ -367,10 +378,6 @@ int main(int argc, char** argv )
     cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
     cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);

-    // Usage:
-    // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000
-    std::cout << "Press ESC to stop." << std::endl << std::endl;
-
     while ((img = sequencer->getNextImage()) != 0)
     {
         numFrames++;

View File

@@ -25,10 +25,14 @@
 #include <stdlib.h>
 #include <ctype.h>
+#include <opencv2/core/core.hpp>
 #include <opencv2/video/background_segm.hpp>
 #include <opencv2/imgproc/imgproc_c.h>
 #include <opencv2/highgui/highgui.hpp>

+using namespace std;
+using namespace cv;
+
 //VARIABLES for CODEBOOK METHOD:
 CvBGCodeBookModel* model = 0;
 const int NCHANNELS = 3;
@@ -38,26 +42,28 @@ void help(void)
 {
     printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
         "Originally from the book: Learning OpenCV by O'Reilly press\n"
-        "\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n"
+        "\nUSAGE:\n"
+        "./bgfg_codebook [--nframes=300] \n"
+        " [--input = movie filename or camera index]\n"
        "***Keep the focus on the video windows, NOT the consol***\n\n"
        "INTERACTIVE PARAMETERS:\n"
        "\tESC,q,Q - quit the program\n"
        "\th - print this help\n"
        "\tp - pause toggle\n"
        "\ts - single step\n"
        "\tr - run mode (single step off)\n"
        "=== AVG PARAMS ===\n"
        "\t-    - bump high threshold UP by 0.25\n"
        "\t=    - bump high threshold DOWN by 0.25\n"
        "\t[    - bump low threshold UP by 0.25\n"
        "\t]    - bump low threshold DOWN by 0.25\n"
        "=== CODEBOOK PARAMS ===\n"
        "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
        "\ta    - adjust all 3 channels at once\n"
        "\tb    - adjust both 2 and 3 at once\n"
        "\ti,o  - bump upper threshold up,down by 1\n"
        "\tk,l  - bump lower threshold up,down by 1\n"
        "\tSPACE - reset the model\n"
        );
 }
@@ -65,15 +71,20 @@ void help(void)
 //USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
 //If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
 //
-int main(int argc, char** argv)
+int main(int argc, const char** argv)
 {
-    const char* filename = 0;
+    help();
+
+    CommandLineParser parser(argc, argv);
+    string inputName = parser.get<string>("input", "0");
+    int nframesToLearnBG = parser.get<int>("nframes", 300);
+
     IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
     IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
     CvCapture* capture = 0;
     int c, n, nframes = 0;
-    int nframesToLearnBG = 300;

     model = cvCreateBGCodeBookModel();
@@ -87,38 +98,30 @@ int main(int argc, char** argv)
     bool pause = false;
     bool singlestep = false;

-    for( n = 1; n < argc; n++ )
-    {
-        static const char* nframesOpt = "--nframes=";
-        if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
-        {
-            if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
-            {
-                help();
-                return -1;
-            }
-        }
-        else
-            filename = argv[n];
-    }
-
-    if( !filename )
-    {
-        printf("Capture from camera\n");
-        capture = cvCaptureFromCAM( 0 );
-    }
-    else
-    {
-        printf("Capture from file %s\n",filename);
-        capture = cvCreateFileCapture( filename );
-    }
-    if( !capture )
-    {
-        printf( "Can not initialize video capturing\n\n" );
-        help();
-        return -1;
-    }
+    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
+    {
+        printf("Capture from camera\n");
+        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
+        int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
+        if( !capture)
+        {
+            printf ("Capture from CAM %d", c);
+            printf (" didn't work\n");
+        }
+    }
+    else
+    {
+        printf("Capture from file %s\n",inputName.c_str());
+        capture = cvCreateFileCapture(inputName.c_str());
+        if( !capture)
+        {
+            printf ("Capture from file %s", inputName.c_str());
+            printf (" didn't work\n");
+            help();
+            return -1;
+        }
+    }

     //MAIN PROCESSING LOOP:
     for(;;)

View File

@@ -34,18 +34,17 @@ int main( int argc, const char** argv )
     CommandLineParser parser(argc, argv);
-    string cascadeName = parser.get<string>("--cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
+    string cascadeName = parser.get<string>("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
+    string nestedCascadeName = parser.get<string>("nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
+    double scale = parser.get<double>("scale", 1.0);
+    string inputName = parser.get<string>("input", "0"); //read from camera by default
     if (!cascadeName.empty())
         cout << " from which we have cascadeName= " << cascadeName << endl;
-    string nestedCascadeName = parser.get<string>("--nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
     if (!nestedCascadeName.empty())
         cout << " from which we have nestedCascadeName= " << nestedCascadeName << endl;
-    double scale = parser.get<double>("--scale", 1.0);
-    string inputName = parser.get<string>("--input", "0"); //read from camera by default

     CvCapture* capture = 0;
     Mat frame, frameCopy, image;
     CascadeClassifier cascade, nestedCascade;

View File

@@ -4,6 +4,7 @@
  * Author: Liu Liu
  * liuliu.1987+opencv@gmail.com
  */
+#include <opencv2/core/core.hpp>
 #include <opencv2/objdetect/objdetect.hpp>
 #include <opencv2/features2d/features2d.hpp>
 #include <opencv2/highgui/highgui.hpp>
@@ -14,16 +15,17 @@
 #include <vector>

 using namespace std;
+using namespace cv;

 void help()
 {
-    printf(
-        "This program demonstrated the use of the SURF Detector and Descriptor using\n"
+    printf( "This program demonstrated the use of the SURF Detector and Descriptor using\n"
         "either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
         "on planar objects.\n"
         "Call:\n"
-        "./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
+        "./find_obj [--object_filename]=<object_filename, box.png as default> \n"
+        "[--scene_filename]=<scene_filename box_in_scene.png as default>]\n\n"
        );
 }

 // define whether to use approximate nearest-neighbor search
@@ -209,13 +211,16 @@ locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors
     return 1;
 }

-int main(int argc, char** argv)
+int main(int argc, const char** argv)
 {
-    const char* object_filename = argc == 3 ? argv[1] : "box.png";
-    const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
+    help();
+
+    CommandLineParser parser(argc, argv);
+    string objectFileName = parser.get<string>("object_filename", "box.png");
+    string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");

     CvMemStorage* storage = cvCreateMemStorage(0);
-    help();

     cvNamedWindow("Object", 1);
     cvNamedWindow("Object Correspond", 1);
@@ -232,13 +237,11 @@ int main(int argc, char** argv)
         {{255,255,255}}
     };

-    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
-    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
+    IplImage* object = cvLoadImage( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
+    IplImage* image = cvLoadImage( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
     if( !object || !image )
     {
-        fprintf( stderr, "Can not load %s and/or %s\n"
-            "Usage: find_obj [<object_filename> <scene_filename>]\n",
-            object_filename, scene_filename );
+        fprintf( stderr, "Can not load %s and/or %s\n", objectFileName.c_str(), sceneFileName.c_str() );
         exit(-1);
     }

     IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);

View File

@@ -9,30 +9,35 @@
 #include <vector>

 using namespace cv;

 void help()
 {
     printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
             "descriptor classifier"
             "Usage:\n"
-            "./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
-            "\n");
+            "./find_obj_ferns [--object_filename]=<object_filename, box.png as default> \n"
+            "[--scene_filename]=<scene_filename box_in_scene.png as default>]\n\n");
 }

-int main(int argc, char** argv)
+int main(int argc, const char** argv)
 {
-    const char* object_filename = argc > 1 ? argv[1] : "box.png";
-    const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
-    int i;
-
     help();

+    CommandLineParser parser(argc, argv);
+    string objectFileName = parser.get<string>("object_filename", "box.png");
+    string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
+
     cvNamedWindow("Object", 1);
     cvNamedWindow("Image", 1);
     cvNamedWindow("Object Correspondence", 1);

-    Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
+    Mat object = imread( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
     Mat image;

     double imgscale = 1;
-    Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
+    Mat _image = imread( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );

     resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
@@ -40,7 +45,7 @@ int main(int argc, char** argv)
     {
         fprintf( stderr, "Can not load %s and/or %s\n"
                 "Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
-                object_filename, scene_filename );
+                objectFileName.c_str(), sceneFileName.c_str() );
         exit(-1);
     }
@@ -60,7 +65,7 @@ int main(int argc, char** argv)
     vector<KeyPoint> objKeypoints, imgKeypoints;
     PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);

-    string model_filename = format("%s_model.xml.gz", object_filename);
+    string model_filename = format("%s_model.xml.gz", objectFileName.c_str());
     printf("Trying to load %s ...\n", model_filename.c_str());
     FileStorage fs(model_filename, FileStorage::READ);
     if( fs.isOpened() )
@@ -106,6 +111,7 @@ int main(int argc, char** argv)
     t = (double)getTickCount() - t;
     printf("%gms\n", t*1000/getTickFrequency());

+    int i = 0;
     if( found )
     {
         for( i = 0; i < 4; i++ )

View File

@@ -1,5 +1,7 @@
+#include "opencv2/core/core.hpp"
 #include "opencv2/objdetect/objdetect.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include <stdio.h>

 #ifdef HAVE_CONFIG_H
@@ -13,42 +15,40 @@ using namespace cv;
 void help()
 {
     printf( "This program demonstrated the use of the latentSVM detector.\n"
            "It reads in a trained object model and then uses that to detect the object in an image\n"
            "Call:\n"
-           "./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"
+           "./latentsvmdetect [--image_filename]=<image_filename, cat.jpg as default> \n"
+           " [--model_filename] = <model_filename, cat.xml as default> \n"
+           " [--threads_number] = <number of threads, -1 as default>\n"
            " The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"
            " Press any key to quit.\n");
 }

-const char* model_filename = "cat.xml";
-const char* image_filename = "cat.jpg";
-int tbbNumThreads = -1;
-
 void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)
 {
     CvMemStorage* storage = cvCreateMemStorage(0);
     CvSeq* detections = 0;
     int i = 0;
     int64 start = 0, finish = 0;
 #ifdef HAVE_TBB
     tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
     if (numThreads > 0)
     {
         init.initialize(numThreads);
         printf("Number of threads %i\n", numThreads);
     }
     else
     {
         printf("Number of threads is not correct for TBB version");
         return;
     }
 #endif
     start = cvGetTickCount();
     detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
     finish = cvGetTickCount();
     printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));
 #ifdef HAVE_TBB
     init.terminate();
@@ -56,43 +56,43 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
     for( i = 0; i < detections->total; i++ )
     {
         CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );
         CvRect bounding_box = detection.rect;
         cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),
                      cvPoint(bounding_box.x + bounding_box.width,
                              bounding_box.y + bounding_box.height),
                      CV_RGB(255,0,0), 3 );
     }
     cvReleaseMemStorage( &storage );
 }

-int main(int argc, char* argv[])
+int main(int argc, const char* argv[])
 {
     help();
-    if (argc > 2)
-    {
-        image_filename = argv[1];
-        model_filename = argv[2];
-        if (argc > 3)
-        {
-            tbbNumThreads = atoi(argv[3]);
-        }
-    }
-    IplImage* image = cvLoadImage(image_filename);
+
+    CommandLineParser parser(argc, argv);
+
+    string imageFileName = parser.get<string>("image_filename", "cat.jpg");
+    string modelFileName = parser.get<string>("model_filename", "cat.xml");
+    int tbbNumThreads = parser.get<int>("threads_number", -1);
+
+    IplImage* image = cvLoadImage(imageFileName.c_str());
     if (!image)
     {
         printf( "Unable to load the image\n"
                 "Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
         return -1;
     }
-    CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);
+    CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(modelFileName.c_str());
     if (!detector)
     {
         printf( "Unable to load the model\n"
                 "Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
         cvReleaseImage( &image );
         return -1;
     }
     detect_and_draw_objects( image, detector, tbbNumThreads );
     cvNamedWindow( "test", 0 );
     cvShowImage( "test", image );
     cvWaitKey(0);
@@ -100,5 +100,5 @@ int main(int argc, char* argv[])
     cvReleaseImage( &image );
     cvDestroyAllWindows();

     return 0;
 }

View File

@@ -2,17 +2,22 @@
  * Copyright© 2009, Liu Liu All rights reserved.
  */
+#include <opencv2/core/core.hpp>
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include <iostream>
+
+using namespace std;
+using namespace cv;

 void help()
 {
     printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
            "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
            "\nCall:\n"
-           "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n");
+           "./mser_sample [--image_filename] <path_and_image_filename, default is 'puzzle.png'>\n\n");
 }

 static CvScalar colors[] =
@@ -44,90 +49,81 @@ static uchar bcolors[][3] =
 };

-int main( int argc, char** argv )
+int main( int argc, const char** argv )
 {
-    char path[1024];
-    IplImage* img;
     help();
-    if (argc!=2)
-    {
-        strcpy(path,"puzzle.png");
-        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
-        if (!img)
-        {
-            printf("\nUsage: mser_sample <path_to_image>\n");
-            return 0;
-        }
-    }
-    else
-    {
-        strcpy(path,argv[1]);
-        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
-    }
+
+    CommandLineParser parser(argc, argv);
+    string imageFileName = parser.get<string>("image_filename", "puzzle.png");
+
+    IplImage* img;
+    img = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
     if (!img)
     {
-        printf("Unable to load image %s\n",path);
+        printf("Unable to load image %s\n",imageFileName.c_str());
+        help();
         return 0;
     }

-    IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
+    IplImage* rsp = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_COLOR );
     IplImage* ellipses = cvCloneImage(rsp);
     cvCvtColor(img,ellipses,CV_GRAY2BGR);
     CvSeq* contours;
     CvMemStorage* storage= cvCreateMemStorage();
     IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
     cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
     CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );

     double t = (double)cvGetTickCount();
     cvExtractMSER( hsv, NULL, &contours, storage, params );
     t = cvGetTickCount() - t;
     printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );

     uchar* rsptr = (uchar*)rsp->imageData;
     // draw mser with different color
     for ( int i = contours->total-1; i >= 0; i-- )
     {
         CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
         for ( int j = 0; j < r->total; j++ )
         {
             CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
             rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
             rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
             rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
         }
     }
     // find ellipse ( it seems cvfitellipse2 have error or sth?
     for ( int i = 0; i < contours->total; i++ )
     {
         CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
         CvBox2D box = cvFitEllipse2( r );
         box.angle=(float)CV_PI/2-box.angle;

         if ( r->color > 0 )
             cvEllipseBox( ellipses, box, colors[9], 2 );
         else
             cvEllipseBox( ellipses, box, colors[2], 2 );
     }

     cvSaveImage( "rsp.png", rsp );

     cvNamedWindow( "original", 0 );
     cvShowImage( "original", img );

     cvNamedWindow( "response", 0 );
     cvShowImage( "response", rsp );

     cvNamedWindow( "ellipses", 0 );
     cvShowImage( "ellipses", ellipses );

     cvWaitKey(0);

     cvDestroyWindow( "original" );
     cvDestroyWindow( "response" );
     cvDestroyWindow( "ellipses" );
     cvReleaseImage(&rsp);
     cvReleaseImage(&img);
     cvReleaseImage(&ellipses);
 }

View File

@@ -7,18 +7,20 @@
  *
  */

+#include <opencv2/core/core.hpp>
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc_c.h"

 #include <string>

 void help()
 {
     printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
            "Correspondences are drawn\n");
-    printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
-    printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
+    printf("Format: \n./one_way_sample <path_to_samples> <image1> <image2>\n");
+    printf("For example: ./one_way_sample --path=../../../opencv/samples/c --first_image=scene_l.bmp --second_image=scene_r.bmp\n");
 }

 using namespace cv;
@@ -26,21 +28,19 @@ using namespace cv;
 IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
                               const vector<KeyPoint>& features2, const vector<int>& desc_idx);

-int main(int argc, char** argv)
+int main(int argc, const char** argv)
 {
+    help();
+
+    CommandLineParser parser(argc, argv);
+    std::string path_name = parser.get<string>("path", "../../../opencv/samples/c");
+    std::string img1_name = path_name + "/" + parser.get<string>("first_image", "scene_l.bmp");
+    std::string img2_name = path_name + "/" + parser.get<string>("second_image", "scene_r.bmp");
+
     const char images_list[] = "one_way_train_images.txt";
     const CvSize patch_size = cvSize(24, 24);
-    const int pose_count = 50;
-
-    if (argc != 3 && argc != 4)
-    {
-        help();
-        return 0;
-    }
-
-    std::string path_name = argv[1];
-    std::string img1_name = path_name + "/" + std::string(argv[2]);
-    std::string img2_name = path_name + "/" + std::string(argv[3]);
+    const int pose_count = 1; //50

     printf("Reading the images...\n");
     IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);