"atomic bomb" commit. Reorganized OpenCV directory structure

This commit is contained in:
Vadim Pisarevsky
2010-05-11 17:44:00 +00:00
commit 127d6649a1
1761 changed files with 1766340 additions and 0 deletions

97
samples/c/CMakeLists.txt Normal file
View File

@@ -0,0 +1,97 @@
# ----------------------------------------------------------------------------
# CMake file for C samples. See root CMakeLists.txt
#
# ----------------------------------------------------------------------------
if(BUILD_EXAMPLES)
    project(c_samples)

    if(CMAKE_COMPILER_IS_GNUCXX)
        # The samples intentionally keep some unused helper functions around
        # for illustration; silence the corresponding GCC warning.
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function")
    endif()

    # NOTE(review): directory-scoped include paths are kept for compatibility
    # with the minimum CMake version this project targets; with CMake >= 2.8.11
    # these would be attached per-target via target_include_directories().
    include_directories(
        "${CMAKE_SOURCE_DIR}/modules/core/include"
        "${CMAKE_SOURCE_DIR}/modules/imgproc/include"
        "${CMAKE_SOURCE_DIR}/modules/video/include"
        "${CMAKE_SOURCE_DIR}/modules/highgui/include"
        "${CMAKE_SOURCE_DIR}/modules/ml/include"
        "${CMAKE_SOURCE_DIR}/modules/calib3d/include"
        "${CMAKE_SOURCE_DIR}/modules/features2d/include"
        "${CMAKE_SOURCE_DIR}/modules/objdetect/include"
        "${CMAKE_SOURCE_DIR}/modules/legacy/include"
        "${CMAKE_SOURCE_DIR}/modules/contrib/include"
    )

    # ---------------------------------------------
    # Define executable targets
    # ---------------------------------------------
    # MY_DEFINE_EXAMPLE(<name> <srcs>): add one sample executable built from
    # <srcs>, labelled "(EXAMPLE) <name>" in IDE project views, linked against
    # the OpenCV module targets, and installed on Windows.
    macro(MY_DEFINE_EXAMPLE name srcs)
        add_executable(${name} ${srcs})
        set_target_properties(${name} PROPERTIES PROJECT_LABEL "(EXAMPLE) ${name}")
        # Linking against the module targets both links the libraries and
        # orders the build, so no separate add_dependencies() call is needed.
        target_link_libraries(${name} ${OPENCV_LINKER_LIBS} opencv_core opencv_imgproc opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_features2d opencv_calib3d opencv_legacy opencv_contrib)
        if(WIN32)
            install(TARGETS ${name}
                RUNTIME DESTINATION "samples/c" COMPONENT main)
        endif()
    endmacro()

    MY_DEFINE_EXAMPLE(adaptiveskindetector adaptiveskindetector.cpp)
    MY_DEFINE_EXAMPLE(bgfg_codebook bgfg_codebook.cpp)
    MY_DEFINE_EXAMPLE(bgfg_segm bgfg_segm.cpp)
    MY_DEFINE_EXAMPLE(blobtrack_sample blobtrack_sample.cpp)
    MY_DEFINE_EXAMPLE(camshiftdemo camshiftdemo.c)
    MY_DEFINE_EXAMPLE(contours contours.c)
    MY_DEFINE_EXAMPLE(convert_cascade convert_cascade.c)
    MY_DEFINE_EXAMPLE(convexhull convexhull.c)
    MY_DEFINE_EXAMPLE(delaunay delaunay.c)
    MY_DEFINE_EXAMPLE(demhist demhist.c)
    MY_DEFINE_EXAMPLE(detectors_sample detectors_sample.cpp)
    MY_DEFINE_EXAMPLE(dft dft.c)
    MY_DEFINE_EXAMPLE(distrans distrans.c)
    MY_DEFINE_EXAMPLE(drawing drawing.c)
    MY_DEFINE_EXAMPLE(edge edge.c)
    MY_DEFINE_EXAMPLE(facedetect facedetect.cpp)
    MY_DEFINE_EXAMPLE(ffilldemo ffilldemo.c)
    MY_DEFINE_EXAMPLE(fback fback.cpp)
    MY_DEFINE_EXAMPLE(fback_c fback_c.c)
    MY_DEFINE_EXAMPLE(find_obj find_obj.cpp)
    MY_DEFINE_EXAMPLE(fitellipse fitellipse.cpp)
    MY_DEFINE_EXAMPLE(houghlines houghlines.c)
    MY_DEFINE_EXAMPLE(image image.cpp)
    MY_DEFINE_EXAMPLE(inpaint inpaint.cpp)
    MY_DEFINE_EXAMPLE(kalman kalman.c)
    MY_DEFINE_EXAMPLE(kmeans kmeans.c)
    MY_DEFINE_EXAMPLE(laplace laplace.c)
    MY_DEFINE_EXAMPLE(letter_recog letter_recog.cpp)
    MY_DEFINE_EXAMPLE(lkdemo lkdemo.c)
    MY_DEFINE_EXAMPLE(minarea minarea.c)
    MY_DEFINE_EXAMPLE(morphology morphology.c)
    MY_DEFINE_EXAMPLE(motempl motempl.c)
    MY_DEFINE_EXAMPLE(mushroom mushroom.cpp)
    MY_DEFINE_EXAMPLE(tree_engine tree_engine.cpp)
    MY_DEFINE_EXAMPLE(one_way_sample one_way_sample.cpp)
    MY_DEFINE_EXAMPLE(peopledetect peopledetect.cpp)
    MY_DEFINE_EXAMPLE(pyramid_segmentation pyramid_segmentation.c)
    MY_DEFINE_EXAMPLE(squares squares.c)
    MY_DEFINE_EXAMPLE(stereo_calib stereo_calib.cpp)
    MY_DEFINE_EXAMPLE(stereo_match stereo_match.cpp)
    MY_DEFINE_EXAMPLE(watershed watershed.cpp)
    MY_DEFINE_EXAMPLE(grabcut grabcut.cpp)
    MY_DEFINE_EXAMPLE(polar_transforms polar_transforms.c)
    MY_DEFINE_EXAMPLE(calibration calibration.cpp)
    MY_DEFINE_EXAMPLE(calibration_artificial calibration_artificial.cpp)
    MY_DEFINE_EXAMPLE(mser mser_sample.cpp)
    MY_DEFINE_EXAMPLE(find_obj_calonder find_obj_calonder.cpp)
    MY_DEFINE_EXAMPLE(find_obj_ferns find_obj_ferns.cpp)
endif()

# Install sample sources and data on non-Windows platforms when requested.
if(INSTALL_C_EXAMPLES AND NOT WIN32)
    file(GLOB C_SAMPLES *.c *.cpp *.jpg *.png *.data makefile.* build_all.sh *.dsp *.cmd )
    install(FILES ${C_SAMPLES}
        DESTINATION share/opencv/samples/c
        PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()

View File

@@ -0,0 +1,406 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install, copy or use the software.
//
// Copyright (C) 2009, Farhad Dadgostar
// Intel Corporation and third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <cvaux.h>
#include <highgui.h>
// Holds a single video frame (deep copy) plus the timestamp it arrived with.
class ASDFrameHolder
{
private:
IplImage *image; // owned copy of the frame; released by the destructor
double timeStamp; // timestamp supplied with the frame (units defined by caller)
public:
ASDFrameHolder();
virtual ~ASDFrameHolder();
// Replaces the held frame with a deep copy of sourceImage; the previous frame is released.
virtual void assignFrame(IplImage *sourceImage, double frameTime);
inline IplImage *getImage();
inline double getTimeStamp();
// Stores the pointer directly, without copying or releasing the previous image.
virtual void setImage(IplImage *sourceImage);
};
// Abstract source of successive frames; the base implementation yields no frames.
class ASDFrameSequencer
{
public:
virtual ~ASDFrameSequencer();
// Returns a newly allocated image the caller must release, or NULL at end of stream.
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
// Writes a short textual description of the current frame into caption.
virtual void getFrameCaption(char *caption);
};
class ASDCVFrameSequencer : public ASDFrameSequencer
{
protected:
CvCapture *capture;
public:
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
};
// Sequencer that captures frames from a webcam via cvCaptureFromCAM.
class ASDFrameSequencerWebCam : public ASDCVFrameSequencer
{
public:
// Returns true if the camera with the given index was opened successfully.
virtual bool open(int cameraIndex);
};
// Sequencer that reads frames from a video file via cvCaptureFromFile.
class ASDFrameSequencerVideoFile : public ASDCVFrameSequencer
{
public:
// Returns true if the video file was opened successfully.
virtual bool open(const char *fileName);
};
// Sequencer that reads a numbered image sequence, e.g. "frame_%05d.jpg".
class ASDFrameSequencerImageFile : public ASDFrameSequencer {
private:
char sFileNameMask[2048]; // printf-style mask with one integer placeholder
int nCurrentIndex, nStartIndex, nEndIndex; // current / first / last frame numbers (inclusive)
public:
virtual void open(const char *fileNameMask, int startIndex, int endIndex);
// NOTE(review): caption buffer must be large enough for the expanded mask.
virtual void getFrameCaption(char *caption);
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
};
//-------------------- ASDFrameHolder -----------------------//
// Start with no frame held.
ASDFrameHolder::ASDFrameHolder( )
{
image = NULL;
timeStamp = 0;
};
ASDFrameHolder::~ASDFrameHolder( )
{
cvReleaseImage(&image);
};
// Deep-copies sourceImage and records its timestamp; any previously held
// frame is released first.
void ASDFrameHolder::assignFrame(IplImage *sourceImage, double frameTime)
{
if (image != NULL)
{
cvReleaseImage(&image);
image = NULL;
}
image = cvCloneImage(sourceImage);
timeStamp = frameTime;
};
IplImage *ASDFrameHolder::getImage()
{
return image;
};
double ASDFrameHolder::getTimeStamp()
{
return timeStamp;
};
// NOTE(review): unlike assignFrame(), this takes the pointer as-is and does
// not release the previously held image — the old frame leaks if one was set.
void ASDFrameHolder::setImage(IplImage *sourceImage)
{
image = sourceImage;
};
//-------------------- ASDFrameSequencer -----------------------//
// NOTE(review): close() is virtual, but calling it from the base destructor
// invokes only this base (no-op) version — derived classes must release their
// own resources in their own destructors.
ASDFrameSequencer::~ASDFrameSequencer()
{
close();
};
// The base sequencer produces no frames.
IplImage *ASDFrameSequencer::getNextImage()
{
return NULL;
};
void ASDFrameSequencer::close()
{
};
bool ASDFrameSequencer::isOpen()
{
return false;
};
void ASDFrameSequencer::getFrameCaption(char *caption) {
return;
};
// Grab the next frame from the capture source and hand back a private copy
// (cvQueryFrame returns an internal buffer that must not be retained).
IplImage* ASDCVFrameSequencer::getNextImage()
{
    IplImage *frame = cvQueryFrame(capture);
    return (frame == NULL) ? NULL : cvCloneImage(frame);
}

// Release the capture handle if one is open.
void ASDCVFrameSequencer::close()
{
    if (capture != NULL)
        cvReleaseCapture(&capture);
}

// The sequencer counts as open while it owns a capture handle.
bool ASDCVFrameSequencer::isOpen()
{
    return capture != NULL;
}
//-------------------- ASDFrameSequencerWebCam -----------------------//
bool ASDFrameSequencerWebCam::open(int cameraIndex)
{
close();
capture = cvCaptureFromCAM(cameraIndex);
if (!capture)
{
return false;
}
else
{
return true;
}
};
//-------------------- ASDFrameSequencerVideoFile -----------------------//
bool ASDFrameSequencerVideoFile::open(const char *fileName)
{
close();
capture = cvCaptureFromFile(fileName);
if (!capture)
{
return false;
}
else
{
return true;
}
};
//-------------------- ASDFrameSequencerImageFile -----------------------//
// Remember the printf-style file-name mask and the inclusive frame range.
// The copy is length-bounded so an oversized mask cannot overrun the fixed
// 2048-byte member buffer (the original used an unbounded sprintf).
void ASDFrameSequencerImageFile::open(const char *fileNameMask, int startIndex, int endIndex)
{
nCurrentIndex = startIndex-1;
nStartIndex = startIndex;
nEndIndex = endIndex;
std::strncpy(sFileNameMask, fileNameMask, sizeof(sFileNameMask) - 1);
sFileNameMask[sizeof(sFileNameMask) - 1] = '\0';
}
// Expand the mask with the current index into caption.
// NOTE(review): the caller's buffer must be large enough for the result.
void ASDFrameSequencerImageFile::getFrameCaption(char *caption) {
std::sprintf(caption, sFileNameMask, nCurrentIndex);
}
// Load and return the next image of the sequence (caller releases it),
// or NULL once the index passes the last frame.
IplImage* ASDFrameSequencerImageFile::getNextImage()
{
char fileName[2048];
nCurrentIndex++;
if (nCurrentIndex > nEndIndex)
return NULL;
std::sprintf(fileName, sFileNameMask, nCurrentIndex);
IplImage* img = cvLoadImage(fileName);
return img;
}
// Mark the sequence exhausted.
void ASDFrameSequencerImageFile::close()
{
nCurrentIndex = nEndIndex+1;
}
// Open as long as the current index has not passed the last frame.
bool ASDFrameSequencerImageFile::isOpen()
{
return (nCurrentIndex <= nEndIndex);
}
// Draws str twice — a black copy offset by (-1,-1), then the colored copy —
// to produce a simple drop-shadow effect.
void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))
{
cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));
cvPutText(img, str, point, font, color);
};
// Write / read one pixel through an interleaved 8-bit pointer; bytes are
// stored b, g, r in memory (OpenCV BGR layout).
#define ASD_RGB_SET_PIXEL(pointer, r, g, b) { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g; (*(pointer+2)) = (unsigned char)r; }
#define ASD_RGB_GET_PIXEL(pointer, r, g, b) {b = (unsigned char)(*(pointer)); g = (unsigned char)(*(pointer+1)); r = (unsigned char)(*(pointer+2));}
// Paints the pixels of rgbDestImage with (rValue,gValue,bValue) wherever the
// single-channel mask `buffer` is non-zero, scaling mask coordinates up to
// the destination size with truncating (nearest-lower) stepping.
void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)
{
int x, y, nWidth, nHeight;
double destX, destY, dx, dy;
uchar c;
unsigned char *pSrc;
nWidth = buffer->width;
nHeight = buffer->height;
// Scale factors from mask coordinates to destination coordinates.
dx = double(rgbDestImage->width)/double(nWidth);
dy = double(rgbDestImage->height)/double(nHeight);
destX = 0;
for (x = 0; x < nWidth; x++)
{
destY = 0;
for (y = 0; y < nHeight; y++)
{
// Mask value for this cell (row base + column offset).
c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];
if (c)
{
// Address of the corresponding destination pixel.
pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);
ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);
}
destY += dy;
}
destY = 0;
destX += dx;
}
};
// Demo entry point: runs the adaptive skin detector either on the default
// webcam (when fewer than 3 arguments are given) or on a numbered image
// sequence described by <fileMask> <firstFrame> <lastFrame>, overlaying
// detected skin pixels in green along with timing information.
int main(int argc, char** argv )
{
IplImage *img, *filterMask = NULL;
CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
ASDFrameSequencer *sequencer;
CvFont base_font;
char caption[2048], s[256], windowName[256];
long int clockTotal = 0, numFrames = 0;
std::clock_t clock;
if (argc < 4)
{
std::cout << "Usage: " << std::endl <<
argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
"Example: " << std::endl <<
argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
" iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
"If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
"Please note: Background should not contain large surfaces with skin tone." <<
std::endl;
// Fall back to the default webcam when no sequence was specified.
sequencer = new ASDFrameSequencerWebCam();
(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);
if (! sequencer->isOpen())
{
std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;
}
}
else
{
sequencer = new ASDFrameSequencerImageFile();
(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here
}
std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");
cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);
// Usage:
// c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000
std::cout << "Press ESC to stop." << std::endl << std::endl;
// Process frames until the sequencer is exhausted or ESC is pressed.
while ((img = sequencer->getNextImage()) != 0)
{
numFrames++;
// Lazily allocate the mask once the frame size is known.
if (filterMask == NULL)
{
filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);
}
clock = std::clock();
filter.process(img, filterMask); // process the frame
clockTotal += (std::clock() - clock);
// Overlay the detected skin pixels in green.
displayBuffer(img, filterMask, 0, 255, 0);
sequencer->getFrameCaption(caption);
std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);
putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);
std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);
putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);
cvShowImage (windowName, img);
cvReleaseImage(&img);
if (cvWaitKey(1) == 27)
break;
}
sequencer->close();
delete sequencer;
cvReleaseImage(&filterMask);
cvDestroyWindow(windowName);
std::cout << "Finished, " << numFrames << " frames processed." << std::endl;
return 0;
}

File diff suppressed because it is too large Load Diff

BIN
samples/c/airplane.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

BIN
samples/c/baboon.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

BIN
samples/c/baboon200.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

238
samples/c/bgfg_codebook.cpp Normal file
View File

@@ -0,0 +1,238 @@
// Background average sample code done with averages and done with codebooks
// (adapted from the OpenCV book sample)
//
// NOTE: To get the keyboard to work, you *have* to have one of the video windows be active
// and NOT the console window.
//
// Gary Bradski Oct 3, 2008.
//
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warrenty, support or any guarentee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
************************************************** */
#include "cvaux.h"
#include "cxmisc.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
//VARIABLES for CODEBOOK METHOD:
CvBGCodeBookModel* model = 0; // global codebook background model, created in main()
const int NCHANNELS = 3; // number of color channels tracked by the codebook
bool ch[NCHANNELS]={true,true,true}; // This sets what channels should be adjusted for background bounds
// Prints usage and the interactive key bindings to stdout.
void help(void)
{
printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
"\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n"
"***Keep the focus on the video windows, NOT the consol***\n\n"
"INTERACTIVE PARAMETERS:\n"
"\tESC,q,Q - quit the program\n"
"\th - print this help\n"
"\tp - pause toggle\n"
"\ts - single step\n"
"\tr - run mode (single step off)\n"
"=== AVG PARAMS ===\n"
"\t- - bump high threshold UP by 0.25\n"
"\t= - bump high threshold DOWN by 0.25\n"
"\t[ - bump low threshold UP by 0.25\n"
"\t] - bump low threshold DOWN by 0.25\n"
"=== CODEBOOK PARAMS ===\n"
"\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
"\ta - adjust all 3 channels at once\n"
"\tb - adjust both 2 and 3 at once\n"
"\ti,o - bump upper threshold up,down by 1\n"
"\tk,l - bump lower threshold up,down by 1\n"
"\tSPACE - reset the model\n"
);
}
//
//USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
// Entry point: learns a codebook background model over the first
// nframesToLearnBG frames (from a camera or a movie file given on the
// command line), then segments the foreground of every subsequent frame.
// Interactive keys are documented in help().
int main(int argc, char** argv)
{
const char* filename = 0;
IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
CvCapture* capture = 0;
int c, n, nframes = 0;
int nframesToLearnBG = 300;
model = cvCreateBGCodeBookModel();
//Set color thresholds to default values
model->modMin[0] = 3;
model->modMin[1] = model->modMin[2] = 3;
model->modMax[0] = 10;
model->modMax[1] = model->modMax[2] = 10;
model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;
bool pause = false;
bool singlestep = false;
// Parse "--nframes=N"; any other argument is taken as the movie file name.
for( n = 1; n < argc; n++ )
{
static const char* nframesOpt = "--nframes=";
if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
{
if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
{
help();
return -1;
}
}
else
filename = argv[n];
}
if( !filename )
{
printf("Capture from camera\n");
capture = cvCaptureFromCAM( 0 );
}
else
{
printf("Capture from file %s\n",filename);
capture = cvCreateFileCapture( filename );
}
if( !capture )
{
printf( "Can not initialize video capturing\n\n" );
help();
return -1;
}
//MAIN PROCESSING LOOP:
for(;;)
{
if( !pause )
{
rawImage = cvQueryFrame( capture );
++nframes;
if(!rawImage)
break;
}
if( singlestep )
pause = true;
//First time:
if( nframes == 1 && rawImage )
{
// CODEBOOK METHOD ALLOCATION
yuvImage = cvCloneImage(rawImage);
ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
cvSet(ImaskCodeBook,cvScalar(255));
cvNamedWindow( "Raw", 1 );
cvNamedWindow( "ForegroundCodeBook",1);
cvNamedWindow( "CodeBook_ConnectComp",1);
}
// If we've got an rawImage and are good to go:
if( rawImage )
{
cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
//This is where we build our background model
if( !pause && nframes-1 < nframesToLearnBG )
cvBGCodeBookUpdate( model, yuvImage );
if( nframes-1 == nframesToLearnBG )
cvBGCodeBookClearStale( model, model->t/2 );
//Find the foreground if any
if( nframes-1 >= nframesToLearnBG )
{
// Find foreground by codebook method
cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );
// This part just to visualize bounding boxes and centers if desired
cvCopy(ImaskCodeBook,ImaskCodeBookCC);
cvSegmentFGMask( ImaskCodeBookCC );
}
//Display
cvShowImage( "Raw", rawImage );
cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);
}
// User input:
c = cvWaitKey(10)&0xFF;
c = tolower(c);
// End processing on ESC, q or Q
if(c == 27 || c == 'q')
break;
//Else check for user input
switch( c )
{
case 'h':
help();
break;
case 'p':
pause = !pause;
break;
case 's':
singlestep = !singlestep;
pause = false;
break;
case 'r':
pause = false;
singlestep = false;
break;
case ' ':
// Reset the model and restart the learning phase.
cvBGCodeBookClearStale( model, 0 );
nframes = 0;
break;
//CODEBOOK PARAMS
case 'y': case '0':
case 'u': case '1':
case 'v': case '2':
case 'a': case '3':
case 'b':
// Select which YUV channels subsequent i/o/k/l adjustments affect.
ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2] );
break;
case 'i': //modify max classification bounds (max bound goes higher)
case 'o': //modify max classification bounds (max bound goes lower)
case 'k': //modify min classification bounds (min bound goes lower)
case 'l': //modify min classification bounds (min bound goes higher)
{
uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin;
for(n=0; n<NCHANNELS; n++)
{
if( ch[n] )
{
int v = ptr[n] + (c == 'i' || c == 'l' ? 1 : -1);
ptr[n] = CV_CAST_8U(v);
}
printf("%d,", ptr[n]);
}
printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low" );
}
break;
}
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Raw" );
cvDestroyWindow( "ForegroundCodeBook");
cvDestroyWindow( "CodeBook_ConnectComp");
return 0;
}

62
samples/c/bgfg_segm.cpp Normal file
View File

@@ -0,0 +1,62 @@
#include "cvaux.h"
#include "highgui.h"
#include <stdio.h>
//this is a sample for foreground detection functions
int main(int argc, char** argv)
{
IplImage* tmp_frame = NULL;
CvCapture* cap = NULL;
bool update_bg_model = true;
if( argc < 2 )
cap = cvCaptureFromCAM(0);
else
cap = cvCaptureFromFile(argv[1]);
if( !cap )
{
printf("can not open camera or video file\n");
return -1;
}
tmp_frame = cvQueryFrame(cap);
if(!tmp_frame)
{
printf("can not read data from the video source\n");
return -1;
}
cvNamedWindow("BG", 1);
cvNamedWindow("FG", 1);
CvBGStatModel* bg_model = 0;
for( int fr = 1;tmp_frame; tmp_frame = cvQueryFrame(cap), fr++ )
{
if(!bg_model)
{
//create BG model
bg_model = cvCreateGaussianBGModel( tmp_frame );
//bg_model = cvCreateFGDStatModel( temp );
continue;
}
double t = (double)cvGetTickCount();
cvUpdateBGStatModel( tmp_frame, bg_model, update_bg_model ? -1 : 0 );
t = (double)cvGetTickCount() - t;
printf( "%d. %.1f\n", fr, t/(cvGetTickFrequency()*1000.) );
cvShowImage("BG", bg_model->background);
cvShowImage("FG", bg_model->foreground);
char k = cvWaitKey(5);
if( k == 27 ) break;
if( k == ' ' )
update_bg_model = !update_bg_model;
}
cvReleaseBGStatModel( &bg_model );
cvReleaseCapture(&cap);
return 0;
}

View File

@@ -0,0 +1,751 @@
#include "cvaux.h"
#include "highgui.h"
#include <stdio.h>
/* Select appropriate case insensitive string comparison function: */
#if defined WIN32 || defined _MSC_VER
#define MY_STRNICMP strnicmp
#define MY_STRICMP stricmp
#else
#define MY_STRNICMP strncasecmp
#define MY_STRICMP strcasecmp
#endif
/* List of foreground (FG) DETECTION modules: */
// Each factory below wraps cvCreateFGDetectorBase with a fixed model type so
// it fits the parameterless function-pointer registries that follow.
static CvFGDetector* cvCreateFGDetector0 () { return cvCreateFGDetectorBase(CV_BG_MODEL_FGD, NULL); }
static CvFGDetector* cvCreateFGDetector0Simple() { return cvCreateFGDetectorBase(CV_BG_MODEL_FGD_SIMPLE, NULL); }
static CvFGDetector* cvCreateFGDetector1 () { return cvCreateFGDetectorBase(CV_BG_MODEL_MOG, NULL); }
// Registry entry: factory plus the nickname/description used in help output
// and command-line module selection. Each table is NULL-terminated.
typedef struct DefModule_FGDetector
{
CvFGDetector* (*create)();
const char* nickname;
const char* description;
} DefModule_FGDetector;
DefModule_FGDetector FGDetector_Modules[] =
{
{cvCreateFGDetector0,"FG_0","Foreground Object Detection from Videos Containing Complex Background. ACM MM2003."},
{cvCreateFGDetector0Simple,"FG_0S","Simplified version of FG_0"},
{cvCreateFGDetector1,"FG_1","Adaptive background mixture models for real-time tracking. CVPR1999"},
{NULL,NULL,NULL}
};
/* List of BLOB DETECTION modules: */
typedef struct DefModule_BlobDetector
{
CvBlobDetector* (*create)();
const char* nickname;
const char* description;
} DefModule_BlobDetector;
DefModule_BlobDetector BlobDetector_Modules[] =
{
{cvCreateBlobDetectorCC,"BD_CC","Detect new blob by tracking CC of FG mask"},
{cvCreateBlobDetectorSimple,"BD_Simple","Detect new blob by uniform moving of connected components of FG mask"},
{NULL,NULL,NULL}
};
/* List of BLOB TRACKING modules: */
typedef struct DefModule_BlobTracker
{
CvBlobTracker* (*create)();
const char* nickname;
const char* description;
} DefModule_BlobTracker;
DefModule_BlobTracker BlobTracker_Modules[] =
{
{cvCreateBlobTrackerCCMSPF,"CCMSPF","connected component tracking and MSPF resolver for collision"},
{cvCreateBlobTrackerCC,"CC","Simple connected component tracking"},
{cvCreateBlobTrackerMS,"MS","Mean shift algorithm "},
{cvCreateBlobTrackerMSFG,"MSFG","Mean shift algorithm with FG mask using"},
{cvCreateBlobTrackerMSPF,"MSPF","Particle filtering based on MS weight"},
{NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY GENERATION modules: */
typedef struct DefModule_BlobTrackGen
{
CvBlobTrackGen* (*create)();
const char* nickname;
const char* description;
} DefModule_BlobTrackGen;
DefModule_BlobTrackGen BlobTrackGen_Modules[] =
{
{cvCreateModuleBlobTrackGenYML,"YML","Generate track record in YML format as synthetic video data"},
{cvCreateModuleBlobTrackGen1,"RawTracks","Generate raw track record (x,y,sx,sy),()... in each line"},
{NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY POST PROCESSING modules: */
typedef struct DefModule_BlobTrackPostProc
{
CvBlobTrackPostProc* (*create)();
const char* nickname;
const char* description;
} DefModule_BlobTrackPostProc;
DefModule_BlobTrackPostProc BlobTrackPostProc_Modules[] =
{
{cvCreateModuleBlobTrackPostProcKalman,"Kalman","Kalman filtering of blob position and size"},
{NULL,"None","No post processing filter"},
// {cvCreateModuleBlobTrackPostProcTimeAverRect,"TimeAverRect","Average by time using rectangle window"},
// {cvCreateModuleBlobTrackPostProcTimeAverExp,"TimeAverExp","Average by time using exponential window"},
{NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY ANALYSIS modules: */
// NOTE(review): declared but not referenced in the table below — presumably
// kept for manual experimentation; confirm before removing.
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisDetector();
typedef struct DefModule_BlobTrackAnalysis
{
CvBlobTrackAnalysis* (*create)();
const char* nickname;
const char* description;
} DefModule_BlobTrackAnalysis;
DefModule_BlobTrackAnalysis BlobTrackAnalysis_Modules[] =
{
{cvCreateModuleBlobTrackAnalysisHistPVS,"HistPVS","Histogram of 5D feature vector analysis (x,y,vx,vy,state)"},
{NULL,"None","No trajectory analiser"},
{cvCreateModuleBlobTrackAnalysisHistP,"HistP","Histogram of 2D feature vector analysis (x,y)"},
{cvCreateModuleBlobTrackAnalysisHistPV,"HistPV","Histogram of 4D feature vector analysis (x,y,vx,vy)"},
{cvCreateModuleBlobTrackAnalysisHistSS,"HistSS","Histogram of 4D feature vector analysis (startpos,endpos)"},
{cvCreateModuleBlobTrackAnalysisTrackDist,"TrackDist","Compare tracks directly"},
{cvCreateModuleBlobTrackAnalysisIOR,"IOR","Integrator (by OR operation) of several analysers "},
{NULL,NULL,NULL}
};
/* List of Blob Trajectory ANALYSIS modules: */
/*================= END MODULES DESCRIPTION ===================================*/
/* Run pipeline on all frames: */
// Pumps frames from pCap through pTracker until the stream ends or ESC is
// pressed; optionally writes the FG mask (fgavi_name) and the annotated
// frames (btavi_name) to AVI files. Pressing any other key switches to
// single-frame stepping; 'r' resumes free running. Always returns 0.
static int RunBlobTrackingAuto( CvCapture* pCap, CvBlobTrackerAuto* pTracker,char* fgavi_name = NULL, char* btavi_name = NULL )
{
int OneFrameProcess = 0;
int key;
int FrameNum = 0;
CvVideoWriter* pFGAvi = NULL;
CvVideoWriter* pBTAvi = NULL;
//cvNamedWindow( "FG", 0 );
/* Main loop: */
for( FrameNum=0; pCap && (key=cvWaitKey(OneFrameProcess?0:1))!=27;
FrameNum++)
{ /* Main loop: */
IplImage* pImg = NULL;
IplImage* pMask = NULL;
if(key!=-1)
{
OneFrameProcess = 1;
if(key=='r')OneFrameProcess = 0;
}
pImg = cvQueryFrame(pCap);
if(pImg == NULL) break;
/* Process: */
pTracker->Process(pImg, pMask);
if(fgavi_name)
if(pTracker->GetFGMask())
{ /* Debug FG: */
IplImage* pFG = pTracker->GetFGMask();
CvSize S = cvSize(pFG->width,pFG->height);
static IplImage* pI = NULL;
if(pI==NULL)pI = cvCreateImage(S,pFG->depth,3);
cvCvtColor( pFG, pI, CV_GRAY2BGR );
if(fgavi_name)
{ /* Save fg to avi file: */
if(pFGAvi==NULL)
{
pFGAvi=cvCreateVideoWriter(
fgavi_name,
CV_FOURCC('x','v','i','d'),
25,
S );
}
cvWriteFrame( pFGAvi, pI );
}
if(pTracker->GetBlobNum()>0)
{ /* Draw detected blobs: */
int i;
for(i=pTracker->GetBlobNum();i>0;i--)
{
CvBlob* pB = pTracker->GetBlob(i-1);
CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB));
CvSize s = cvSize(MAX(1,cvRound(CV_BLOB_RX(pB))), MAX(1,cvRound(CV_BLOB_RY(pB))));
int c = cvRound(255*pTracker->GetState(CV_BLOB_ID(pB)));
cvEllipse( pI,
p,
s,
0, 0, 360,
CV_RGB(c,255-c,0), cvRound(1+(3*c)/255) );
} /* Next blob: */;
}
cvNamedWindow( "FG",0);
cvShowImage( "FG",pI);
} /* Debug FG. */
/* Draw debug info: */
if(pImg)
{ /* Draw all information about test sequence: */
char str[1024];
int line_type = CV_AA; // Change it to 8 to see non-antialiased graphics.
CvFont font;
int i;
IplImage* pI = cvCloneImage(pImg);
cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type );
// Coordinates below are fixed-point with 8 fractional bits (shift=8).
for(i=pTracker->GetBlobNum(); i>0; i--)
{
CvSize TextSize;
CvBlob* pB = pTracker->GetBlob(i-1);
CvPoint p = cvPoint(cvRound(pB->x*256),cvRound(pB->y*256));
CvSize s = cvSize(MAX(1,cvRound(CV_BLOB_RX(pB)*256)), MAX(1,cvRound(CV_BLOB_RY(pB)*256)));
int c = cvRound(255*pTracker->GetState(CV_BLOB_ID(pB)));
// NOTE(review): thickness uses (3*0)/255, i.e. always 1, while the FG
// branch above uses (3*c)/255 — confirm whether the 0 is intentional.
cvEllipse( pI,
p,
s,
0, 0, 360,
CV_RGB(c,255-c,0), cvRound(1+(3*0)/255), CV_AA, 8 );
p.x >>= 8;
p.y >>= 8;
s.width >>= 8;
s.height >>= 8;
sprintf(str,"%03d",CV_BLOB_ID(pB));
cvGetTextSize( str, &font, &TextSize, NULL );
p.y -= s.height;
cvPutText( pI, str, p, &font, CV_RGB(0,255,255));
{
const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));
if(pS)
{
// Print the (possibly multi-line) state description line by line.
char* pStr = strdup(pS);
char* pStrFree = pStr;
while (pStr && strlen(pStr) > 0)
{
char* str_next = strchr(pStr,'\n');
if(str_next)
{
str_next[0] = 0;
str_next++;
}
p.y += TextSize.height+1;
cvPutText( pI, pStr, p, &font, CV_RGB(0,255,255));
pStr = str_next;
}
free(pStrFree);
}
}
} /* Next blob. */;
cvNamedWindow( "Tracking", 0);
cvShowImage( "Tracking",pI );
if(btavi_name && pI)
{ /* Save to avi file: */
CvSize S = cvSize(pI->width,pI->height);
if(pBTAvi==NULL)
{
pBTAvi=cvCreateVideoWriter(
btavi_name,
CV_FOURCC('x','v','i','d'),
25,
S );
}
cvWriteFrame( pBTAvi, pI );
}
cvReleaseImage(&pI);
} /* Draw all information about test sequence. */
} /* Main loop. */
if(pFGAvi)cvReleaseVideoWriter( &pFGAvi );
if(pBTAvi)cvReleaseVideoWriter( &pBTAvi );
return 0;
} /* RunBlobTrackingAuto */
/* Read parameters from command line
* and transfer to specified module:
*/
// Scans argv for arguments of the form "<prefix>:<param>=<value>" and, for
// every <param> that module pM actually declares, stores <value> via
// SetParamStr. Arguments that do not match the prefix or a declared
// parameter name are silently ignored. Calls pM->ParamUpdate() once at the end.
static void set_params(int argc, char* argv[], CvVSModule* pM, const char* prefix, const char* module)
{
int prefix_len = strlen(prefix);
int i;
for(i=0; i<argc; ++i)
{
int j;
char* ptr_eq = NULL;
int cmd_param_len=0;
char* cmd = argv[i];
if(MY_STRNICMP(prefix,cmd,prefix_len)!=0) continue;
cmd += prefix_len;
if(cmd[0]!=':')continue;
cmd++;
// cmd now points at "<param>=<value>"; measure the name part.
ptr_eq = strchr(cmd,'=');
if(ptr_eq)cmd_param_len = ptr_eq-cmd;
for(j=0; ; ++j)
{
int param_len;
const char* param = pM->GetParamName(j);
if(param==NULL) break;
param_len = strlen(param);
if(cmd_param_len!=param_len) continue;
if(MY_STRNICMP(param,cmd,param_len)!=0) continue;
// Advance past "<param>=" and hand the value string to the module.
cmd+=param_len;
if(cmd[0]!='=')continue;
cmd++;
pM->SetParamStr(param,cmd);
printf("%s:%s param set to %g\n",module,param,pM->GetParam(param));
}
}
pM->ParamUpdate();
} /* set_params */
/* Print all parameter values for given module: */
// Prints every parameter of pM (name plus its string value, or its numeric
// value when no string form exists) to stdout and, when log_name is
// non-NULL, appends the same lines to that file.
static void print_params(CvVSModule* pM, const char* module, const char* log_name)
{
int i;
// Bail out before opening the log when the module declares no parameters;
// the original opened the file first and leaked the FILE* on this path.
if(pM->GetParamName(0) == NULL ) return;
FILE* log = log_name?fopen(log_name,"at"):NULL;
printf("%s(%s) module parameters:\n",module,pM->GetNickName());
if(log)
fprintf(log,"%s(%s) module parameters:\n",module,pM->GetNickName());
for (i=0; ; ++i)
{
const char* param = pM->GetParamName(i);
const char* str = param?pM->GetParamStr(param):NULL;
if(param == NULL)break;
if(str)
{
printf(" %s: %s\n",param,str);
if(log)
fprintf(log," %s: %s\n",param,str);
}
else
{
printf(" %s: %g\n",param,pM->GetParam(param));
if(log)
fprintf(log," %s: %g\n",param,pM->GetParam(param));
}
}
if(log) fclose(log);
} /* print_params */
int main(int argc, char* argv[])
{ /* Main function: */
CvCapture* pCap = NULL;
CvBlobTrackerAutoParam1 param = {0};
CvBlobTrackerAuto* pTracker = NULL;
float scale = 1;
const char* scale_name = NULL;
char* yml_name = NULL;
char** yml_video_names = NULL;
int yml_video_num = 0;
char* avi_name = NULL;
const char* fg_name = NULL;
char* fgavi_name = NULL;
char* btavi_name = NULL;
const char* bd_name = NULL;
const char* bt_name = NULL;
const char* btgen_name = NULL;
const char* btpp_name = NULL;
const char* bta_name = NULL;
char* bta_data_name = NULL;
char* track_name = NULL;
char* comment_name = NULL;
char* FGTrainFrames = NULL;
char* log_name = NULL;
char* savestate_name = NULL;
char* loadstate_name = NULL;
const char* bt_corr = NULL;
DefModule_FGDetector* pFGModule = NULL;
DefModule_BlobDetector* pBDModule = NULL;
DefModule_BlobTracker* pBTModule = NULL;
DefModule_BlobTrackPostProc* pBTPostProcModule = NULL;
DefModule_BlobTrackGen* pBTGenModule = NULL;
DefModule_BlobTrackAnalysis* pBTAnalysisModule = NULL;
cvInitSystem(argc, argv);
if(argc < 2)
{ /* Print help: */
int i;
printf("blobtrack [fg=<fg_name>] [bd=<bd_name>]\n"
" [bt=<bt_name>] [btpp=<btpp_name>]\n"
" [bta=<bta_name>\n"
" [bta_data=<bta_data_name>\n"
" [bt_corr=<bt_corr_way>]\n"
" [btgen=<btgen_name>]\n"
" [track=<track_file_name>]\n"
" [scale=<scale val>] [noise=<noise_name>] [IVar=<IVar_name>]\n"
" [FGTrainFrames=<FGTrainFrames>]\n"
" [btavi=<avi output>] [fgavi=<avi output on FG>]\n"
" <avi_file>\n");
printf(" <bt_corr_way> is way of blob position corrrection for \"Blob Tracking\" module\n"
" <bt_corr_way>=none,PostProcRes\n"
" <FGTrainFrames> is number of frames for FG training\n"
" <track_file_name> is file name for save tracked trajectories\n"
" <bta_data> is file name for data base of trajectory analysis module\n"
" <avi_file> is file name of avi to process by BlobTrackerAuto\n");
puts("\nModules:");
#define PR(_name,_m,_mt)\
printf("<%s> is \"%s\" module name and can be:\n",_name,_mt);\
for(i=0; _m[i].nickname; ++i)\
{\
printf(" %d. %s",i+1,_m[i].nickname);\
if(_m[i].description)printf(" - %s",_m[i].description);\
printf("\n");\
}
PR("fg_name",FGDetector_Modules,"FG/BG Detection");
PR("bd_name",BlobDetector_Modules,"Blob Entrance Detection");
PR("bt_name",BlobTracker_Modules,"Blob Tracking");
PR("btpp_name",BlobTrackPostProc_Modules, "Blob Trajectory Post Processing");
PR("btgen_name",BlobTrackGen_Modules, "Blob Trajectory Generation");
PR("bta_name",BlobTrackAnalysis_Modules, "Blob Trajectory Analysis");
#undef PR
return 0;
} /* Print help. */
{ /* Parse arguments: */
int i;
for(i=1; i<argc; ++i)
{
int bParsed = 0;
size_t len = strlen(argv[i]);
#define RO(_n1,_n2) if(strncmp(argv[i],_n1,strlen(_n1))==0) {_n2 = argv[i]+strlen(_n1);bParsed=1;};
RO("fg=",fg_name);
RO("fgavi=",fgavi_name);
RO("btavi=",btavi_name);
RO("bd=",bd_name);
RO("bt=",bt_name);
RO("bt_corr=",bt_corr);
RO("btpp=",btpp_name);
RO("bta=",bta_name);
RO("bta_data=",bta_data_name);
RO("btgen=",btgen_name);
RO("track=",track_name);
RO("comment=",comment_name);
RO("FGTrainFrames=",FGTrainFrames);
RO("log=",log_name);
RO("savestate=",savestate_name);
RO("loadstate=",loadstate_name);
#undef RO
{
char* ext = argv[i] + len-4;
if( strrchr(argv[i],'=') == NULL &&
!bParsed &&
(len>3 && (MY_STRICMP(ext,".avi") == 0 )))
{
avi_name = argv[i];
break;
}
} /* Next argument. */
}
} /* Parse arguments. */
if(track_name)
{ /* Set Trajectory Generator module: */
int i;
if(!btgen_name)btgen_name=BlobTrackGen_Modules[0].nickname;
for(i=0; BlobTrackGen_Modules[i].nickname; ++i)
{
if(MY_STRICMP(BlobTrackGen_Modules[i].nickname,btgen_name)==0)
pBTGenModule = BlobTrackGen_Modules + i;
}
} /* Set Trajectory Generato module. */
/* Initialize postprocessing module if tracker
* correction by postprocessing is required.
*/
if(bt_corr && MY_STRICMP(bt_corr,"PostProcRes")!=0 && !btpp_name)
{
btpp_name = bt_corr;
if(MY_STRICMP(btpp_name,"none")!=0)bt_corr = "PostProcRes";
}
{ /* Set default parameters for one processing: */
if(!bt_corr) bt_corr = "none";
if(!fg_name) fg_name = FGDetector_Modules[0].nickname;
if(!bd_name) bd_name = BlobDetector_Modules[0].nickname;
if(!bt_name) bt_name = BlobTracker_Modules[0].nickname;
if(!btpp_name) btpp_name = BlobTrackPostProc_Modules[0].nickname;
if(!bta_name) bta_name = BlobTrackAnalysis_Modules[0].nickname;
if(!scale_name) scale_name = "1";
}
if(scale_name)
scale = (float)atof(scale_name);
for(pFGModule=FGDetector_Modules; pFGModule->nickname; ++pFGModule)
if( fg_name && MY_STRICMP(fg_name,pFGModule->nickname)==0 ) break;
for(pBDModule=BlobDetector_Modules; pBDModule->nickname; ++pBDModule)
if( bd_name && MY_STRICMP(bd_name,pBDModule->nickname)==0 ) break;
for(pBTModule=BlobTracker_Modules; pBTModule->nickname; ++pBTModule)
if( bt_name && MY_STRICMP(bt_name,pBTModule->nickname)==0 ) break;
for(pBTPostProcModule=BlobTrackPostProc_Modules; pBTPostProcModule->nickname; ++pBTPostProcModule)
if( btpp_name && MY_STRICMP(btpp_name,pBTPostProcModule->nickname)==0 ) break;
for(pBTAnalysisModule=BlobTrackAnalysis_Modules; pBTAnalysisModule->nickname; ++pBTAnalysisModule)
if( bta_name && MY_STRICMP(bta_name,pBTAnalysisModule->nickname)==0 ) break;
/* Create source video: */
if(avi_name)
pCap = cvCaptureFromFile(avi_name);
if(pCap==NULL)
{
printf("Can't open %s file\n",avi_name);
return -1;
}
{ /* Display parameters: */
int i;
FILE* log = log_name?fopen(log_name,"at"):NULL;
if(log)
{ /* Print to log file: */
fprintf(log,"\n=== Blob Tracking pipline in processing mode===\n");
if(avi_name)
{
fprintf(log,"AVIFile: %s\n",avi_name);
}
fprintf(log,"FGDetector: %s\n", pFGModule->nickname);
fprintf(log,"BlobDetector: %s\n", pBDModule->nickname);
fprintf(log,"BlobTracker: %s\n", pBTModule->nickname);
fprintf(log,"BlobTrackPostProc: %s\n", pBTPostProcModule->nickname);
fprintf(log,"BlobCorrection: %s\n", bt_corr);
fprintf(log,"Blob Trajectory Generator: %s (%s)\n",
pBTGenModule?pBTGenModule->nickname:"None",
track_name?track_name:"none");
fprintf(log,"BlobTrackAnalysis: %s\n", pBTAnalysisModule->nickname);
fclose(log);
}
printf("\n=== Blob Tracking pipline in %s mode===\n","processing");
if(yml_name)
{
printf("ConfigFile: %s\n",yml_name);
printf("BG: %s\n",yml_video_names[0]);
printf("FG: ");
for(i=1;i<(yml_video_num);++i){printf("%s",yml_video_names[i]);if((i+1)<yml_video_num)printf("|");};
printf("\n");
}
if(avi_name)
{
printf("AVIFile: %s\n",avi_name);
}
printf("FGDetector: %s\n", pFGModule->nickname);
printf("BlobDetector: %s\n", pBDModule->nickname);
printf("BlobTracker: %s\n", pBTModule->nickname);
printf("BlobTrackPostProc: %s\n", pBTPostProcModule->nickname);
printf("BlobCorrection: %s\n", bt_corr);
printf("Blob Trajectory Generator: %s (%s)\n",
pBTGenModule?pBTGenModule->nickname:"None",
track_name?track_name:"none");
printf("BlobTrackAnalysis: %s\n", pBTAnalysisModule->nickname);
} /* Display parameters. */
{ /* Create autotracker module and its components: */
param.FGTrainFrames = FGTrainFrames?atoi(FGTrainFrames):0;
/* Create FG Detection module: */
param.pFG = pFGModule->create();
if(!param.pFG)
puts("Can not create FGDetector module");
param.pFG->SetNickName(pFGModule->nickname);
set_params(argc, argv, param.pFG, "fg", pFGModule->nickname);
/* Create Blob Entrance Detection module: */
param.pBD = pBDModule->create();
if(!param.pBD)
puts("Can not create BlobDetector module");
param.pBD->SetNickName(pBDModule->nickname);
set_params(argc, argv, param.pBD, "bd", pBDModule->nickname);
/* Create blob tracker module: */
param.pBT = pBTModule->create();
if(!param.pBT)
puts("Can not create BlobTracker module");
param.pBT->SetNickName(pBTModule->nickname);
set_params(argc, argv, param.pBT, "bt", pBTModule->nickname);
/* Create blob trajectory generation module: */
param.pBTGen = NULL;
if(pBTGenModule && track_name && pBTGenModule->create)
{
param.pBTGen = pBTGenModule->create();
param.pBTGen->SetFileName(track_name);
}
if(param.pBTGen)
{
param.pBTGen->SetNickName(pBTGenModule->nickname);
set_params(argc, argv, param.pBTGen, "btgen", pBTGenModule->nickname);
}
/* Create blob trajectory post processing module: */
param.pBTPP = NULL;
if(pBTPostProcModule && pBTPostProcModule->create)
{
param.pBTPP = pBTPostProcModule->create();
}
if(param.pBTPP)
{
param.pBTPP->SetNickName(pBTPostProcModule->nickname);
set_params(argc, argv, param.pBTPP, "btpp", pBTPostProcModule->nickname);
}
param.UsePPData = (bt_corr && MY_STRICMP(bt_corr,"PostProcRes")==0);
/* Create blob trajectory analysis module: */
param.pBTA = NULL;
if(pBTAnalysisModule && pBTAnalysisModule->create)
{
param.pBTA = pBTAnalysisModule->create();
param.pBTA->SetFileName(bta_data_name);
}
if(param.pBTA)
{
param.pBTA->SetNickName(pBTAnalysisModule->nickname);
set_params(argc, argv, param.pBTA, "bta", pBTAnalysisModule->nickname);
}
/* Create whole pipline: */
pTracker = cvCreateBlobTrackerAuto1(&param);
if(!pTracker)
puts("Can not create BlobTrackerAuto");
}
{ /* Load states of each module from state file: */
CvFileStorage* fs = NULL;
if(loadstate_name)
fs=cvOpenFileStorage(loadstate_name,NULL,CV_STORAGE_READ);
if(fs)
{
printf("Load states for modules...\n");
if(param.pBT)
{
CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTracker");
param.pBT->LoadState(fs,fn);
}
if(param.pBTA)
{
CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTrackAnalyser");
param.pBTA->LoadState(fs,fn);
}
if(pTracker)
{
CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTrackerAuto");
pTracker->LoadState(fs,fn);
}
cvReleaseFileStorage(&fs);
printf("... Modules states loaded\n");
}
} /* Load states of each module. */
{ /* Print module parameters: */
struct DefMMM
{
CvVSModule* pM;
const char* name;
} Modules[] = {
{(CvVSModule*)param.pFG,"FGdetector"},
{(CvVSModule*)param.pBD,"BlobDetector"},
{(CvVSModule*)param.pBT,"BlobTracker"},
{(CvVSModule*)param.pBTGen,"TrackGen"},
{(CvVSModule*)param.pBTPP,"PostProcessing"},
{(CvVSModule*)param.pBTA,"TrackAnalysis"},
{NULL,NULL}
};
int i;
for(i=0; Modules[i].name; ++i)
{
if(Modules[i].pM)
print_params(Modules[i].pM,Modules[i].name,log_name);
}
} /* Print module parameters. */
/* Run pipeline: */
RunBlobTrackingAuto( pCap, pTracker, fgavi_name, btavi_name );
{ /* Save state and release modules: */
CvFileStorage* fs = NULL;
if(savestate_name)
{
fs=cvOpenFileStorage(savestate_name,NULL,CV_STORAGE_WRITE);
}
if(fs)
{
cvStartWriteStruct(fs,"BlobTracker",CV_NODE_MAP);
if(param.pBT)param.pBT->SaveState(fs);
cvEndWriteStruct(fs);
cvStartWriteStruct(fs,"BlobTrackerAuto",CV_NODE_MAP);
if(pTracker)pTracker->SaveState(fs);
cvEndWriteStruct(fs);
cvStartWriteStruct(fs,"BlobTrackAnalyser",CV_NODE_MAP);
if(param.pBTA)param.pBTA->SaveState(fs);
cvEndWriteStruct(fs);
cvReleaseFileStorage(&fs);
}
if(param.pBT)cvReleaseBlobTracker(&param.pBT);
if(param.pBD)cvReleaseBlobDetector(&param.pBD);
if(param.pBTGen)cvReleaseBlobTrackGen(&param.pBTGen);
if(param.pBTA)cvReleaseBlobTrackAnalysis(&param.pBTA);
if(param.pFG)cvReleaseFGDetector(&param.pFG);
if(pTracker)cvReleaseBlobTrackerAuto(&pTracker);
} /* Save state and release modules. */
if(pCap)
cvReleaseCapture(&pCap);
return 0;
} /* main() */

BIN
samples/c/box.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

BIN
samples/c/box_in_scene.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

16
samples/c/build_all.sh Normal file
View File

@@ -0,0 +1,16 @@
#!/bin/bash
# Build all OpenCV C/C++ samples in this directory, or a single C sample
# given as $1 (with or without the .c extension).
#
# Fixes over the previous version:
#  - shebang was /bin/sh but the script uses the bash-only [[ ]] construct;
#  - `[[ $# > 0 ]]` compares lexicographically, not numerically — use -gt.
if [ "$#" -gt 0 ] ; then
    base=`basename $1 .c`
    echo "compiling $base"
    gcc -ggdb `pkg-config opencv --cflags --libs` $base.c -o $base
else
    for i in *.c; do
        echo "compiling $i"
        gcc -ggdb `pkg-config --cflags opencv` -o `basename $i .c` $i `pkg-config --libs opencv`;
    done
    for i in *.cpp; do
        echo "compiling $i"
        g++ -ggdb `pkg-config --cflags opencv` -o `basename $i .cpp` $i `pkg-config --libs opencv`;
    done
fi

505
samples/c/calibration.cpp Normal file
View File

@@ -0,0 +1,505 @@
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
// example command line (for copy-n-paste):
// calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]
/* The list of views may look as following (discard the starting and ending ------ separators):
-------------------
view000.png
view001.png
#view002.png
view003.png
view010.png
one_extra_view.jpg
-------------------
that is, the file will contain 6 lines, view002.png will not be used for calibration,
other ones will be (those, in which the chessboard pattern will be found)
*/
/* States of the interactive calibration loop: searching for the board,
 * collecting views, and calibrated (undistortion available). */
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
double compute_reprojection_error( const CvMat* object_points,
const CvMat* rot_vects, const CvMat* trans_vects,
const CvMat* camera_matrix, const CvMat* dist_coeffs,
const CvMat* image_points, const CvMat* point_counts,
CvMat* per_view_errors )
{
CvMat* image_points2 = cvCreateMat( image_points->rows,
image_points->cols, image_points->type );
int i, image_count = rot_vects->rows, points_so_far = 0;
double total_err = 0, err;
for( i = 0; i < image_count; i++ )
{
CvMat object_points_i, image_points_i, image_points2_i;
int point_count = point_counts->data.i[i];
CvMat rot_vect, trans_vect;
cvGetCols( object_points, &object_points_i,
points_so_far, points_so_far + point_count );
cvGetCols( image_points, &image_points_i,
points_so_far, points_so_far + point_count );
cvGetCols( image_points2, &image_points2_i,
points_so_far, points_so_far + point_count );
points_so_far += point_count;
cvGetRow( rot_vects, &rot_vect, i );
cvGetRow( trans_vects, &trans_vect, i );
cvProjectPoints2( &object_points_i, &rot_vect, &trans_vect,
camera_matrix, dist_coeffs, &image_points2_i,
0, 0, 0, 0, 0 );
err = cvNorm( &image_points_i, &image_points2_i, CV_L1 );
if( per_view_errors )
per_view_errors->data.db[i] = err/point_count;
total_err += err;
}
cvReleaseMat( &image_points2 );
return total_err/points_so_far;
}
/* Run camera calibration on the collected board views.
 *
 * image_points_seq - sequence of per-view corner arrays (board_size corners each)
 * img_size         - size of the calibration images
 * board_size       - inner-corner grid dimensions
 * square_size      - physical size of one board square
 * aspect_ratio     - fx/fy ratio, used only with CV_CALIB_FIX_ASPECT_RATIO
 * flags            - cvCalibrateCamera2 flags
 * camera_matrix / dist_coeffs - output intrinsics (caller-allocated headers)
 * extr_params      - output: newly allocated image_count x 6 matrix
 *                    (rotation vector | translation vector) per view
 * reproj_errs      - output: newly allocated per-view mean reprojection errors
 * avg_reproj_err   - output: overall mean reprojection error
 *
 * Returns nonzero if all estimated values are finite (cvCheckArr passes).
 */
int run_calibration( CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
                     float square_size, float aspect_ratio, int flags,
                     CvMat* camera_matrix, CvMat* dist_coeffs, CvMat** extr_params,
                     CvMat** reproj_errs, double* avg_reproj_err )
{
    int code;
    int image_count = image_points_seq->total;
    int point_count = board_size.width*board_size.height;
    /* Pack all views into single 1 x (views*points) matrices. */
    CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2 );
    CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3 );
    CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1 );
    CvMat rot_vects, trans_vects;
    int i, j, k;
    CvSeqReader reader;
    cvStartReadSeq( image_points_seq, &reader );

    // initialize arrays of points
    for( i = 0; i < image_count; i++ )
    {
        /* Each sequence element is a flat array of board corners. */
        CvPoint2D32f* src_img_pt = (CvPoint2D32f*)reader.ptr;
        CvPoint2D32f* dst_img_pt = ((CvPoint2D32f*)image_points->data.fl) + i*point_count;
        CvPoint3D32f* obj_pt = ((CvPoint3D32f*)object_points->data.fl) + i*point_count;

        /* Ideal board geometry: grid of squares in the Z=0 plane. */
        for( j = 0; j < board_size.height; j++ )
            for( k = 0; k < board_size.width; k++ )
            {
                *obj_pt++ = cvPoint3D32f(j*square_size, k*square_size, 0);
                *dst_img_pt++ = *src_img_pt++;
            }
        CV_NEXT_SEQ_ELEM( image_points_seq->elem_size, reader );
    }

    /* Every view contributes the same number of corners. */
    cvSet( point_counts, cvScalar(point_count) );

    /* rot_vects / trans_vects are column views into *extr_params. */
    *extr_params = cvCreateMat( image_count, 6, CV_32FC1 );
    cvGetCols( *extr_params, &rot_vects, 0, 3 );
    cvGetCols( *extr_params, &trans_vects, 3, 6 );

    cvZero( camera_matrix );
    cvZero( dist_coeffs );

    if( flags & CV_CALIB_FIX_ASPECT_RATIO )
    {
        /* Seed fx/fy with the requested ratio; cvCalibrateCamera2 keeps it. */
        camera_matrix->data.db[0] = aspect_ratio;
        camera_matrix->data.db[4] = 1.;
    }

    cvCalibrateCamera2( object_points, image_points, point_counts,
                        img_size, camera_matrix, dist_coeffs,
                        &rot_vects, &trans_vects, flags );

    /* Success = no Inf/NaN anywhere in the estimated parameters. */
    code = cvCheckArr( camera_matrix, CV_CHECK_QUIET ) &&
        cvCheckArr( dist_coeffs, CV_CHECK_QUIET ) &&
        cvCheckArr( *extr_params, CV_CHECK_QUIET );

    *reproj_errs = cvCreateMat( 1, image_count, CV_64FC1 );
    *avg_reproj_err =
        compute_reprojection_error( object_points, &rot_vects, &trans_vects,
            camera_matrix, dist_coeffs, image_points, point_counts, *reproj_errs );

    cvReleaseMat( &object_points );
    cvReleaseMat( &image_points );
    cvReleaseMat( &point_counts );

    return code;
}
/* Write calibration results to a YAML/XML file via CvFileStorage.
 *
 * Saves timestamp, board/image geometry, flags, intrinsics, distortion,
 * average and (optionally) per-view reprojection errors, and optionally the
 * extrinsic parameters and the raw detected corner projections.
 *
 * If the output file cannot be opened, reports the error and returns
 * without writing (the previous version dereferenced the NULL storage).
 */
void save_camera_params( const char* out_filename, int image_count, CvSize img_size,
                         CvSize board_size, float square_size,
                         float aspect_ratio, int flags,
                         const CvMat* camera_matrix, CvMat* dist_coeffs,
                         const CvMat* extr_params, const CvSeq* image_points_seq,
                         const CvMat* reproj_errs, double avg_reproj_err )
{
    CvFileStorage* fs = cvOpenFileStorage( out_filename, 0, CV_STORAGE_WRITE );
    time_t t;
    if( !fs )
    {
        /* Guard: cvOpenFileStorage returns NULL on failure. */
        fprintf( stderr, "Could not open '%s' for writing the camera parameters\n",
                 out_filename );
        return;
    }

    time( &t );
    struct tm *t2 = localtime( &t );
    char buf[1024];
    strftime( buf, sizeof(buf)-1, "%c", t2 );
    cvWriteString( fs, "calibration_time", buf );

    cvWriteInt( fs, "image_count", image_count );
    cvWriteInt( fs, "image_width", img_size.width );
    cvWriteInt( fs, "image_height", img_size.height );
    cvWriteInt( fs, "board_width", board_size.width );
    cvWriteInt( fs, "board_height", board_size.height );
    cvWriteReal( fs, "square_size", square_size );

    if( flags & CV_CALIB_FIX_ASPECT_RATIO )
        cvWriteReal( fs, "aspect_ratio", aspect_ratio );

    if( flags != 0 )
    {
        /* Human-readable summary of the calibration flags as a comment. */
        sprintf( buf, "flags: %s%s%s%s",
            flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
            flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspect_ratio" : "",
            flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
            flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
        cvWriteComment( fs, buf, 0 );
    }

    cvWriteInt( fs, "flags", flags );

    cvWrite( fs, "camera_matrix", camera_matrix );
    cvWrite( fs, "distortion_coefficients", dist_coeffs );

    cvWriteReal( fs, "avg_reprojection_error", avg_reproj_err );
    if( reproj_errs )
        cvWrite( fs, "per_view_reprojection_errors", reproj_errs );

    if( extr_params )
    {
        cvWriteComment( fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
        cvWrite( fs, "extrinsic_parameters", extr_params );
    }

    if( image_points_seq )
    {
        cvWriteComment( fs, "the array of board corners projections used for calibration", 0 );
        assert( image_points_seq->total == image_count );
        CvMat* image_points = cvCreateMat( 1,
            image_count*board_size.width*board_size.height, CV_32FC2 );
        cvCvtSeqToArray( image_points_seq, image_points->data.fl );
        cvWrite( fs, "image_points", image_points );
        cvReleaseMat( &image_points );
    }

    cvReleaseFileStorage( &fs );
}
/* Interactive/batch camera calibration.
 *
 * Input is a camera, a video file, or a text list of board images; detected
 * chessboard corners are accumulated and, once enough views are gathered,
 * intrinsics are estimated and saved with save_camera_params().
 *
 * Fix over the previous version: the numeric options (-w, -h, -n, -d) were
 * read with sscanf "%u" into signed int variables — a conversion-specifier/
 * argument-type mismatch (undefined behavior per the C standard); "%d" is
 * used now.
 */
int main( int argc, char** argv )
{
    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help =
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
        printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    /* Parse command-line options. */
    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            /* "%d": board_size.width is a signed int. */
            if( sscanf( argv[++i], "%d", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%d", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%d", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%d", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    /* Choose the input: video file, image-list text file, or live camera. */
    if( input_filename )
    {
        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            /* Image list: use however many views the file provides. */
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );

    /* Main capture/detection loop: one iteration per frame or listed image. */
    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key;

        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                /* Lines starting with '#' are commented-out views. */
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
            }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                /* Copy the frame (and normalize origin) so we may draw on it. */
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view )
        {
            /* No more input: calibrate with whatever views were collected. */
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        /* Accept the view if the board was found and (for live/video input)
         * at least `delay` ms elapsed since the previous accepted view. */
        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%03d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        /* Status text in the lower-right corner. */
        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        /* Invert the image for one frame as visual feedback of a capture. */
        if( blink )
            cvNot( view, view );

        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
        }

        cvShowImage( "Image View", view );
        key = cvWaitKey(capture ? 50 : 500);

        if( key == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistort_image = !undistort_image;

        if( capture && key == 'g' )
        {
            /* Restart capturing from scratch. */
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        /* Reached via goto with view==NULL when the input is exhausted. */
        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    if( storage )
        cvReleaseMemStorage( &storage );
    return 0;
}

View File

@@ -0,0 +1,322 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include "cv.h"
#include "highgui.h"
using namespace cv;
using namespace std;
namespace cv
{
/* Copy of the class defined in tests/cv/chessboardgenerator.h.
 * Renders a synthetic chessboard into a background image, as seen through
 * a given camera matrix and distortion model, and reports the projected
 * inner-corner positions (ground truth for calibration tests). */
class ChessBoardGenerator
{
public:
    double sensorWidth;                 // virtual sensor width (units per header copy; confirm against tests/cv)
    double sensorHeight;                // virtual sensor height
    size_t squareEdgePointsNum;         // samples per square edge when rasterizing
    double min_cos;                     // minimum |cos| of board normal vs. optical axis
    mutable double cov;                 // presumably controls board placement spread — verify in source copy
    Size patternSize;                   // board size in squares
    int rendererResolutionMultiplier;   // supersampling factor for rendering

    ChessBoardGenerator(const Size& patternSize = Size(8, 6));
    /* Render a board over bg; fills corners with projected inner corners. */
    Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
    /* Grid size of inner corners (patternSize - 1 in each dimension). */
    Size cornersSize() const;
private:
    void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
    Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
        const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
        float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
    /* Random orthonormal in-plane basis for the board. */
    void generateBasis(Point3f& pb1, Point3f& pb2) const;
    Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
    Mat rvec, tvec;                     // board pose used for projectPoints
};
};
/* Test-scene configuration: rendered image size, chessboard pattern size
 * (in squares, passed to ChessBoardGenerator), and number of boards. */
const Size imgSize(800, 600);
const Size brdSize(8, 7);
const size_t brds_num = 20;
/* Stream every element of the matrix in row-major order, each followed by
 * a single space (no newlines). */
template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
{
    for(int r = 0; r < mat.rows; ++r)
    {
        for(int c = 0; c < mat.cols; ++c)
            out << mat(r, c) << " ";
    }
    return out;
}
/* Synthetic calibration test: render brds_num artificial chessboards with a
 * known camera model, detect their corners, run calibrateCamera, and print
 * the estimated vs. original parameters.
 *
 * Fix over the previous version: the output labels were misspelled
 * "Estiamted" — corrected to "Estimated".
 */
int main()
{
    cout << "Initializing background...";
    Mat background(imgSize, CV_8UC3);
    randu(background, Scalar::all(32), Scalar::all(255));
    GaussianBlur(background, background, Size(5, 5), 2);
    cout << "Done" << endl;

    cout << "Initializing chess board generator...";
    ChessBoardGenerator cbg(brdSize);
    cbg.rendererResolutionMultiplier = 4;
    cout << "Done" << endl;

    /* camera params */
    Mat_<double> camMat(3, 3);
    camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;

    Mat_<double> distCoeffs(1, 5);
    distCoeffs << 1.2, 0.2, 0., 0., 0.;

    cout << "Generating chessboards...";
    vector<Mat> boards(brds_num);
    vector<Point2f> tmp;
    for(size_t i = 0; i < brds_num; ++i)
        cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
    cout << "Done" << endl;

    /* Ground-truth 3D corners: unit grid in the Z=0 plane. */
    vector<Point3f> chessboard3D;
    for(int j = 0; j < cbg.cornersSize().height; ++j)
        for(int i = 0; i < cbg.cornersSize().width; ++i)
            chessboard3D.push_back(Point3i(i, j, 0));

    /* init points */
    vector< vector<Point3f> > objectPoints;
    vector< vector<Point2f> > imagePoints;

    cout << endl << "Finding chessboards' corners...";
    for(size_t i = 0; i < brds_num; ++i)
    {
        cout << i;
        namedWindow("Current chessboard"); imshow("Current chessboard", boards[i]); waitKey(100);
        bool found = findChessboardCorners(boards[i], cbg.cornersSize(), tmp);
        if (found)
        {
            /* Only boards whose corners were detected contribute views. */
            imagePoints.push_back(tmp);
            objectPoints.push_back(chessboard3D);
            cout<< "-found ";
        }
        else
            cout<< "-not-found ";

        drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
        imshow("Current chessboard", boards[i]); waitKey(1000);
    }
    cout << "Done" << endl;
    cvDestroyAllWindows();

    Mat camMat_est;
    Mat distCoeffs_est;
    vector<Mat> rvecs, tvecs;

    cout << "Calibrating...";
    double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
    cout << "Done" << endl;

    cout << endl << "Average Reprojection error: " << rep_err/brds_num/cbg.cornersSize().area() << endl;
    cout << "==================================" << endl;
    cout << "Original camera matrix:\n" << camMat << endl;
    cout << "Original distCoeffs:\n" << distCoeffs << endl;
    cout << "==================================" << endl;
    cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
    cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;

    return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Copy of tests/cv/src/chessboardgenerator code. Just do not want to add dependency.
/* Construct a generator for the given pattern size (in squares).
 * min_cos = sqrt(2)/2 limits the board tilt to 45 degrees from the
 * optical axis; tvec starts at the origin. */
ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
    squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
    patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
{
    /* Identity rotation expressed as a Rodrigues vector. */
    Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
}
void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
{
Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
for(size_t n = 0; n < squareEdgePointsNum; ++n)
out.push_back( p1 + step * (float)n);
}
/* Grid of inner corners: one fewer than the number of squares per side. */
Size cv::ChessBoardGenerator::cornersSize() const
{
    return Size(patternSize.width-1, patternSize.height-1);
}
/* Functor scaling a 2D point by a fixed factor; used to upscale projected
 * contours to the supersampled renderer resolution. */
struct Mult
{
    float m;  // scale factor
    Mult(int mult) : m((float)mult) {}
    Point2f operator()(const Point2f& p)const { return p * m; }
};
/* Generate a random pair of unit vectors (pb1, pb2) spanning the board
 * plane. A random unit normal n is drawn until it is tilted less than
 * acos(min_cos) from the optical axis; pb1 and pb2 are then built as
 * normalized cross products orthogonal to n.
 *
 * Bug fix: pb2's x-component was previously divided by len_b1 instead of
 * len_b2, so pb2 was not a unit vector.
 */
void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
{
    RNG& rng = theRNG();

    Vec3f n;
    /* Rejection-sample a unit normal with |n.z| > min_cos. */
    for(;;)
    {
        n[0] = rng.uniform(-1.f, 1.f);
        n[1] = rng.uniform(-1.f, 1.f);
        n[2] = rng.uniform(-1.f, 1.f);
        float len = (float)norm(n);
        n[0]/=len;
        n[1]/=len;
        n[2]/=len;

        if (fabs(n[2]) > min_cos)
            break;
    }

    /* Any vector not parallel to n works as a helper to build the basis. */
    Vec3f n_temp = n; n_temp[0] += 100;
    Vec3f b1 = n.cross(n_temp);
    Vec3f b2 = n.cross(b1);
    float len_b1 = (float)norm(b1);
    float len_b2 = (float)norm(b2);

    pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
    pb2 = Point3f(b2[0]/len_b2, b2[1]/len_b2, b2[2]/len_b2);
}
// Renders a synthetic chessboard over background bg and returns the result.
// zero is the 3-D position of the board's corner; pb1/pb2 are unit vectors
// spanning the board plane; sqWidth/sqHeight are the square dimensions;
// `whole` holds the four outer 3-D corners of the full board; the projected
// inner corners are returned through `corners`.
// (NOTE: "generage" is a historical typo kept for interface compatibility.)
Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
                                                const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
                                                float sqWidth, float sqHeight, const vector<Point3f>& whole,
                                                vector<Point2f>& corners) const
{
    // Build the projected outline of every black square: (i + j) even.
    vector< vector<Point> > squares_black;
    for(int i = 0; i < patternSize.width; ++i)
        for(int j = 0; j < patternSize.height; ++j)
            if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
            {
                vector<Point3f> pts_square3d;
                vector<Point2f> pts_square2d;

                // 3-D corners of square (i, j) in the board plane.
                Point3f p1 = zero + (i + 0) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p2 = zero + (i + 1) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p3 = zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                Point3f p4 = zero + (i + 0) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                // Sample each side densely so lens distortion curves the
                // projected edges correctly.
                generateEdge(p1, p2, pts_square3d);
                generateEdge(p2, p3, pts_square3d);
                generateEdge(p3, p4, pts_square3d);
                generateEdge(p4, p1, pts_square3d);

                projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);

                // Simplify the dense projected polygon, then scale it up to
                // the super-sampled renderer resolution.
                squares_black.resize(squares_black.size() + 1);
                vector<Point2f> temp;
                approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
                transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
            }

    /* calculate corners */
    // Inner corners lie at every interior square junction; projected
    // positions are the ground truth returned to the caller.
    vector<Point3f> corners3d;
    for(int j = 0; j < patternSize.height - 1; ++j)
        for(int i = 0; i < patternSize.width - 1; ++i)
            corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
    corners.clear();
    projectPoints( Mat(corners3d), rvec, tvec, camMat, distCoeffs, corners);

    // Project the whole-board outline, which is filled white below before
    // the black squares are drawn on top of it.
    vector<Point3f> whole3d;
    vector<Point2f> whole2d;
    generateEdge(whole[0], whole[1], whole3d);
    generateEdge(whole[1], whole[2], whole3d);
    generateEdge(whole[2], whole[3], whole3d);
    generateEdge(whole[3], whole[0], whole3d);
    projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
    vector<Point2f> temp_whole2d;
    approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);

    vector< vector<Point > > whole_contour(1);
    transform(temp_whole2d.begin(), temp_whole2d.end(),
              back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));

    Mat result;
    if (rendererResolutionMultiplier == 1)
    {
        result = bg.clone();
        drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
        drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
    }
    else
    {
        // Draw at a higher resolution, then downsample with INTER_AREA so
        // square edges come out anti-aliased.
        Mat tmp;
        resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
        drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
        drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
        resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
    }
    return result;
}
// Generates one random chessboard image over bg: picks a random board pose
// inside the camera's field of view, shrinks the board until all four outer
// corners project into the image, and renders it. The projected inner
// corners are returned through `corners`.
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
{
    cov = min(cov, 0.8);   // cap the fraction of the FOV used for placement

    // Recover the field of view from the camera matrix and sensor size.
    double fovx, fovy, focalLen;
    Point2d principalPoint;
    double aspect;
    calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
                             fovx, fovy, focalLen, principalPoint, aspect);

    RNG& rng = theRNG();

    // Random distance plus random horizontal/vertical viewing angles drawn
    // from the cov-scaled field of view (degrees -> radians).
    float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
    float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
    float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);

    // Board center in camera coordinates.
    Point3f p;
    p.z = cos(ah) * d1;
    p.x = sin(ah) * d1;
    p.y = p.z * tan(av);

    // Random orthonormal basis of the board plane.
    Point3f pb1, pb2;
    generateBasis(pb1, pb2);

    // Initial half-extent sized from the distance and the narrower FOV axis.
    float cbHalfWidth = static_cast<float>(norm(p) * sin( min(fovx, fovy) * 0.5 * CV_PI / 180));
    float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;

    // Shrink the board 20% at a time until all four outer corners project
    // strictly inside the image.
    vector<Point3f> pts3d(4);
    vector<Point2f> pts2d(4);
    for(;;)
    {
        pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
        pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;
        /* can remake with better perf */
        projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);

        bool inrect1 = pts2d[0].x < bg.cols && pts2d[0].y < bg.rows && pts2d[0].x > 0 && pts2d[0].y > 0;
        bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
        bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
        bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;

        if ( inrect1 && inrect2 && inrect3 && inrect4)
            break;

        cbHalfWidth*=0.8f;
        cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
    }

    // Shrink the squares region relative to the outer outline (pts3d keeps
    // the pre-shrink size), leaving a border around the pattern.
    cbHalfWidth *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
    cbHalfHeight *= static_cast<float>(patternSize.height)/(patternSize.height + 1);

    // zero = board corner; per-square dimensions from the final extent.
    Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
    float sqWidth = 2 * cbHalfWidth/patternSize.width;
    float sqHeight = 2 * cbHalfHeight/patternSize.height;

    return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}

231
samples/c/camshiftdemo.c Normal file
View File

@@ -0,0 +1,231 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <ctype.h>
#endif
/* Shared state between main() and the mouse callback. */
IplImage *image = 0, *hsv = 0, *hue = 0, *mask = 0, *backproject = 0, *histimg = 0;
CvHistogram *hist = 0;

int backproject_mode = 0;   /* non-zero: display the back-projection instead of the video ('b' key) */
int select_object = 0;      /* non-zero while the user is dragging a selection rectangle */
int track_object = 0;       /* 0: idle, -1: new selection pending (rebuild histogram), 1: tracking */
int show_hist = 1;          /* histogram window visibility, toggled by 'h' */
CvPoint origin;             /* anchor corner of the current mouse drag */
CvRect selection;           /* user-selected region, clamped to the image */
CvRect track_window;        /* CamShift search window */
CvBox2D track_box;          /* oriented box of the tracked object */
CvConnectedComp track_comp;
int hdims = 16;             /* number of hue histogram bins */
float hranges_arr[] = {0,180};
float* hranges = hranges_arr;
int vmin = 10, vmax = 256, smin = 30;   /* trackbar-controlled V/S thresholds */
/* Mouse callback: lets the user drag a rectangle over the video window.
   While dragging, `selection` is maintained as a clamped, normalized
   CvRect; releasing the button arms tracking by setting track_object = -1. */
void on_mouse( int event, int x, int y, int flags, void* param )
{
    if( !image )
        return;

    /* Flip y for bottom-origin images so coordinates match the picture. */
    if( image->origin )
        y = image->height - y;

    if( select_object )
    {
        selection.x = MIN(x,origin.x);
        selection.y = MIN(y,origin.y);
        /* width/height temporarily hold the bottom-right corner so the
           rectangle can be clamped to the image; they are converted back
           to a size by the two subtractions below. */
        selection.width = selection.x + CV_IABS(x - origin.x);
        selection.height = selection.y + CV_IABS(y - origin.y);

        selection.x = MAX( selection.x, 0 );
        selection.y = MAX( selection.y, 0 );
        selection.width = MIN( selection.width, image->width );
        selection.height = MIN( selection.height, image->height );
        selection.width -= selection.x;
        selection.height -= selection.y;
    }

    switch( event )
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = cvPoint(x,y);
        selection = cvRect(x,y,0,0);
        select_object = 1;
        break;
    case CV_EVENT_LBUTTONUP:
        select_object = 0;
        if( selection.width > 0 && selection.height > 0 )
            track_object = -1;   /* signal main() to rebuild the histogram */
        break;
    }
}
/* Converts a hue value (OpenCV's 0..180 range for 8-bit HSV) to a fully
   saturated color, returned as a CvScalar in OpenCV's B,G,R channel order. */
CvScalar hsv2rgb( float hue )
{
    int rgb[3], p, sector;
    static const int sector_data[][3]=
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;   /* 1/30: 0..180 -> sector 0..5 */
    sector = cvFloor(hue);
    /* BUG FIX: hue == 180 (sector 6) or a negative hue previously indexed
       outside sector_data; clamp the sector to the valid range. */
    if( sector < 0 )
        sector = 0;
    else if( sector > 5 )
        sector = 5;
    p = cvRound(255*(hue - sector));
    p ^= sector & 1 ? 255 : 0;   /* reverse the ramp in odd sectors */

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

    return cvScalar(rgb[2], rgb[1], rgb[0],0);
}
/* Interactive CamShift demo: grabs frames from a camera or AVI, lets the
   user select an object with the mouse, builds a hue histogram of the
   selection and tracks it with cvCamShift, drawing the tracked ellipse. */
int main( int argc, char** argv )
{
    CvCapture* capture = 0;

    /* A single-digit argument selects a camera index; otherwise the
       argument is treated as a video file path. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "To initialize tracking, select the object with mouse\n" );

    cvNamedWindow( "Histogram", 1 );
    cvNamedWindow( "CamShiftDemo", 1 );
    cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
    cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
    cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
    cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, bin_w, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );

        if( track_object )
        {
            int _vmin = vmin, _vmax = vmax;

            /* Mask out pixels outside the saturation/value thresholds,
               then isolate the hue plane. */
            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );

            if( track_object < 0 )
            {
                /* New selection (set by on_mouse): compute the hue
                   histogram over the selected ROI, normalize it to 0..255
                   and redraw the histogram image. */
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
            }

            /* Back-project the histogram onto the hue plane and run
               CamShift on the masked result. */
            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;

            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( !image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        /* Invert the selection rectangle while the user is still dragging. */
        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        cvShowImage( "CamShiftDemo", image );
        cvShowImage( "Histogram", histimg );

        c = cvWaitKey(10);
        if( (char) c == 27 )
            break;   /* ESC quits */
        switch( (char) c )
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero( histimg );
            break;
        case 'h':
            show_hist ^= 1;
            if( !show_hist )
                cvDestroyWindow( "Histogram" );
            else
                cvNamedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("CamShiftDemo");

    return 0;
}
#ifdef _EiC
main(1,"camshiftdemo.c");
#endif

92
samples/c/contours.c Normal file
View File

@@ -0,0 +1,92 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <math.h>
#endif
#define w 500          /* width and height of the working images */

int levels = 3;        /* trackbar position; drawing depth is levels - 3 */
CvSeq* contours = 0;   /* contour tree found in main(), read by the trackbar callback */
/* Trackbar callback: redraws the contour tree with drawing depth
   (levels - 3). For non-positive depth the demo descends three links into
   the contour list "to get to the nearest face". */
void on_trackbar(int pos)
{
    IplImage* cnt_img = cvCreateImage( cvSize(w,w), 8, 3 );
    CvSeq* _contours = contours;
    int _levels = levels - 3;
    if( _levels <= 0 ) // get to the nearest face to make it look more funny
    {
        /* BUG FIX: the original dereferenced
           _contours->h_next->h_next->h_next unconditionally and crashed if
           the contour list was empty or shorter than three entries; walk
           the links with a null guard instead. */
        int i;
        for( i = 0; i < 3 && _contours != 0; i++ )
            _contours = _contours->h_next;
    }
    cvZero( cnt_img );
    cvDrawContours( cnt_img, _contours, CV_RGB(255,0,0), CV_RGB(0,255,0), _levels, 3, CV_AA, cvPoint(0,0) );
    cvShowImage( "contours", cnt_img );
    cvReleaseImage( &cnt_img );
}
/* Draws a synthetic picture of six "faces", extracts its contour tree and
   displays it; a trackbar controls the displayed contour depth. */
int main( int argc, char** argv )
{
    int i, j;
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 );

    cvZero( img );

    for( i=0; i < 6; i++ )
    {
        /* Lay the faces out on a 2x3 grid. */
        int dx = (i%2)*250 - 30;
        int dy = (i/2)*150;
        CvScalar white = cvRealScalar(255);
        CvScalar black = cvRealScalar(0);

        if( i == 0 )
        {
            /* The first face gets "hair": short radial strokes. */
            for( j = 0; j <= 10; j++ )
            {
                double angle = (j+5)*CV_PI/21;
                cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)),
                    cvRound(dy+100-90*sin(angle))),
                    cvPoint(cvRound(dx+100+j*10-30*cos(angle)),
                    cvRound(dy+100-30*sin(angle))), white, 1, 8, 0);
            }
        }

        /* Face features as nested filled ellipses: head, eyes with pupils,
           nose, mouth and ears; the nesting creates the contour hierarchy
           the demo visualizes. */
        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
    }

    cvNamedWindow( "image", 1 );
    cvShowImage( "image", img );

    /* Full hierarchical contour retrieval into the global tree. */
    cvFindContours( img, storage, &contours, sizeof(CvContour),
                    CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

    // comment this out if you do not want approximation
    contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );

    cvNamedWindow( "contours", 1 );
    cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );

    on_trackbar(0);
    cvWaitKey(0);
    cvReleaseMemStorage( &storage );
    cvReleaseImage( &img );

    return 0;
}
#ifdef _EiC
main(1,"");
#endif

View File

@@ -0,0 +1,42 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#endif
/* Loads an old-format Haar cascade at the window size given by
   --size="<w>x<h>" and re-saves it to the output path in the newer
   serialized format. */
int main( int argc, char** argv )
{
    const char* size_opt = "--size=";
    char comment[1024];
    CvHaarClassifierCascade* cascade = 0;
    CvSize size;

    if( argc != 4 || strncmp( argv[1], size_opt, strlen(size_opt) ) != 0 )
    {
        printf( "Usage: convert_cascade --size=\"<width>x<height>\" input_cascade_path output_cascade_filename\n" );
        return -1;
    }

    /* BUG FIX: CvSize fields are signed ints, so scan with %d (the original
       used %u with int* operands), and verify both dimensions were actually
       parsed instead of passing garbage to the loader. */
    if( sscanf( argv[1], "--size=%dx%d", &size.width, &size.height ) != 2 ||
        size.width <= 0 || size.height <= 0 )
    {
        fprintf( stderr, "Invalid --size argument: %s\n", argv[1] );
        return -1;
    }

    cascade = cvLoadHaarClassifierCascade( argv[2], size );

    if( !cascade )
    {
        fprintf( stderr, "Input cascade could not be found/opened\n" );
        return -1;
    }

    /* snprintf guards against overflow when the input path is very long
       (the original used an unbounded sprintf into a 1024-byte buffer). */
    snprintf( comment, sizeof(comment), "Automatically converted from %s, window size = %dx%d",
              argv[2], size.width, size.height );
    cvSave( argv[3], cascade, 0, comment, cvAttrList(0,0) );

    return 0;
}
#ifdef _EiC
main(1,"facedetect.c");
#endif

106
samples/c/convexhull.c Normal file
View File

@@ -0,0 +1,106 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdlib.h>
#endif
/* When ARRAY is non-zero the hull is computed over plain C arrays;
   otherwise OpenCV sequences (CvSeq) backed by a memory storage are used. */
#define ARRAY  1

/* Repeatedly generates a random point set, computes its convex hull and
   draws points + hull, until ESC/'q'/'Q' is pressed. */
int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif

    cvNamedWindow( "hull", 1 );

    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1, hullcount;
        CvPoint pt0;
#if !ARRAY
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                    sizeof(CvPoint), storage );
        CvSeq* hull;

        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
        hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 );
        hullcount = hull->total;
#else
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        int* hull = (int*)malloc( count * sizeof(hull[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );
        CvMat hullMat = cvMat( 1, count, CV_32SC1, hull );

        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
        cvConvexHull2( &pointMat, &hullMat, CV_CLOCKWISE, 0 );
        hullcount = hullMat.cols;
#endif

        /* Draw the input points, then the hull as a closed polyline. */
        cvZero( img );
        for( i = 0; i < count; i++ )
        {
#if !ARRAY
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }

#if !ARRAY
        pt0 = **CV_GET_SEQ_ELEM( CvPoint*, hull, hullcount - 1 );
#else
        pt0 = points[hull[hullcount-1]];
#endif

        for( i = 0; i < hullcount; i++ )
        {
#if !ARRAY
            CvPoint pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, i );
#else
            CvPoint pt = points[hull[i]];
#endif
            cvLine( img, pt0, pt, CV_RGB( 0, 255, 0 ), 1, CV_AA, 0 );
            pt0 = pt;
        }

        cvShowImage( "hull", img );

        key = (char) cvWaitKey(0);

        /* BUG FIX: release the per-iteration buffers BEFORE testing the
           quit key; the original broke out of the loop first and leaked
           `points`/`hull` (or skipped clearing the storage) on exit. */
#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
        free( hull );
#endif
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;
    }

    cvDestroyWindow( "hull" );
    /* Release resources held across iterations as well. */
#if !ARRAY
    cvReleaseMemStorage( &storage );
#endif
    cvReleaseImage( &img );
    return 0;
}
#ifdef _EiC
main(1,"convexhull.c");
#endif

92
samples/c/cvsample.dsp Normal file
View File

@@ -0,0 +1,92 @@
# Microsoft Developer Studio Project File - Name="cvsample" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Console Application" 0x0103
CFG=cvsample - Win32 Release
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "cvsample.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "cvsample.mak" CFG="cvsample - Win32 Release"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
!MESSAGE "cvsample - Win32 Release" (based on "Win32 (x86) Console Application")
!MESSAGE "cvsample - Win32 Debug" (based on "Win32 (x86) Console Application")
!MESSAGE
# Begin Project
# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
RSC=rc.exe
!IF "$(CFG)" == "cvsample - Win32 Release"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 0
# PROP BASE Output_Dir "Release"
# PROP BASE Intermediate_Dir "Release"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 0
# PROP Output_Dir "..\..\_temp\cvsample_Release"
# PROP Intermediate_Dir "..\..\_temp\cvsample_Release"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD CPP /nologo /MD /W4 /Gm /GX /Zi /O2 /I "../../cxcore/include" /I "../../cv/include" /I "../../otherlibs/highgui" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib cxcore.lib cv.lib highgui.lib /nologo /subsystem:console /debug /machine:I386 /nodefaultlib:"libmmd.lib" /out:".\cvsample.exe" /libpath:"../../lib"
!ELSEIF "$(CFG)" == "cvsample - Win32 Debug"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 1
# PROP BASE Output_Dir "Debug"
# PROP BASE Intermediate_Dir "Debug"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 1
# PROP Output_Dir "..\..\_temp\cvsample_Debug"
# PROP Intermediate_Dir "..\..\_temp\cvsample_Debug"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
# ADD CPP /nologo /MDd /W4 /Gm /GX /Zi /Od /I "../../cxcore/include" /I "../../cv/include" /I "../../otherlibs/highgui" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib cxcored.lib cvd.lib highguid.lib /nologo /subsystem:console /debug /machine:I386 /nodefaultlib:"libmmdd.lib" /out:".\cvsampled.exe" /pdbtype:sept /libpath:"../../lib"
!ENDIF
# Begin Target
# Name "cvsample - Win32 Release"
# Name "cvsample - Win32 Debug"
# Begin Source File
SOURCE=.\squares.c
# End Source File
# End Target
# End Project

View File

@@ -0,0 +1,413 @@
<?xml version="1.0" encoding="windows-1251"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="8,00"
Name="cvsample"
ProjectGUID="{2820F96A-13D2-4EFE-BC9F-A9AF482026AE}"
RootNamespace="cvsample"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\..\..\_temp\cvsample_Dbg/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Dbg/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcored.lib cvd.lib highguid.lib"
OutputFile=".\cvsampled.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmdd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)d.pdb"
SubSystem="1"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\..\..\_temp\cvsample_Dbg64/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;WIN64;EM64T;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Dbg64/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcored_64.lib cvd_64.lib highguid_64.lib"
OutputFile=".\cvsampled_64.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmdd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)d_64.pdb"
SubSystem="1"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\..\..\_temp\cvsample_Rls/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
MinimalRebuild="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Rls/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcore.lib cv.lib highgui.lib"
OutputFile=".\cvsample.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName).pdb"
SubSystem="1"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\..\..\_temp\cvsample_Rls64/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;WIN64;EM64T;NDEBUG;_CONSOLE"
StringPooling="true"
MinimalRebuild="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Rls64/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcore_64.lib cv_64.lib highgui_64.lib"
OutputFile=".\cvsample_64.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)_64.pdb"
SubSystem="1"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath=".\stereo_calib.cpp"
>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

242
samples/c/delaunay.c Normal file
View File

@@ -0,0 +1,242 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#endif
/* the sample demonstrates iterative construction of
Delaunay triangulation and Voronoi tessellation */
/* Allocates an empty planar subdivision covering rect and prepares it for
   incremental Delaunay point insertion. */
CvSubdiv2D* init_delaunay( CvMemStorage* storage,
                           CvRect rect )
{
    CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
                                           sizeof(CvSubdiv2DPoint),
                                           sizeof(CvQuadEdge2D),
                                           storage );
    cvInitSubdivDelaunay2D( subdiv, rect );
    return subdiv;
}
/* Renders a subdivision vertex as a small filled dot. */
void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
{
    CvPoint center = cvPoint( cvRound(fp.x), cvRound(fp.y) );
    cvCircle( img, center, 3, color, CV_FILLED, 8, 0 );
}
/* Draws the segment joining an edge's origin and destination vertices;
   edges lacking either endpoint are skipped. */
void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
{
    CvSubdiv2DPoint* org_pt = cvSubdiv2DEdgeOrg(edge);
    CvSubdiv2DPoint* dst_pt = cvSubdiv2DEdgeDst(edge);

    /* Guard clause instead of the original nested-if form. */
    if( !org_pt || !dst_pt )
        return;

    {
        CvPoint2D32f org = org_pt->pt;
        CvPoint2D32f dst = dst_pt->pt;
        CvPoint iorg = cvPoint( cvRound( org.x ), cvRound( org.y ));
        CvPoint idst = cvPoint( cvRound( dst.x ), cvRound( dst.y ));
        cvLine( img, iorg, idst, color, 1, CV_AA, 0 );
    }
}
/* Draws every edge of the subdivision: Voronoi (dual) edges in
   voronoi_color and Delaunay edges in delaunay_color. */
void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
                  CvScalar delaunay_color, CvScalar voronoi_color )
{
    CvSeqReader reader;
    int i, total = subdiv->edges->total;
    int elem_size = subdiv->edges->elem_size;

    cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );

    for( i = 0; i < total; i++ )
    {
        CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

        if( CV_IS_SET_ELEM( edge ))
        {
            /* edge + 1 addresses the rotated (dual) edge of the quad-edge. */
            draw_subdiv_edge( img, (CvSubdiv2DEdge)edge + 1, voronoi_color );
            draw_subdiv_edge( img, (CvSubdiv2DEdge)edge, delaunay_color );
        }

        CV_NEXT_SEQ_ELEM( elem_size, reader );
    }
}
/* Locates fp in the subdivision and highlights the containing facet's
   edges, plus the query point itself, in active_color. */
void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
                   CvScalar active_color )
{
    CvSubdiv2DEdge e;
    CvSubdiv2DEdge e0 = 0;
    CvSubdiv2DPoint* p = 0;

    cvSubdiv2DLocate( subdiv, fp, &e0, &p );

    if( e0 )
    {
        /* Walk around the facet starting from the located edge. */
        e = e0;
        do
        {
            draw_subdiv_edge( img, e, active_color );
            e = cvSubdiv2DGetEdge(e,CV_NEXT_AROUND_LEFT);
        }
        while( e != e0 );
    }

    draw_subdiv_point( img, fp, active_color );
}
/* Fills one facet of the subdivision with a random color, outlines it in
   black and marks its site vertex. */
void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
{
    CvSubdiv2DEdge t = edge;
    int i, count = 0;
    CvPoint* buf = 0;

    // count number of edges in facet
    do
    {
        count++;
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
    } while (t != edge );

    buf = (CvPoint*)malloc( count * sizeof(buf[0]));
    /* BUG FIX: the original used the buffer without checking the
       allocation; on failure simply draw nothing. */
    if( !buf )
        return;

    // gather points
    t = edge;
    for( i = 0; i < count; i++ )
    {
        CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
        if( !pt ) break;
        buf[i] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
    }

    if( i == count )   /* all vertices were defined */
    {
        CvSubdiv2DPoint* pt = cvSubdiv2DEdgeDst( cvSubdiv2DRotateEdge( edge, 1 ));
        cvFillConvexPoly( img, buf, count, CV_RGB(rand()&255,rand()&255,rand()&255), CV_AA, 0 );
        cvPolyLine( img, &buf, &count, 1, 1, CV_RGB(0,0,0), 1, CV_AA, 0);
        /* BUG FIX: pt was dereferenced unconditionally although
           cvSubdiv2DEdgeDst can return NULL. */
        if( pt )
            draw_subdiv_point( img, pt->pt, CV_RGB(0,0,0));
    }
    free( buf );
}
/* Fills every Voronoi cell of the subdivision with a random color. */
void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
{
    CvSeqReader reader;
    int i, total = subdiv->edges->total;
    int elem_size = subdiv->edges->elem_size;

    /* Make sure the Voronoi diagram is up to date before painting. */
    cvCalcSubdivVoronoi2D( subdiv );

    cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );

    for( i = 0; i < total; i++ )
    {
        CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

        if( CV_IS_SET_ELEM( edge ))
        {
            CvSubdiv2DEdge e = (CvSubdiv2DEdge)edge;
            // left
            draw_subdiv_facet( img, cvSubdiv2DRotateEdge( e, 1 ));

            // right
            draw_subdiv_facet( img, cvSubdiv2DRotateEdge( e, 3 ));
        }

        CV_NEXT_SEQ_ELEM( elem_size, reader );
    }
}
void run(void)
{
char win[] = "source";
int i;
CvRect rect = { 0, 0, 600, 600 };
CvMemStorage* storage;
CvSubdiv2D* subdiv;
IplImage* img;
CvScalar active_facet_color, delaunay_color, voronoi_color, bkgnd_color;
active_facet_color = CV_RGB( 255, 0, 0 );
delaunay_color = CV_RGB( 0,0,0);
voronoi_color = CV_RGB(0, 180, 0);
bkgnd_color = CV_RGB(255,255,255);
img = cvCreateImage( cvSize(rect.width,rect.height), 8, 3 );
cvSet( img, bkgnd_color, 0 );
cvNamedWindow( win, 1 );
storage = cvCreateMemStorage(0);
subdiv = init_delaunay( storage, rect );
printf("Delaunay triangulation will be build now interactively.\n"
"To stop the process, press any key\n\n");
for( i = 0; i < 200; i++ )
{
CvPoint2D32f fp = cvPoint2D32f( (float)(rand()%(rect.width-10)+5),
(float)(rand()%(rect.height-10)+5));
locate_point( subdiv, fp, img, active_facet_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
cvSubdivDelaunay2DInsert( subdiv, fp );
cvCalcSubdivVoronoi2D( subdiv );
cvSet( img, bkgnd_color, 0 );
draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
}
cvSet( img, bkgnd_color, 0 );
paint_voronoi( subdiv, img );
cvShowImage( win, img );
cvWaitKey(0);
cvReleaseMemStorage( &storage );
cvReleaseImage(&img);
cvDestroyWindow( win );
}
/* Entry point: the command-line arguments are unused. */
int main( int argc, char** argv )
{
    /* fix: the MSVC-only expression statement `argc, argv;` is replaced
       with portable (void) casts to silence unused-parameter warnings
       on every compiler */
    (void)argc;
    (void)argv;
    run();
    return 0;
}
#ifdef _EiC
main( 1, "delaunay.c" );
#endif

127
samples/c/demhist.c Normal file
View File

@@ -0,0 +1,127 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#endif
/* Default input image, used when no command-line argument is given. */
char file_name[] = "baboon.jpg";
/* Trackbar positions; 100 is the neutral midpoint of the 0..200 range
   (update_brightcont subtracts 100). */
int _brightness = 100;
int _contrast = 100;
int hist_size = 64;             /* number of histogram bins */
float range_0[]={0,256};        /* intensity range covered by the histogram */
float* ranges[] = { range_0 };
IplImage *src_image = 0, *dst_image = 0, *hist_image = 0;
CvHistogram *hist;
uchar lut[256];                 /* brightness/contrast lookup table */
CvMat* lut_mat;                 /* CvMat header wrapping `lut` (see main) */
/* brightness/contrast trackbar callback: rebuild the LUT from the
   current trackbar positions, apply it to the source image, and redraw
   both the adjusted image and its histogram. */
void update_brightcont( int arg )
{
    int brightness = _brightness - 100;
    int contrast = _contrast - 100;
    double a, b;
    int i, bin_w;
    float max_value = 0;

    /*
     * The algorithm is by Werner D. Streidt
     * (http://visca.com/ffactory/archives/5-99/msg00021.html)
     */
    if( contrast > 0 )
    {
        double delta = 127.*contrast/100;
        a = 255./(255. - delta*2);
        b = a*(brightness - delta);
    }
    else
    {
        double delta = -128.*contrast/100;
        a = (256.-delta*2)/255.;
        b = a*brightness + delta;
    }

    /* fill the 256-entry lookup table, clamping each value to [0,255] */
    for( i = 0; i < 256; i++ )
    {
        int v = cvRound(a*i + b);
        lut[i] = (uchar)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    cvLUT( src_image, dst_image, lut_mat );
    cvShowImage( "image", dst_image );

    cvCalcHist( &dst_image, hist, 0, NULL );
    cvZero( dst_image );
    cvGetMinMaxHistValue( hist, 0, &max_value, 0, 0 );
    cvScale( hist->bins, hist->bins, ((double)hist_image->height)/max_value, 0 );
    /*cvNormalizeHist( hist, 1000 );*/

    cvSet( hist_image, cvScalarAll(255), 0 );
    bin_w = cvRound((double)hist_image->width/hist_size);

    /* one filled bar per bin, drawn bottom-up */
    for( i = 0; i < hist_size; i++ )
        cvRectangle( hist_image, cvPoint(i*bin_w, hist_image->height),
                     cvPoint((i+1)*bin_w, hist_image->height - cvRound(cvGetReal1D(hist->bins,i))),
                     cvScalarAll(0), -1, 8, 0 );

    cvShowImage( "histogram", hist_image );
}
/* Entry point: load a grayscale image (argv[1] or "baboon.jpg"),
   create the adjustment windows and trackbars, and run until a key
   is pressed. Returns -1 if the image cannot be loaded. */
int main( int argc, char** argv )
{
    // Load the source image. HighGUI use.
    src_image = cvLoadImage( argc == 2 ? argv[1] : file_name, 0 );
    if( !src_image )
    {
        printf("Image was not loaded.\n");
        return -1;
    }

    dst_image = cvCloneImage(src_image);
    hist_image = cvCreateImage(cvSize(320,200), 8, 1);
    hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
    lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( lut_mat, lut, 0 );

    cvNamedWindow("image", 0);
    cvNamedWindow("histogram", 0);

    cvCreateTrackbar("brightness", "image", &_brightness, 200, update_brightcont);
    cvCreateTrackbar("contrast", "image", &_contrast, 200, update_brightcont);

    update_brightcont(0);
    cvWaitKey(0);

    cvReleaseImage(&src_image);
    cvReleaseImage(&dst_image);
    /* fix: hist_image and lut_mat were leaked and the windows left open */
    cvReleaseImage(&hist_image);
    cvReleaseHist(&hist);
    cvReleaseMat(&lut_mat);
    cvDestroyWindow("image");
    cvDestroyWindow("histogram");

    return 0;
}
#ifdef _EiC
main(1,"demhist.c");
#endif

View File

@@ -0,0 +1,262 @@
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <iostream>
using namespace cv;
using namespace std;
// Map pt through the 3x3 homography H, including the perspective divide.
inline Point2f applyHomography( const Mat_<double>& H, const Point2f& pt )
{
    double w = 1./(H(2,0)*pt.x + H(2,1)*pt.y + H(2,2));
    float mappedX = (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w);
    float mappedY = (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w);
    return Point2f( mappedX, mappedY );
}
/* Render img1 and img2 side by side in drawImg, overlaying keypoints and
   the matches between them. transfMtr is the homography mapping img1
   points into img2 and is used to classify each match:
     red point   - point without corresponding point;
     green point - point having correct corresponding point (within 3 px);
     pink point  - point having incorrect corresponding point, but excised
                   by threshold of distance (maxDist);
     blue point  - point having incorrect corresponding point. */
void drawCorrespondences( const Mat& img1, const Mat& img2, const Mat& transfMtr,
                          const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2,
                          const vector<int>& matches, const vector<double>& distances,
                          float maxDist, Mat& drawImg )
{
    Scalar RED = CV_RGB(255, 0, 0);
    Scalar PINK = CV_RGB(255,130,230);
    Scalar GREEN = CV_RGB(0, 255, 0);
    Scalar BLUE = CV_RGB(0, 0, 255);

    // compose the two grayscale images into one RGB canvas
    Size size(img1.cols + img2.cols, MAX(img1.rows, img2.rows));
    drawImg.create(size, CV_MAKETYPE(img1.depth(), 3));
    Mat drawImg1 = drawImg(Rect(0, 0, img1.cols, img1.rows));
    cvtColor(img1, drawImg1, CV_GRAY2RGB);
    Mat drawImg2 = drawImg(Rect(img1.cols, 0, img2.cols, img2.rows));
    cvtColor(img2, drawImg2, CV_GRAY2RGB);

    // mark every keypoint red first; classified matches repaint them below
    for(vector<KeyPoint>::const_iterator it = keypoints1.begin(); it < keypoints1.end(); ++it )
    {
        circle(drawImg, it->pt, 3, RED);
    }
    for(vector<KeyPoint>::const_iterator it = keypoints2.begin(); it < keypoints2.end(); ++it )
    {
        Point p = it->pt;
        circle(drawImg, Point2f(p.x+img1.cols, p.y), 3, RED);
    }

    /* fix: removed unused locals `Mat vec1(3, 1, CV_32FC1), vec2;` */
    float err = 3;  // max reprojection error (px) for a "correct" match
    vector<int>::const_iterator mit = matches.begin();
    vector<double>::const_iterator dit = distances.begin();
    assert( matches.size() == distances.size() && matches.size() == keypoints1.size() );
    for( int i1 = 0; mit < matches.end(); ++mit, ++dit, i1++ )
    {
        Point2f pt1 = keypoints1[i1].pt, pt2 = keypoints2[*mit].pt;
        Point2f diff = applyHomography(transfMtr, pt1) - pt2;
        if( norm(diff) < err )
        {
            circle(drawImg, pt1, 3, GREEN);
            circle(drawImg, Point2f(pt2.x+img1.cols, pt2.y), 3, GREEN);
            line(drawImg, pt1, Point2f(pt2.x+img1.cols, pt2.y), GREEN);
        }
        else
        {
            if( *dit > maxDist )
            {
                circle(drawImg, pt1, 3, PINK);
                circle(drawImg, Point2f(pt2.x+img1.cols, pt2.y), 3, PINK);
            }
            else
            {
                circle(drawImg, pt1, 3, BLUE);
                circle(drawImg, Point2f(pt2.x+img1.cols, pt2.y), 3, BLUE);
                line(drawImg, pt1, Point2f(pt2.x+img1.cols, pt2.y), BLUE);
            }
        }
    }
}
// Factory: construct a keypoint detector by name; returns 0 for an
// unknown name. The caller owns the returned pointer.
FeatureDetector* createDetector( const string& detectorType )
{
    if( detectorType == "FAST" )
        return new FastFeatureDetector( 1/*threshold*/, true/*nonmax_suppression*/ );

    if( detectorType == "STAR" )
        return new StarFeatureDetector( 16/*max_size*/, 30/*response_threshold*/, 10/*line_threshold_projected*/,
                                        8/*line_threshold_binarized*/, 5/*suppress_nonmax_size*/ );

    if( detectorType == "SIFT" )
        return new SiftFeatureDetector(SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
                                       SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD(),
                                       SIFT::DetectorParams::FIRST_ANGLE);

    if( detectorType == "SURF" )
        return new SurfFeatureDetector( 400./*hessian_threshold*/, 3 /*octaves*/, 4/*octave_layers*/ );

    if( detectorType == "MSER" )
        return new MserFeatureDetector( 5/*delta*/, 60/*min_area*/, 14400/*_max_area*/, 0.25f/*max_variation*/,
                                        0.2/*min_diversity*/, 200/*max_evolution*/, 1.01/*area_threshold*/, 0.003/*min_margin*/,
                                        5/*edge_blur_size*/ );

    if( detectorType == "GFTT" )
        return new GoodFeaturesToTrackDetector( 1000/*maxCorners*/, 0.01/*qualityLevel*/, 1./*minDistance*/,
                                                3/*int _blockSize*/, true/*useHarrisDetector*/, 0.04/*k*/ );

    return 0;
}
// Factory: construct a descriptor extractor by name; returns 0 for an
// unknown name. The caller owns the returned pointer.
DescriptorExtractor* createDescExtractor( const string& descriptorType )
{
    if( descriptorType == "CALONDER" )
    {
        // Calonder support is not wired up in this sample
        assert(0);
        //de = new CalonderDescriptorExtractor<float>("");
        return 0;
    }

    if( descriptorType == "SURF" )
        return new SurfDescriptorExtractor( 3/*octaves*/, 4/*octave_layers*/, false/*extended*/ );

    return 0;
}
/* Factory for the descriptor matcher. The argument is currently ignored
   (fix: it was misspelled "matherType" and silently unused) — a
   brute-force L2 matcher is always returned. Caller owns the pointer. */
DescriptorMatcher* createDescMatcher( const string& matcherType = string() )
{
    (void)matcherType;  // only one matcher type is implemented so far
    return new BruteForceMatcher<L2<float> >();
}
// Configuration-key names and the display window title for the demo.
const string DETECTOR_TYPE_STR = "detector_type";
const string DESCRIPTOR_TYPE_STR = "descriptor_type";
const string winName = "correspondences";
/* Run one matching round: optionally fill transfMtr with a fresh random
   perspective transform (when rng != 0), warp img1 by transfMtr to get a
   second image, detect keypoints and compute descriptors in both images,
   brute-force match them, and display the classified correspondences
   (maxDist is the descriptor-distance threshold passed to the drawing).
   NOTE(review): the parameters shadow same-named globals; only the
   arguments are used inside this function. */
void iter( Ptr<FeatureDetector> detector, Ptr<DescriptorExtractor> descriptor,
           const Mat& img1, float maxDist, Mat& transfMtr, RNG* rng = 0 )
{
    if( transfMtr.empty() )
        transfMtr = Mat::eye(3, 3, CV_32FC1);
    if( rng )
    {
        // random perspective transform coefficients
        transfMtr.at<float>(0,0) = rng->uniform( 0.7f, 1.3f);
        transfMtr.at<float>(0,1) = rng->uniform(-0.2f, 0.2f);
        transfMtr.at<float>(0,2) = rng->uniform(-0.1f, 0.1f)*img1.cols;
        transfMtr.at<float>(1,0) = rng->uniform(-0.2f, 0.2f);
        transfMtr.at<float>(1,1) = rng->uniform( 0.7f, 1.3f);
        transfMtr.at<float>(1,2) = rng->uniform(-0.1f, 0.3f)*img1.rows;
        transfMtr.at<float>(2,0) = rng->uniform( -1e-4f, 1e-4f);
        transfMtr.at<float>(2,1) = rng->uniform( -1e-4f, 1e-4f);
        transfMtr.at<float>(2,2) = rng->uniform( 0.7f, 1.3f);
    }
    Mat img2; warpPerspective( img1, img2, transfMtr, img1.size() );
    cout << endl << "< Extracting keypoints... ";
    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect( img1, keypoints1 );
    detector->detect( img2, keypoints2 );
    cout << keypoints1.size() << " from first image and " << keypoints2.size() << " from second image >" << endl;
    if( keypoints1.empty() || keypoints2.empty() )
        cout << "end" << endl;  // NOTE(review): prints but does not return — confirm intent
    cout << "< Computing descriptors... ";
    Mat descs1, descs2;
    if( keypoints1.size()>0 && keypoints2.size()>0 )
    {
        descriptor->compute( img1, keypoints1, descs1 );
        descriptor->compute( img2, keypoints2, descs2 );
    }
    cout << ">" << endl;
    cout << "< Matching keypoints by descriptors... ";
    vector<int> matches;
    vector<double> distances;
    Ptr<DescriptorMatcher> matcher = createDescMatcher();
    matcher->add( descs2 );
    matcher->match( descs1, matches, &distances );
    cout << ">" << endl;
    // TODO time
    Mat drawImg;
    drawCorrespondences( img1, img2, transfMtr, keypoints1, keypoints2,
                         matches, distances, maxDist, drawImg );
    imshow( winName, drawImg);
}
// Shared demo state, set up in main() and used by the trackbar callback.
Ptr<FeatureDetector> detector;
Ptr<DescriptorExtractor> descriptor;
Mat img1;                          // source grayscale image
Mat transfMtr;                     // current synthetic homography (see iter)
RNG rng;                           // drives new random homographies ('n' key)
const float maxDistScale = 0.01f;  // trackbar units -> descriptor distance
int maxDist;                       // trackbar position
// Trackbar callback: rerun matching with the new distance threshold.
// (the int parameter previously shadowed the global `maxDist`; it is the
// trackbar position supplied by HighGUI)
void onMaxDistChange( int pos, void* )
{
    float realMaxDist = pos * maxDistScale;
    cout << "maxDist " << realMaxDist << endl;
    iter( detector, descriptor, img1, realMaxDist, transfMtr );
}
/* Entry point: <detector_type> <descriptor_type> <image>.
   Builds the detector/descriptor, loads the image, and loops:
   'n' generates a new random homography and reruns matching,
   Esc exits; the maxDist trackbar reruns matching with a new
   descriptor-distance threshold. */
int main(int argc, char** argv)
{
    if( argc != 4 )
    {
        cout << "Format:" << endl;
        cout << "./" << argv[0] << " [detector_type] [descriptor_type] [image]" << endl;
        return 0;
    }
    cout << "< Creating detector, descriptor and matcher... ";
    detector = createDetector(argv[1]);
    descriptor = createDescExtractor(argv[2]);
    //Ptr<DescriptorMatcher> matcher = createDescMatcher(argv[3]);
    cout << ">" << endl;
    if( detector.empty() || descriptor.empty()/* || matcher.empty() */ )
    {
        cout << "Can not create detector or descriptor or matcher of given types" << endl;
        return 0;
    }
    cout << "< Reading the image... ";
    img1 = imread( argv[3], CV_LOAD_IMAGE_GRAYSCALE);
    cout << ">" << endl;
    if( img1.empty() )
    {
        cout << "Can not read image" << endl;
        return 0;
    }
    namedWindow(winName, 1);
    maxDist = 12;  // initial trackbar position (0.12 after maxDistScale)
    createTrackbar( "maxDist", winName, &maxDist, 100, onMaxDistChange );
    onMaxDistChange(maxDist, 0);
    for(;;)
    {
        char c = (char)cvWaitKey(0);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            return 0;
        }
        else if( c == 'n' )
            iter(detector, descriptor, img1, maxDist*maxDistScale, transfMtr, &rng);
    }
    waitKey(0);  // NOTE(review): unreachable — the loop above only exits via return
}

139
samples/c/dft.c Normal file
View File

@@ -0,0 +1,139 @@
#define CV_NO_BACKWARD_COMPATIBILITY
#include <cxcore.h>
#include <cv.h>
#include <highgui.h>
// Rearrange the quadrants of Fourier image so that the origin is at
// the image center.
// src & dst arrays of equal size & type; they may alias, in which case
// a temporary quadrant-sized buffer is used.
void cvShiftDFT(CvArr * src_arr, CvArr * dst_arr )
{
    CvMat * tmp=0;
    CvMat q1stub, q2stub;
    CvMat q3stub, q4stub;
    CvMat d1stub, d2stub;
    CvMat d3stub, d4stub;
    CvMat * q1, * q2, * q3, * q4;
    CvMat * d1, * d2, * d3, * d4;
    CvSize size = cvGetSize(src_arr);
    CvSize dst_size = cvGetSize(dst_arr);
    int cx, cy;

    if(dst_size.width != size.width ||
       dst_size.height != size.height){
        cvError( CV_StsUnmatchedSizes, "cvShiftDFT", "Source and Destination arrays must have equal sizes", __FILE__, __LINE__ );
    }

    if(src_arr==dst_arr){
        tmp = cvCreateMat(size.height/2, size.width/2, cvGetElemType(src_arr));
    }

    cx = size.width/2;
    cy = size.height/2; // image center

    q1 = cvGetSubRect( src_arr, &q1stub, cvRect(0,0,cx, cy) );
    q2 = cvGetSubRect( src_arr, &q2stub, cvRect(cx,0,cx,cy) );
    q3 = cvGetSubRect( src_arr, &q3stub, cvRect(cx,cy,cx,cy) );
    q4 = cvGetSubRect( src_arr, &q4stub, cvRect(0,cy,cx,cy) );
    /* fix: destination sub-rects must come from dst_arr — the original
       took them from src_arr, so the out-of-place path shuffled quadrants
       inside the source and never wrote the destination */
    d1 = cvGetSubRect( dst_arr, &d1stub, cvRect(0,0,cx,cy) );
    d2 = cvGetSubRect( dst_arr, &d2stub, cvRect(cx,0,cx,cy) );
    d3 = cvGetSubRect( dst_arr, &d3stub, cvRect(cx,cy,cx,cy) );
    d4 = cvGetSubRect( dst_arr, &d4stub, cvRect(0,cy,cx,cy) );

    if(src_arr!=dst_arr){
        if( !CV_ARE_TYPES_EQ( q1, d1 )){
            cvError( CV_StsUnmatchedFormats, "cvShiftDFT", "Source and Destination arrays must have the same format", __FILE__, __LINE__ );
        }
        /* swap quadrants diagonally: q3->d1, q4->d2, q1->d3, q2->d4 */
        cvCopy(q3, d1, 0);
        cvCopy(q4, d2, 0);
        cvCopy(q1, d3, 0);
        cvCopy(q2, d4, 0);
    }
    else{
        /* in-place: swap q1<->q3 and q2<->q4 through the temporary */
        cvCopy(q3, tmp, 0);
        cvCopy(q1, q3, 0);
        cvCopy(tmp, q1, 0);
        cvCopy(q4, tmp, 0);
        cvCopy(q2, q4, 0);
        cvCopy(tmp, q2, 0);
    }

    if( tmp )
        cvReleaseMat( &tmp );  /* fix: the temporary matrix was leaked */
}
/* DFT demo: load an image (argv[1] or "lena.jpg"), compute its DFT with
   optimal zero-padding, and display the log-magnitude spectrum with the
   origin shifted to the image center. */
int main(int argc, char ** argv)
{
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";
    IplImage * im;
    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;
    int dft_M, dft_N;
    CvMat* dft_A, tmp;
    IplImage * image_Re;
    IplImage * image_Im;
    double m, M;

    im = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !im )
        return -1;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    /* build a 2-channel (re, im) input with a zero imaginary part */
    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    /* pad to sizes that the DFT handles fastest */
    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad bottom part of dft_A with zeros because of
    // use nonzero_rows parameter in cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    cvNamedWindow("win", 0);
    cvNamedWindow("magnitude", 0);
    cvShowImage("win", im);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    // Rearrange the quadrants of Fourier image so that the origin is at
    // the image center
    cvShiftDFT( image_Re, image_Re );

    /* normalize the spectrum to [0,1] for display */
    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage("magnitude", image_Re);

    cvWaitKey(-1);

    /* fix: the original leaked every image and matrix it allocated */
    cvReleaseImage(&im);
    cvReleaseImage(&realInput);
    cvReleaseImage(&imaginaryInput);
    cvReleaseImage(&complexInput);
    cvReleaseImage(&image_Re);
    cvReleaseImage(&image_Im);
    cvReleaseMat(&dft_A);
    cvDestroyWindow("win");
    cvDestroyWindow("magnitude");

    return 0;
}

189
samples/c/distrans.c Normal file
View File

@@ -0,0 +1,189 @@
#define CV_NO_BACKWARD_COMPATIBILITY
#ifdef _CH_
#pragma package <opencv>
#endif
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
char wndname[] = "Distance transform";   /* main window title */
char tbarname[] = "Threshold";           /* trackbar label */
/* current transform parameters, switched from the keyboard in main() */
int mask_size = CV_DIST_MASK_5;
int build_voronoi = 0;
int edge_thresh = 100;
int dist_type = CV_DIST_L1;
// The output and temporary images
IplImage* dist = 0;      /* 32F distance map */
IplImage* dist8u1 = 0;   /* 8u scratch channel */
IplImage* dist8u2 = 0;   /* 8u scratch channel */
IplImage* dist8u = 0;    /* 3-channel 8u image shown on screen */
IplImage* dist32s = 0;   /* 32S scratch image */
IplImage* gray = 0;      /* input image (loaded grayscale) */
IplImage* edge = 0;      /* thresholded input */
IplImage* labels = 0;    /* 32S Voronoi label image */
// threshold trackbar callback: recompute and display the distance
// transform (or the Voronoi labeling) of the thresholded input image.
void on_trackbar( int dummy )
{
    /* palette for painting Voronoi regions; index 0 (black) marks edges */
    static const uchar colors[][3] =
    {
        {0,0,0},
        {255,0,0},
        {255,128,0},
        {255,255,0},
        {0,255,0},
        {0,128,255},
        {0,255,255},
        {0,0,255},
        {255,0,255}
    };
    int msize = mask_size;
    int _dist_type = build_voronoi ? CV_DIST_L2 : dist_type;
    cvThreshold( gray, edge, (float)edge_thresh, (float)edge_thresh, CV_THRESH_BINARY );
    if( build_voronoi )
        msize = CV_DIST_MASK_5;   /* the labeled variant is run with the 5x5 mask */
    if( _dist_type == CV_DIST_L1 )
    {
        /* L1: transform in place on the 8u image, then convert to 32F */
        cvDistTransform( edge, edge, _dist_type, msize, NULL, NULL );
        cvConvert( edge, dist );
    }
    else
        cvDistTransform( edge, dist, _dist_type, msize, NULL, build_voronoi ? labels : NULL );
    if( !build_voronoi )
    {
        // begin "painting" the distance transform result
        cvConvertScale( dist, dist, 5000.0, 0 );
        cvPow( dist, dist, 0.5 );
        cvConvertScale( dist, dist32s, 1.0, 0.5 );
        cvAndS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u1, 1, 0 );
        cvConvertScale( dist32s, dist32s, -1, 0 );
        cvAddS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u2, 1, 0 );
        cvMerge( dist8u1, dist8u2, dist8u2, 0, dist8u );
        // end "painting" the distance transform result
    }
    else
    {
        /* color each pixel by its Voronoi label (mod 8); black where the
           label or the distance is zero */
        int i, j;
        for( i = 0; i < labels->height; i++ )
        {
            int* ll = (int*)(labels->imageData + i*labels->widthStep);
            float* dd = (float*)(dist->imageData + i*dist->widthStep);
            uchar* d = (uchar*)(dist8u->imageData + i*dist8u->widthStep);
            for( j = 0; j < labels->width; j++ )
            {
                int idx = ll[j] == 0 || dd[j] == 0 ? 0 : (ll[j]-1)%8 + 1;
                int b = cvRound(colors[idx][0]);
                int g = cvRound(colors[idx][1]);
                int r = cvRound(colors[idx][2]);
                d[j*3] = (uchar)b;
                d[j*3+1] = (uchar)g;
                d[j*3+2] = (uchar)r;
            }
        }
    }
    cvShowImage( wndname, dist8u );
}
/* Entry point: load an image (argv[1] or "stuff.jpg") and interactively
   display its distance transform; the keyboard switches the metric,
   mask size, and Voronoi mode (see the printed hot-key list). */
int main( int argc, char** argv )
{
    char* filename = argc == 2 ? argv[1] : (char*)"stuff.jpg";
    if( (gray = cvLoadImage( filename, 0 )) == 0 )
        return -1;
    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tC - use C/Inf metric\n"
        "\tL1 - use L1 metric\n"
        "\tL2 - use L2 metric\n"
        "\t3 - use 3x3 mask\n"
        "\t5 - use 5x5 mask\n"
        "\t0 - use precise distance transform\n"
        "\tv - switch Voronoi diagram mode on/off\n"
        "\tSPACE - loop through all the modes\n" );
    /* working images, all sized like the input */
    dist = cvCreateImage( cvGetSize(gray), IPL_DEPTH_32F, 1 );
    dist8u1 = cvCloneImage( gray );
    dist8u2 = cvCloneImage( gray );
    dist8u = cvCreateImage( cvGetSize(gray), IPL_DEPTH_8U, 3 );
    dist32s = cvCreateImage( cvGetSize(gray), IPL_DEPTH_32S, 1 );
    edge = cvCloneImage( gray );
    labels = cvCreateImage( cvGetSize(gray), IPL_DEPTH_32S, 1 );
    cvNamedWindow( wndname, 1 );
    cvCreateTrackbar( tbarname, wndname, &edge_thresh, 255, on_trackbar );
    for(;;)
    {
        int c;
        // Call to update the view
        on_trackbar(0);
        c = cvWaitKey(0);
        if( (char)c == 27 )
            break;
        if( (char)c == 'c' || (char)c == 'C' )
            dist_type = CV_DIST_C;
        else if( (char)c == '1' )
            dist_type = CV_DIST_L1;
        else if( (char)c == '2' )
            dist_type = CV_DIST_L2;
        else if( (char)c == '3' )
            mask_size = CV_DIST_MASK_3;
        else if( (char)c == '5' )
            mask_size = CV_DIST_MASK_5;
        else if( (char)c == '0' )
            mask_size = CV_DIST_MASK_PRECISE;
        else if( (char)c == 'v' )
            build_voronoi ^= 1;
        else if( (char)c == ' ' )
        {
            /* SPACE steps through every mode combination in order */
            if( build_voronoi )
            {
                build_voronoi = 0;
                mask_size = CV_DIST_MASK_3;
                dist_type = CV_DIST_C;
            }
            else if( dist_type == CV_DIST_C )
                dist_type = CV_DIST_L1;
            else if( dist_type == CV_DIST_L1 )
                dist_type = CV_DIST_L2;
            else if( mask_size == CV_DIST_MASK_3 )
                mask_size = CV_DIST_MASK_5;
            else if( mask_size == CV_DIST_MASK_5 )
                mask_size = CV_DIST_MASK_PRECISE;
            else if( mask_size == CV_DIST_MASK_PRECISE )
                build_voronoi = 1;
        }
    }
    cvReleaseImage( &gray );
    cvReleaseImage( &edge );
    cvReleaseImage( &dist );
    cvReleaseImage( &dist8u );
    cvReleaseImage( &dist8u1 );
    cvReleaseImage( &dist8u2 );
    cvReleaseImage( &dist32s );
    cvReleaseImage( &labels );
    cvDestroyWindow( wndname );
    return 0;
}

187
samples/c/drawing.c Normal file
View File

@@ -0,0 +1,187 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdlib.h>
#include <stdio.h>
#endif
#define NUMBER 100
#define DELAY 5
char wndname[] = "Drawing Demo";
/* Produce a pseudo-random BGR color from the RNG state. */
CvScalar random_color(CvRNG* rng)
{
    unsigned bits = (unsigned)cvRandInt(rng);
    return CV_RGB(bits & 255, (bits >> 8) & 255, (bits >> 16) & 255);
}
/* Drawing demo: exercises each primitive (lines, rectangles, ellipses,
   polylines, filled polygons, circles, text) with random geometry and
   colors, then fades the canvas behind a title. Any keypress exits. */
int main( int argc, char** argv )
{
    int line_type = CV_AA; // change it to 8 to see non-antialiased graphics
    int i;
    CvPoint pt1,pt2;
    double angle;
    CvSize sz;
    CvPoint ptt[6];
    CvPoint* pt[2];
    int arr[2];
    CvFont font;
    CvRNG rng;
    int width = 1000, height = 700;
    int width3 = width*3, height3 = height*3;
    CvSize text_size;
    int ymin = 0;
    // Create the blank canvas image
    IplImage* image = cvCreateImage( cvSize(width,height), 8, 3 );
    IplImage* image2;
    // Create a window
    cvNamedWindow(wndname, 1 );
    cvZero( image );
    cvShowImage(wndname,image);
    cvWaitKey(DELAY);
    rng = cvRNG((unsigned)-1);
    /* two triangles stored in one flat array; pt[]/arr[] describe them
       in the format cvPolyLine/cvFillPoly expect */
    pt[0] = &(ptt[0]);
    pt[1] = &(ptt[3]);
    arr[0] = 3;
    arr[1] = 3;
    /* random lines */
    for (i = 0; i< NUMBER; i++)
    {
        pt1.x=cvRandInt(&rng) % width3 - width;
        pt1.y=cvRandInt(&rng) % height3 - height;
        pt2.x=cvRandInt(&rng) % width3 - width;
        pt2.y=cvRandInt(&rng) % height3 - height;
        cvLine( image, pt1, pt2, random_color(&rng), cvRandInt(&rng)%10, line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random rectangles */
    for (i = 0; i< NUMBER; i++)
    {
        pt1.x=cvRandInt(&rng) % width3 - width;
        pt1.y=cvRandInt(&rng) % height3 - height;
        pt2.x=cvRandInt(&rng) % width3 - width;
        pt2.y=cvRandInt(&rng) % height3 - height;
        cvRectangle( image,pt1, pt2, random_color(&rng), cvRandInt(&rng)%10-1, line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random ellipses / elliptic arcs */
    for (i = 0; i< NUMBER; i++)
    {
        pt1.x=cvRandInt(&rng) % width3 - width;
        pt1.y=cvRandInt(&rng) % height3 - height;
        sz.width =cvRandInt(&rng)%200;
        sz.height=cvRandInt(&rng)%200;
        angle = (cvRandInt(&rng)%1000)*0.180;
        cvEllipse( image, pt1, sz, angle, angle - 100, angle + 200,
                   random_color(&rng), cvRandInt(&rng)%10-1, line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random pairs of triangles, outlined */
    for (i = 0; i< NUMBER; i++)
    {
        pt[0][0].x=cvRandInt(&rng) % width3 - width;
        pt[0][0].y=cvRandInt(&rng) % height3 - height;
        pt[0][1].x=cvRandInt(&rng) % width3 - width;
        pt[0][1].y=cvRandInt(&rng) % height3 - height;
        pt[0][2].x=cvRandInt(&rng) % width3 - width;
        pt[0][2].y=cvRandInt(&rng) % height3 - height;
        pt[1][0].x=cvRandInt(&rng) % width3 - width;
        pt[1][0].y=cvRandInt(&rng) % height3 - height;
        pt[1][1].x=cvRandInt(&rng) % width3 - width;
        pt[1][1].y=cvRandInt(&rng) % height3 - height;
        pt[1][2].x=cvRandInt(&rng) % width3 - width;
        pt[1][2].y=cvRandInt(&rng) % height3 - height;
        cvPolyLine( image, pt, arr, 2, 1, random_color(&rng), cvRandInt(&rng)%10, line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random pairs of triangles, filled */
    for (i = 0; i< NUMBER; i++)
    {
        pt[0][0].x=cvRandInt(&rng) % width3 - width;
        pt[0][0].y=cvRandInt(&rng) % height3 - height;
        pt[0][1].x=cvRandInt(&rng) % width3 - width;
        pt[0][1].y=cvRandInt(&rng) % height3 - height;
        pt[0][2].x=cvRandInt(&rng) % width3 - width;
        pt[0][2].y=cvRandInt(&rng) % height3 - height;
        pt[1][0].x=cvRandInt(&rng) % width3 - width;
        pt[1][0].y=cvRandInt(&rng) % height3 - height;
        pt[1][1].x=cvRandInt(&rng) % width3 - width;
        pt[1][1].y=cvRandInt(&rng) % height3 - height;
        pt[1][2].x=cvRandInt(&rng) % width3 - width;
        pt[1][2].y=cvRandInt(&rng) % height3 - height;
        cvFillPoly( image, pt, arr, 2, random_color(&rng), line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random circles */
    for (i = 0; i< NUMBER; i++)
    {
        pt1.x=cvRandInt(&rng) % width3 - width;
        pt1.y=cvRandInt(&rng) % height3 - height;
        cvCircle( image, pt1, cvRandInt(&rng)%300, random_color(&rng),
                  cvRandInt(&rng)%10-1, line_type, 0 );
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* random text with random fonts (note: this loop starts at i = 1) */
    for (i = 1; i< NUMBER; i++)
    {
        pt1.x=cvRandInt(&rng) % width3 - width;
        pt1.y=cvRandInt(&rng) % height3 - height;
        cvInitFont( &font, cvRandInt(&rng) % 8,
                    (cvRandInt(&rng)%100)*0.05+0.1, (cvRandInt(&rng)%100)*0.05+0.1,
                    (cvRandInt(&rng)%5)*0.1, cvRound(cvRandInt(&rng)%10), line_type );
        cvPutText( image, "Testing text rendering!", pt1, &font, random_color(&rng));
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    /* fade the accumulated drawing out behind a centered title */
    cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX, 3, 3, 0.0, 5, line_type );
    cvGetTextSize( "OpenCV forever!", &font, &text_size, &ymin );
    pt1.x = (width - text_size.width)/2;
    pt1.y = (height + text_size.height)/2;
    image2 = cvCloneImage(image);
    for( i = 0; i < 255; i++ )
    {
        cvSubS( image2, cvScalarAll(i), image, 0 );
        cvPutText( image, "OpenCV forever!", pt1, &font, CV_RGB(255,i,i));
        cvShowImage(wndname,image);
        if(cvWaitKey(DELAY) >= 0) return 0;
    }
    // Wait for a key stroke; the same function arranges events processing
    cvWaitKey(0);
    cvReleaseImage(&image);
    cvReleaseImage(&image2);
    cvDestroyWindow(wndname);
    return 0;
}
#ifdef _EiC
main(1,"drawing.c");
#endif

70
samples/c/edge.c Normal file
View File

@@ -0,0 +1,70 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#endif
char wndname[] = "Edge";
char tbarname[] = "Threshold";
int edge_thresh = 1;
IplImage *image = 0, *cedge = 0, *gray = 0, *edge = 0;
// threshold trackbar callback: run Canny on the grayscale image and show
// the original image masked by the detected edges.
void on_trackbar(int h)
{
    cvSmooth( gray, edge, CV_BLUR, 3, 3, 0, 0 );
    /* NOTE(review): cvNot below reads `gray` and overwrites `edge`, so the
       smoothing result above is discarded — presumably the intent was to
       feed the smoothed image into the steps below; confirm before changing. */
    cvNot( gray, edge );
    // Run the edge detector on grayscale
    cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 3);
    cvZero( cedge );
    // copy edge points
    cvCopy( image, cedge, edge );
    cvShowImage(wndname, cedge);
}
int main( int argc, char** argv )
{
char* filename = argc == 2 ? argv[1] : (char*)"fruits.jpg";
if( (image = cvLoadImage( filename, 1)) == 0 )
return -1;
// Create the output image
cedge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 3);
// Convert to grayscale
gray = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
cvCvtColor(image, gray, CV_BGR2GRAY);
// Create a window
cvNamedWindow(wndname, 1);
// create a toolbar
cvCreateTrackbar(tbarname, wndname, &edge_thresh, 100, on_trackbar);
// Show the image
on_trackbar(0);
// Wait for a key stroke; the same function arranges events processing
cvWaitKey(0);
cvReleaseImage(&image);
cvReleaseImage(&gray);
cvReleaseImage(&edge);
cvDestroyWindow(wndname);
return 0;
}
#ifdef _EiC
main(1,"edge.c");
#endif

View File

@@ -0,0 +1,18 @@
# Minimal example of building an OpenCV program with CMake.
# fix: cmake_minimum_required must precede project() — it establishes the
# policy defaults project() relies on. Commands lowercased and the legacy
# repeated-condition endif() removed per current CMake style.
cmake_minimum_required(VERSION 2.6)
project(opencv_example)

if(COMMAND cmake_policy)
  cmake_policy(SET CMP0003 NEW)
endif()

find_package(OpenCV REQUIRED)

# Declare the target (an executable) and link it against OpenCV
add_executable(opencv_example minarea.c)
target_link_libraries(opencv_example ${OpenCV_LIBS})

#message(STATUS "OpenCV_LIBS: ${OpenCV_LIBS}")

View File

@@ -0,0 +1,32 @@
Example for CMake build system.
Compile OpenCV with CMake, preferably in an out-of-tree build, for example:
$ mkdir opencv-release
$ cd opencv-release
$ cmake <OPENCV_SRC_PATH>
$ make
And, *only optionally*, install it with:
$ sudo make install
Then create the binary directory for the example with:
$ mkdir example-release
$ cd example-release
Then, if "make install" has been executed, directly running
$ cmake <OPENCV_SRC_PATH>/samples/c/example_cmake/
will detect the "OpenCVConfig.cmake" file and the project is ready to compile.
If "make install" has not been executed, you'll have to manually pick the opencv
binary directory (Under Windows CMake may remember the correct directory). Open
the CMake gui with:
$ cmake-gui <OPENCV_SRC_PATH>/samples/c/example_cmake/
And pick the correct value for OpenCV_DIR.

107
samples/c/example_cmake/minarea.c Executable file
View File

@@ -0,0 +1,107 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdlib.h>
#endif
#define ARRAY 1
/* Demo: repeatedly generate a random point set and show both its
   minimum-area bounding rectangle (green) and its minimum enclosing
   circle (yellow). With ARRAY != 0 the points live in a plain
   array/CvMat; otherwise in a CvSeq backed by a memory storage.
   Esc/'q'/'Q' exits. */
int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif
    cvNamedWindow( "rect & circle", 1 );
    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1;
        CvPoint pt0, pt;
        CvBox2D box;
        CvPoint2D32f box_vtx[4];
        CvPoint2D32f center;
        CvPoint icenter;
        float radius;
#if !ARRAY
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                    sizeof(CvPoint), storage );
        /* fill the sequence with random points in the middle half of the image */
        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
#ifndef _EiC /* unfortunately, here EiC crashes */
        box = cvMinAreaRect2( ptseq, 0 );
#endif
        cvMinEnclosingCircle( ptseq, &center, &radius );
#else
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );
        /* fill the array with random points in the middle half of the image */
        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
#ifndef _EiC
        box = cvMinAreaRect2( &pointMat, 0 );
#endif
        cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
        cvBoxPoints( box, box_vtx );  /* rectangle corners as float points */
        cvZero( img );
        /* draw the input points */
        for( i = 0; i < count; i++ )
        {
#if !ARRAY
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }
#ifndef _EiC
        /* draw the bounding rectangle as a closed 4-segment polyline */
        pt0.x = cvRound(box_vtx[3].x);
        pt0.y = cvRound(box_vtx[3].y);
        for( i = 0; i < 4; i++ )
        {
            pt.x = cvRound(box_vtx[i].x);
            pt.y = cvRound(box_vtx[i].y);
            cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
            pt0 = pt;
        }
#endif
        /* draw the enclosing circle */
        icenter.x = cvRound(center.x);
        icenter.y = cvRound(center.y);
        cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );
        cvShowImage( "rect & circle", img );
        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;
#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
#endif
    }
    cvDestroyWindow( "rect & circle" );
    return 0;
}
#ifdef _EiC
main(1,"convexhull.c");
#endif

1
samples/c/facedetect.cmd Executable file
View File

@@ -0,0 +1 @@
facedetect --cascade="../../data/haarcascades/haarcascade_frontalface_alt.xml" --nested-cascade="../../data/haarcascades/haarcascade_eye.xml" --scale=1.3 %1

213
samples/c/facedetect.cpp Normal file
View File

@@ -0,0 +1,213 @@
#define CV_NO_BACKWARD_COMPATIBILITY
#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <cstdio>
#ifdef _EiC
#define WIN32
#endif
using namespace std;
using namespace cv;
void detectAndDraw( Mat& img,
CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
double scale);
String cascadeName =
"../../data/haarcascades/haarcascade_frontalface_alt.xml";
String nestedCascadeName =
"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
int main( int argc, const char** argv )
{
    // Face detection demo entry point.
    //
    // Options:
    //   --cascade=<path>           main (face) cascade
    //   --nested-cascade[=<path>]  optional cascade run inside each detected face
    //   --scale=<factor>           image downscale factor (clamped to >= 1)
    // The remaining argument is a camera index, an image/video file, or a text
    // file listing image filenames (one per line).
    CvCapture* capture = 0;
    Mat frame, frameCopy, image;
    const String scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const String cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const String nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    String inputName;
    CascadeClassifier cascade, nestedCascade;
    double scale = 1;

    for( int i = 1; i < argc; i++ )
    {
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
            cascadeName.assign( argv[i] + cascadeOptLen );
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            // "--nested-cascade" with no "=<path>" keeps the default path.
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
                scale = 1;
        }
        else if( argv[i][0] == '-' )
        {
            // Bug fix: the original streamed a literal printf-style "%s" here.
            cerr << "WARNING: Unknown option " << argv[i] << endl;
        }
        else
            inputName.assign( argv[i] );
    }

    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        // Bug fix: the usage text was missing the closing brackets after --scale.
        cerr << "Usage: facedetect [--cascade=\"<cascade_path>\"]\n"
            "   [--nested-cascade[=\"nested_cascade_path\"]]\n"
            "   [--scale[=<image scale>]]\n"
            "   [filename|camera_index]\n" ;
        return -1;
    }

    // Decide on the input source: camera index, image file, or video file.
    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
            capture = cvCaptureFromAVI( inputName.c_str() );
    }
    else
        image = imread( "lena.jpg", 1 );  // NOTE(review): unreachable — empty inputName is handled above

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        for(;;)
        {
            IplImage* iplImg = cvQueryFrame( capture );
            if( !iplImg )   // robustness fix: guard against a NULL frame from the driver
                break;
            frame = iplImg;
            if( frame.empty() )
                break;
            // Captured frames may be bottom-origin; flip them upright.
            if( iplImg->origin == IPL_ORIGIN_TL )
                frame.copyTo( frameCopy );
            else
                flip( frame, frameCopy, 0 );
            detectAndDraw( frameCopy, cascade, nestedCascade, scale );
            if( waitKey( 10 ) >= 0 )
                goto _cleanup_;
        }
        waitKey(0);
_cleanup_:
        cvReleaseCapture( &capture );
    }
    else
    {
        if( !image.empty() )
        {
            detectAndDraw( image, cascade, nestedCascade, scale );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    // strip trailing whitespace/newline from the filename
                    int len = (int)strlen(buf), c;
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        detectAndDraw( image, cascade, nestedCascade, scale );
                        c = waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                }
                fclose(f);
            }
        }
    }

    cvDestroyWindow("result");
    return 0;
}
void detectAndDraw( Mat& img,
    CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
    double scale)
{
    // Runs the face cascade on a downscaled, equalized grayscale copy of img,
    // draws a circle around every detection (and every nested detection, e.g.
    // eyes), prints the detection time, and shows the image in "result".
    static const Scalar palette[] = { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;

    Mat gray;
    Mat smallImg( cvRound(img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    vector<Rect> faces;
    double t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CV_HAAR_FIND_BIGGEST_OBJECT
        //|CV_HAAR_DO_ROUGH_SEARCH
        |CV_HAAR_SCALE_IMAGE
        ,
        Size(30, 30) );
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );

    for( size_t fi = 0; fi < faces.size(); fi++ )
    {
        const Rect& face = faces[fi];
        Scalar color = palette[fi%8];

        // Map the detection back to full-resolution coordinates.
        Point center( cvRound((face.x + face.width*0.5)*scale),
                      cvRound((face.y + face.height*0.5)*scale) );
        int radius = cvRound((face.width + face.height)*0.25*scale);
        circle( img, center, radius, color, 3, 8, 0 );

        if( nestedCascade.empty() )
            continue;

        // Search for nested objects (e.g. eyes) inside the face ROI.
        Mat faceROI = smallImg(face);
        vector<Rect> nestedObjects;
        nestedCascade.detectMultiScale( faceROI, nestedObjects,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            //|CV_HAAR_DO_CANNY_PRUNING
            |CV_HAAR_SCALE_IMAGE
            ,
            Size(30, 30) );
        for( size_t ni = 0; ni < nestedObjects.size(); ni++ )
        {
            const Rect& nr = nestedObjects[ni];
            Point ncenter( cvRound((face.x + nr.x + nr.width*0.5)*scale),
                           cvRound((face.y + nr.y + nr.height*0.5)*scale) );
            int nradius = cvRound((nr.width + nr.height)*0.25*scale);
            circle( img, ncenter, nradius, color, 3, 8, 0 );
        }
    }
    cv::imshow( "result", img );
}

48
samples/c/fback.cpp Normal file
View File

@@ -0,0 +1,48 @@
#undef _GLIBCXX_DEBUG
#include "cv.h"
#include "highgui.h"
using namespace cv;
void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
double scale, const Scalar& color)
{
for(int y = 0; y < cflowmap.rows; y += step)
for(int x = 0; x < cflowmap.cols; x += step)
{
const Point2f& fxy = flow.at<Point2f>(y, x);
line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)),
color);
circle(cflowmap, Point(x,y), 2, color, -1);
}
}
int main(int argc, char** argv)
{
    // Farneback dense optical flow demo: captures from the default camera,
    // computes flow between consecutive grayscale frames and visualizes it.
    // Press any key to quit.
    VideoCapture cap(0);
    if( !cap.isOpened() )
        return -1;

    Mat prevgray, gray, flow, cflow, frame;
    namedWindow("flow", 1);

    for(;;)
    {
        cap >> frame;
        if( frame.empty() )   // robustness fix: camera may stop delivering frames
            break;
        cvtColor(frame, gray, CV_BGR2GRAY);

        // Need two frames before flow can be computed.
        if( prevgray.data )
        {
            calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
            cvtColor(prevgray, cflow, CV_GRAY2BGR);
            drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
            imshow("flow", cflow);
        }
        if(waitKey(30)>=0)
            break;
        std::swap(prevgray, gray);
    }
    return 0;
}

61
samples/c/fback_c.c Normal file
View File

@@ -0,0 +1,61 @@
#undef _GLIBCXX_DEBUG
#include "cv.h"
#include "highgui.h"
void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
                    double scale, CvScalar color)
{
    /* Visualizes a dense flow field: for every grid cell of pitch 'step',
       draws a line from the cell center along the flow vector plus a dot at
       the cell center itself.
       NOTE(review): 'scale' is currently unused - vectors are drawn at their
       native magnitude. */
    int row, col;
    for( row = 0; row < cflowmap->rows; row += step )
    {
        for( col = 0; col < cflowmap->cols; col += step )
        {
            CvPoint2D32f d = CV_MAT_ELEM(*flow, CvPoint2D32f, row, col);
            CvPoint from = cvPoint(col, row);
            CvPoint to = cvPoint(cvRound(col + d.x), cvRound(row + d.y));
            cvLine(cflowmap, from, to, color, 1, 8, 0);
            cvCircle(cflowmap, from, 2, color, -1, 8, 0);
        }
    }
}
int main(int argc, char** argv)
{
    /* Farneback dense optical flow demo (C API): captures from the default
       camera, computes flow between consecutive grayscale frames and
       visualizes it. Press any key to quit.
       Fixes vs. original: the matrices and the window are now released on
       exit (they were leaked). */
    CvCapture* capture = cvCreateCameraCapture(0);
    CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;

    if( !capture )
        return -1;

    cvNamedWindow("flow", 1);

    for(;;)
    {
        int firstFrame = gray == 0;
        IplImage* frame = cvQueryFrame(capture);
        if(!frame)
            break;
        if(!gray)
        {
            /* lazily allocate buffers once the frame size is known */
            gray = cvCreateMat(frame->height, frame->width, CV_8UC1);
            prevgray = cvCreateMat(gray->rows, gray->cols, gray->type);
            flow = cvCreateMat(gray->rows, gray->cols, CV_32FC2);
            cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
        }
        cvCvtColor(frame, gray, CV_BGR2GRAY);

        /* need two frames before flow can be computed */
        if( !firstFrame )
        {
            cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
            cvCvtColor(prevgray, cflow, CV_GRAY2BGR);
            drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
            cvShowImage("flow", cflow);
        }
        if(cvWaitKey(30)>=0)
            break;
        {
            CvMat* temp;
            CV_SWAP(prevgray, gray, temp);
        }
    }

    /* leak fixes: these releases were missing in the original */
    cvReleaseMat(&prevgray);
    cvReleaseMat(&gray);
    cvReleaseMat(&flow);
    cvReleaseMat(&cflow);
    cvReleaseCapture(&capture);
    cvDestroyWindow("flow");
    return 0;
}

187
samples/c/ffilldemo.c Normal file
View File

@@ -0,0 +1,187 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#endif
IplImage* color_img0;       /* original color image as loaded */
IplImage* mask;             /* flood-fill mask (2 px larger than the image) */
IplImage* color_img;        /* working color image, repainted in place */
IplImage* gray_img0 = NULL; /* original grayscale conversion */
IplImage* gray_img = NULL;  /* working grayscale image */
int ffill_case = 1;         /* 0 - simple, 1 - fixed range, 2 - floating range */
int lo_diff = 20, up_diff = 20; /* lower/upper brightness difference thresholds */
int connectivity = 4;       /* 4- or 8-connectivity */
int is_color = 1;           /* 1 - operate on the color image, 0 - grayscale */
int is_mask = 0;            /* 1 - the mask window is shown and used */
int new_mask_val = 255;     /* value painted into the mask by cvFloodFill */
/* Mouse callback: on left click, flood-fills the displayed image starting
   from the clicked seed point, using the current mode/thresholds selected
   through the UI, and reports the repainted area. */
void on_mouse( int event, int x, int y, int flags, void* param )
{
if( !color_img )
return;
switch( event )
{
case CV_EVENT_LBUTTONDOWN:
{
CvPoint seed = cvPoint(x,y);
/* in "simple" mode (case 0) both ranges are zero */
int lo = ffill_case == 0 ? 0 : lo_diff;
int up = ffill_case == 0 ? 0 : up_diff;
/* flags layout: connectivity in bits 0-7, mask fill value in bits 8-15,
   plus fixed-range comparison for mode 1 */
int flags = connectivity + (new_mask_val << 8) +
(ffill_case == 1 ? CV_FLOODFILL_FIXED_RANGE : 0);
int b = rand() & 255, g = rand() & 255, r = rand() & 255;
CvConnectedComp comp;
/* reduce previously filled mask pixels to 128 so the new fill stands out */
if( is_mask )
cvThreshold( mask, mask, 1, 128, CV_THRESH_BINARY );
if( is_color )
{
CvScalar color = CV_RGB( r, g, b );
cvFloodFill( color_img, seed, color, CV_RGB( lo, lo, lo ),
CV_RGB( up, up, up ), &comp, flags, is_mask ? mask : NULL );
cvShowImage( "image", color_img );
}
else
{
/* grayscale fill uses a random brightness derived from (r,g,b) */
CvScalar brightness = cvRealScalar((r*2 + g*7 + b + 5)/10);
cvFloodFill( gray_img, seed, brightness, cvRealScalar(lo),
cvRealScalar(up), &comp, flags, is_mask ? mask : NULL );
cvShowImage( "image", gray_img );
}
printf("%g pixels were repainted\n", comp.area );
if( is_mask )
cvShowImage( "mask", mask );
}
break;
}
}
int main( int argc, char** argv )
{
    /* Flood-fill demo: loads an image (fruits.jpg by default) and lets the
       user flood-fill regions with the mouse; hot keys switch the fill mode,
       color/gray mode, connectivity and the mask window.
       Fixes vs. original: destroys the windows it actually created
       ("image"/"mask" rather than a non-existent "test") and returns 0 on
       success instead of 1. */
    char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";

    if( (color_img0 = cvLoadImage(filename,1)) == 0 )
        return 0;

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - switch color/grayscale mode\n"
        "\tm - switch mask mode\n"
        "\tr - restore the original image\n"
        "\ts - use null-range floodfill\n"
        "\tf - use gradient floodfill with fixed(absolute) range\n"
        "\tg - use gradient floodfill with floating(relative) range\n"
        "\t4 - use 4-connectivity mode\n"
        "\t8 - use 8-connectivity mode\n" );

    color_img = cvCloneImage( color_img0 );
    gray_img0 = cvCreateImage( cvSize(color_img->width, color_img->height), 8, 1 );
    cvCvtColor( color_img, gray_img0, CV_BGR2GRAY );
    gray_img = cvCloneImage( gray_img0 );
    /* the flood-fill mask must be 2 pixels larger than the image */
    mask = cvCreateImage( cvSize(color_img->width + 2, color_img->height + 2), 8, 1 );

    cvNamedWindow( "image", 0 );
    cvCreateTrackbar( "lo_diff", "image", &lo_diff, 255, NULL );
    cvCreateTrackbar( "up_diff", "image", &up_diff, 255, NULL );
    cvSetMouseCallback( "image", on_mouse, 0 );

    for(;;)
    {
        int c;

        if( is_color )
            cvShowImage( "image", color_img );
        else
            cvShowImage( "image", gray_img );

        c = cvWaitKey(0);
        switch( (char) c )
        {
        case '\x1b':
            printf("Exiting ...\n");
            goto exit_main;
        case 'c':
            if( is_color )
            {
                printf("Grayscale mode is set\n");
                cvCvtColor( color_img, gray_img, CV_BGR2GRAY );
                is_color = 0;
            }
            else
            {
                printf("Color mode is set\n");
                cvCopy( color_img0, color_img, NULL );
                cvZero( mask );
                is_color = 1;
            }
            break;
        case 'm':
            if( is_mask )
            {
                cvDestroyWindow( "mask" );
                is_mask = 0;
            }
            else
            {
                cvNamedWindow( "mask", 0 );
                cvZero( mask );
                cvShowImage( "mask", mask );
                is_mask = 1;
            }
            break;
        case 'r':
            printf("Original image is restored\n");
            cvCopy( color_img0, color_img, NULL );
            cvCopy( gray_img0, gray_img, NULL );
            cvZero( mask );
            break;
        case 's':
            printf("Simple floodfill mode is set\n");
            ffill_case = 0;
            break;
        case 'f':
            printf("Fixed Range floodfill mode is set\n");
            ffill_case = 1;
            break;
        case 'g':
            printf("Gradient (floating range) floodfill mode is set\n");
            ffill_case = 2;
            break;
        case '4':
            printf("4-connectivity mode is set\n");
            connectivity = 4;
            break;
        case '8':
            printf("8-connectivity mode is set\n");
            connectivity = 8;
            break;
        }
    }

exit_main:
    /* bug fix: the original destroyed a non-existent "test" window */
    if( is_mask )
        cvDestroyWindow( "mask" );
    cvDestroyWindow( "image" );
    cvReleaseImage( &gray_img );
    cvReleaseImage( &gray_img0 );
    cvReleaseImage( &color_img );
    cvReleaseImage( &color_img0 );
    cvReleaseImage( &mask );
    return 0;  /* bug fix: returned 1 on success */
}
#ifdef _EiC
/* EiC interpreter entry point: auto-run this demo */
main(1,"ffilldemo.c");
#endif

308
samples/c/find_obj.cpp Normal file
View File

@@ -0,0 +1,308 @@
/*
* A demo of the OpenCV implementation of SURF
* For further information refer to "SURF: Speeded-Up Robust Features"
* Author: Liu Liu
* liuliu.1987+opencv@gmail.com
*/
#include <cv.h>
#include <highgui.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
using namespace std;
// define whether to use approximate nearest-neighbor search
#define USE_FLANN
IplImage *image = 0;
double
compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
{
    // Accumulates the squared L2 distance between two SURF descriptors,
    // four components at a time, bailing out early once the partial sum
    // already exceeds 'best' (the caller's current best match distance).
    double acc = 0;
    assert( length % 4 == 0 );
    for( int k = 0; k < length; k += 4 )
    {
        double a = d1[k]   - d2[k];
        double b = d1[k+1] - d2[k+1];
        double c = d1[k+2] - d2[k+2];
        double d = d1[k+3] - d2[k+3];
        acc += a*a + b*b + c*c + d*d;
        if( acc > best )
            break;
    }
    return acc;
}
/* Brute-force nearest-neighbor search of a SURF descriptor among the model
   descriptors. Returns the index of the best match, or -1 if the best match
   is not clearly better than the second best (0.6 ratio test). */
int
naiveNearestNeighbor( const float* vec, int laplacian,
const CvSeq* model_keypoints,
const CvSeq* model_descriptors )
{
int length = (int)(model_descriptors->elem_size/sizeof(float));
int i, neighbor = -1;
/* dist1/dist2 track the best and second-best squared distances seen */
double d, dist1 = 1e6, dist2 = 1e6;
CvSeqReader reader, kreader;
cvStartReadSeq( model_keypoints, &kreader, 0 );
cvStartReadSeq( model_descriptors, &reader, 0 );
for( i = 0; i < model_descriptors->total; i++ )
{
const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
const float* mvec = (const float*)reader.ptr;
/* both readers must advance in lockstep, even for skipped entries */
CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
/* only keypoints with matching laplacian sign can correspond */
if( laplacian != kp->laplacian )
continue;
/* pass dist2 as the cutoff so hopeless comparisons exit early */
d = compareSURFDescriptors( vec, mvec, dist2, length );
if( d < dist1 )
{
dist2 = dist1;
dist1 = d;
neighbor = i;
}
else if ( d < dist2 )
dist2 = d;
}
/* ratio test: accept only if clearly better than the runner-up */
if ( dist1 < 0.6*dist2 )
return neighbor;
return -1;
}
void
findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
int i;
CvSeqReader reader, kreader;
cvStartReadSeq( objectKeypoints, &kreader );
cvStartReadSeq( objectDescriptors, &reader );
ptpairs.clear();
for( i = 0; i < objectDescriptors->total; i++ )
{
const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
const float* descriptor = (const float*)reader.ptr;
CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian, imageKeypoints, imageDescriptors );
if( nearest_neighbor >= 0 )
{
ptpairs.push_back(i);
ptpairs.push_back(nearest_neighbor);
}
}
}
/* FLANN-based descriptor matching: copies the CvSeq descriptors into dense
   cv::Mat matrices, builds a randomized kd-tree index over the image
   descriptors, and keeps object->image matches that pass the 0.6 ratio test.
   NOTE(review): unlike findPairs(), this does not clear ptpairs first. */
void
flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
int length = (int)(objectDescriptors->elem_size/sizeof(float));
cv::Mat m_object(objectDescriptors->total, length, CV_32F);
cv::Mat m_image(imageDescriptors->total, length, CV_32F);
// copy descriptors
CvSeqReader obj_reader;
float* obj_ptr = m_object.ptr<float>(0);
cvStartReadSeq( objectDescriptors, &obj_reader );
for(int i = 0; i < objectDescriptors->total; i++ )
{
const float* descriptor = (const float*)obj_reader.ptr;
CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
memcpy(obj_ptr, descriptor, length*sizeof(float));
obj_ptr += length;
}
CvSeqReader img_reader;
float* img_ptr = m_image.ptr<float>(0);
cvStartReadSeq( imageDescriptors, &img_reader );
for(int i = 0; i < imageDescriptors->total; i++ )
{
const float* descriptor = (const float*)img_reader.ptr;
CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
memcpy(img_ptr, descriptor, length*sizeof(float));
img_ptr += length;
}
// find nearest neighbors using FLANN
cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4));  // using 4 randomized kdtrees
flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
int* indices_ptr = m_indices.ptr<int>(0);
float* dists_ptr = m_dists.ptr<float>(0);
/* ratio test: accept a match only if it is clearly better than the 2nd best */
for (int i=0;i<m_indices.rows;++i) {
if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
ptpairs.push_back(i);
ptpairs.push_back(indices_ptr[2*i]);
}
}
}
/* a rough implementation for object location */
/* Estimates a homography from matched SURF keypoints and projects the four
   object corners into the scene. Returns 1 on success, 0 when there are
   fewer than 4 matches or the homography cannot be estimated. */
int
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
const CvPoint src_corners[4], CvPoint dst_corners[4] )
{
double h[9];
CvMat _h = cvMat(3, 3, CV_64F, h);
vector<int> ptpairs;
vector<CvPoint2D32f> pt1, pt2;
CvMat _pt1, _pt2;
int i, n;
#ifdef USE_FLANN
flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
/* ptpairs stores (object_idx, image_idx) pairs flat, so n = pair count */
n = ptpairs.size()/2;
if( n < 4 )
return 0;
pt1.resize(n);
pt2.resize(n);
for( i = 0; i < n; i++ )
{
pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
}
_pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
_pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
/* RANSAC with a 5-pixel reprojection threshold */
if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
return 0;
/* project the object's corners through the homography */
for( i = 0; i < 4; i++ )
{
double x = src_corners[i].x, y = src_corners[i].y;
double Z = 1./(h[6]*x + h[7]*y + h[8]);
double X = (h[0]*x + h[1]*y + h[2])*Z;
double Y = (h[3]*x + h[4]*y + h[5])*Z;
dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
}
return 1;
}
int main(int argc, char** argv)
{
    // SURF object-detection demo: extracts SURF keypoints from an object and
    // a scene image, matches them (FLANN or brute force), locates the object
    // via a homography, and visualizes the correspondences.
    // Fixes vs. original: releases the loaded images and the memory storage,
    // and destroys only the windows that were actually created (the original
    // destroyed a never-created "Object SURF" window).
    const char* object_filename = argc == 3 ? argv[1] : "box.png";
    const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";

    CvMemStorage* storage = cvCreateMemStorage(0);

    cvNamedWindow("Object", 1);
    cvNamedWindow("Object Correspond", 1);

    static CvScalar colors[] =
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !object || !image )
    {
        fprintf( stderr, "Can not load %s and/or %s\n"
            "Usage: find_obj [<object_filename> <scene_filename>]\n",
            object_filename, scene_filename );
        exit(-1);
    }
    IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
    cvCvtColor( object, object_color, CV_GRAY2BGR );

    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
    int i;
    CvSURFParams params = cvSURFParams(500, 1);

    double tt = (double)cvGetTickCount();
    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
    printf("Object Descriptors: %d\n", objectDescriptors->total);
    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
    printf("Image Descriptors: %d\n", imageDescriptors->total);
    tt = (double)cvGetTickCount() - tt;
    printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));

    CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
    CvPoint dst_corners[4];

    // Stack the object image above the scene image for visualization.
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );

#ifdef USE_FLANN
    printf("Using approximate nearest neighbor search\n");
#endif

    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        // Draw the located object's outline in the scene half of the montage.
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }
    vector<int> ptpairs;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    // Draw a line for every accepted correspondence.
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }

    cvShowImage( "Object Correspond", correspond );
    // Mark all object keypoints, radius proportional to feature size.
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);

    cvDestroyWindow("Object");
    cvDestroyWindow("Object Correspond");

    // Leak fixes: release images and the SURF memory storage.
    cvReleaseImage(&object);
    cvReleaseImage(&image);
    cvReleaseImage(&object_color);
    cvReleaseImage(&correspond);
    cvReleaseMemStorage(&storage);
    return 0;
}

View File

@@ -0,0 +1,305 @@
//Calonder descriptor sample
#include <cxcore.h>
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <vector>
using namespace std;
// Number of training points (set to -1 to use all points)
const int n_points = -1;
// Draw the border of the projection of the train image, calculated by
// averaging the detected correspondences
const bool draw_border = true;
void cvmSet6(CvMat* m, int row, int col, float val1, float val2, float val3, float val4, float val5, float val6)
{
    // Writes six consecutive floats into row 'row' of m, starting at 'col'.
    const float vals[6] = { val1, val2, val3, val4, val5, val6 };
    for( int k = 0; k < 6; k++ )
        cvmSet(m, row, col + k, vals[k]);
}
/* Estimates the 2x3 affine transform mapping p1 -> p2 in the least-squares
   sense: builds a 2N x 6 linear system (two rows per point pair), solves it
   via SVD, and writes the six coefficients row-major into 'affine'. */
void FindAffineTransform(const vector<CvPoint>& p1, const vector<CvPoint>& p2, CvMat* affine)
{
int eq_num = 2*(int)p1.size();
CvMat* A = cvCreateMat(eq_num, 6, CV_32FC1);
CvMat* B = cvCreateMat(eq_num, 1, CV_32FC1);
CvMat* X = cvCreateMat(6, 1, CV_32FC1);
for(int i = 0; i < (int)p1.size(); i++)
{
/* row 2i:   [x y 1 0 0 0] * X = x'
   row 2i+1: [0 0 0 x y 1] * X = y' */
cvmSet6(A, 2*i, 0, p1[i].x, p1[i].y, 1, 0, 0, 0);
cvmSet6(A, 2*i + 1, 0, 0, 0, 0, p1[i].x, p1[i].y, 1);
cvmSet(B, 2*i, 0, p2[i].x);
cvmSet(B, 2*i + 1, 0, p2[i].y);
}
cvSolve(A, B, X, CV_SVD);
cvmSet(affine, 0, 0, cvmGet(X, 0, 0));
cvmSet(affine, 0, 1, cvmGet(X, 1, 0));
cvmSet(affine, 0, 2, cvmGet(X, 2, 0));
cvmSet(affine, 1, 0, cvmGet(X, 3, 0));
cvmSet(affine, 1, 1, cvmGet(X, 4, 0));
cvmSet(affine, 1, 2, cvmGet(X, 5, 0));
/* temporaries released; the result lives in 'affine' */
cvReleaseMat(&A);
cvReleaseMat(&B);
cvReleaseMat(&X);
}
void MapVectorAffine(const vector<CvPoint>& p1, vector<CvPoint>& p2, CvMat* transform)
{
float a = cvmGet(transform, 0, 0);
float b = cvmGet(transform, 0, 1);
float c = cvmGet(transform, 0, 2);
float d = cvmGet(transform, 1, 0);
float e = cvmGet(transform, 1, 1);
float f = cvmGet(transform, 1, 2);
for(int i = 0; i < (int)p1.size(); i++)
{
float x = a*p1[i].x + b*p1[i].y + c;
float y = d*p1[i].x + e*p1[i].y + f;
p2.push_back(cvPoint(x, y));
}
}
float CalcAffineReprojectionError(const vector<CvPoint>& p1, const vector<CvPoint>& p2, CvMat* transform)
{
    // Mean squared reprojection error of 'transform' applied to p1 vs. p2.
    // Robustness fix: return 0 for empty input instead of dividing by zero
    // (which produced NaN).
    if( p2.empty() )
        return 0;

    vector<CvPoint> mapped_p1;
    MapVectorAffine(p1, mapped_p1, transform);

    float error = 0;
    for(int i = 0; i < (int)p2.size(); i++)
    {
        error += ((p2[i].x - mapped_p1[i].x)*(p2[i].x - mapped_p1[i].x)+(p2[i].y - mapped_p1[i].y)*(p2[i].y - mapped_p1[i].y));
    }
    error /= p2.size();
    return error;
}
int main( int argc, char** argv )
{
// NOTE: this sample is disabled - it prints a notice and returns
// immediately. Everything below the early return is dead code kept for
// reference while the sample is under construction.
printf("calonder_sample is under construction\n");
return 0;
IplImage* test_image;
IplImage* train_image;
if (argc < 3)
{
test_image = cvLoadImage("box_in_scene.png",0);
train_image = cvLoadImage("box.png ",0);
if (!test_image || !train_image)
{
printf("Usage: calonder_sample <train_image> <test_image>");
return 0;
}
}
else
{
test_image = cvLoadImage(argv[2],0);
train_image = cvLoadImage(argv[1],0);
}
if (!train_image)
{
printf("Unable to load train image\n");
return 0;
}
if (!test_image)
{
printf("Unable to load test image\n");
return 0;
}
/* extract SURF keypoints from both images */
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
CvSURFParams params = cvSURFParams(500, 1);
cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors, storage, params );
cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors, storage, params );
cv::RTreeClassifier detector;
int patch_width = cv::PATCH_SIZE;
int patch_height = cv::PATCH_SIZE;
vector<cv::BaseKeypoint> base_set;
int i=0;
CvSURFPoint* point;
/* use the train-image keypoints (all, or the first n_points) as classes */
for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
{
point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
base_set.push_back(cv::BaseKeypoint(point->pt.x,point->pt.y,train_image));
}
//Detector training
cv::RNG rng( cvGetTickCount() );
cv::PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,-CV_PI/3,CV_PI/3);
printf("RTree Classifier training...\n");
detector.train(base_set,rng,gen,24,cv::DEFAULT_DEPTH,2000,(int)base_set.size(),detector.DEFAULT_NUM_QUANT_BITS);
printf("Done\n");
float* signature = new float[detector.original_num_classes()];
// NOTE(review): best_corr/best_corr_idx stay uninitialized when
// imageKeypoints->total == 0, yet are passed to delete[] below - confirm.
float* best_corr;
int* best_corr_idx;
if (imageKeypoints->total > 0)
{
best_corr = new float[imageKeypoints->total];
best_corr_idx = new int[imageKeypoints->total];
}
/* for every test-image keypoint, classify the surrounding patch and keep
   the best-scoring class (train keypoint index) and its probability */
for(i=0; i < imageKeypoints->total; i++)
{
point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
int part_idx = -1;
float prob = 0.0f;
CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,(int)(point->pt.y) - patch_height/2, patch_width, patch_height);
cvSetImageROI(test_image, roi);
roi = cvGetImageROI(test_image);
/* patches clipped by the image border cannot be classified */
if(roi.width != patch_width || roi.height != patch_height)
{
best_corr_idx[i] = part_idx;
best_corr[i] = prob;
}
else
{
cvSetImageROI(test_image, roi);
IplImage* roi_image = cvCreateImage(cvSize(roi.width, roi.height), test_image->depth, test_image->nChannels);
cvCopy(test_image,roi_image);
detector.getSignature(roi_image, signature);
for (int j = 0; j< detector.original_num_classes();j++)
{
if (prob < signature[j])
{
part_idx = j;
prob = signature[j];
}
}
best_corr_idx[i] = part_idx;
best_corr[i] = prob;
if (roi_image)
cvReleaseImage(&roi_image);
}
cvResetImageROI(test_image);
}
/* greedy matching: for each train keypoint, pick the test keypoint with
   the highest classifier response for its class */
float min_prob = 0.0f;
vector<CvPoint> object;
vector<CvPoint> features;
for (int j=0;j<objectKeypoints->total;j++)
{
float prob = 0.0f;
int idx = -1;
for (i = 0; i<imageKeypoints->total;i++)
{
if ((best_corr_idx[i]!=j)||(best_corr[i] < min_prob))
continue;
if (best_corr[i] > prob)
{
prob = best_corr[i];
idx = i;
}
}
if (idx >=0)
{
point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,j);
object.push_back(cvPoint((int)point->pt.x,(int)point->pt.y));
point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,idx);
features.push_back(cvPoint((int)point->pt.x,(int)point->pt.y));
}
}
if ((int)object.size() > 3)
{
/* fit an affine transform to the correspondences and draw the result:
   train image on top, test image below, match lines between them */
CvMat* affine = cvCreateMat(2, 3, CV_32FC1);
FindAffineTransform(object,features,affine);
vector<CvPoint> corners;
vector<CvPoint> mapped_corners;
corners.push_back(cvPoint(0,0));
corners.push_back(cvPoint(0,train_image->height));
corners.push_back(cvPoint(train_image->width,0));
corners.push_back(cvPoint(train_image->width,train_image->height));
MapVectorAffine(corners,mapped_corners,affine);
//Drawing the result
IplImage* result = cvCreateImage(cvSize(test_image->width > train_image->width ? test_image->width : train_image->width,
train_image->height + test_image->height),
test_image->depth, test_image->nChannels);
cvSetImageROI(result,cvRect(0,0,train_image->width, train_image->height));
cvCopy(train_image,result);
cvResetImageROI(result);
cvSetImageROI(result,cvRect(0,train_image->height,test_image->width, test_image->height));
cvCopy(test_image,result);
cvResetImageROI(result);
for (int i=0;i<(int)features.size();i++)
{
cvLine(result,object[i],cvPoint(features[i].x,features[i].y+train_image->height),cvScalar(255));
}
if (draw_border)
{
cvLine(result,cvPoint(mapped_corners[0].x, mapped_corners[0].y+train_image->height),
cvPoint(mapped_corners[1].x, mapped_corners[1].y+train_image->height),cvScalar(150),3);
cvLine(result,cvPoint(mapped_corners[0].x, mapped_corners[0].y+train_image->height),
cvPoint(mapped_corners[2].x, mapped_corners[2].y+train_image->height),cvScalar(150),3);
cvLine(result,cvPoint(mapped_corners[1].x, mapped_corners[1].y+train_image->height),
cvPoint(mapped_corners[3].x, mapped_corners[3].y+train_image->height),cvScalar(150),3);
cvLine(result,cvPoint(mapped_corners[2].x, mapped_corners[2].y+train_image->height),
cvPoint(mapped_corners[3].x, mapped_corners[3].y+train_image->height),cvScalar(150),3);
}
cvSaveImage("Result.jpg",result);
cvNamedWindow("Result",0);
cvShowImage("Result",result);
cvWaitKey();
cvReleaseMat(&affine);
cvReleaseImage(&result);
}
else
{
printf("Unable to find correspondence\n");
}
// NOTE(review): best_corr_idx is never delete[]'d here.
if (signature)
delete[] signature;
if (best_corr)
delete[] best_corr;
cvReleaseMemStorage(&storage);
cvReleaseImage(&train_image);
cvReleaseImage(&test_image);
return 0;
}

View File

@@ -0,0 +1,139 @@
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <algorithm>
#include <iostream>
#include <vector>
using namespace cv;
int main(int argc, char** argv)
{
// Ferns-based planar object detector demo: trains (or loads a cached) model
// for the object image, detects the object in the scene image, and
// visualizes keypoints and correspondences.
const char* object_filename = argc > 1 ? argv[1] : "box.png";
const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
int i;
cvNamedWindow("Object", 1);
cvNamedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1);
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat image;
double imgscale = 1;
Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
// imgscale is 1, so this resize currently only copies the scene image
resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
if( !object.data || !image.data )
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj [<object_filename> <scene_filename>]\n",
object_filename, scene_filename );
exit(-1);
}
Size patchSize(32, 32);
LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
ldetector.setVerbose(true);
PlanarObjectDetector detector;
vector<Mat> objpyr, imgpyr;
// light Gaussian smoothing before keypoint detection
int blurKSize = 3;
double sigma = 0;
GaussianBlur(object, object, Size(blurKSize, blurKSize), sigma, sigma);
GaussianBlur(image, image, Size(blurKSize, blurKSize), sigma, sigma);
buildPyramid(object, objpyr, ldetector.nOctaves-1);
buildPyramid(image, imgpyr, ldetector.nOctaves-1);
vector<KeyPoint> objKeypoints, imgKeypoints;
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
// Try to reuse a previously trained model; otherwise train and save one.
string model_filename = format("%s_model.xml.gz", object_filename);
printf("Trying to load %s ...\n", model_filename.c_str());
FileStorage fs(model_filename, FileStorage::READ);
if( fs.isOpened() )
{
detector.read(fs.getFirstTopLevelNode());
printf("Successfully loaded %s.\n", model_filename.c_str());
}
else
{
printf("The file not found and can not be read. Let's train the model.\n");
printf("Step 1. Finding the robust keypoints ...\n");
ldetector.setVerbose(true);
ldetector.getMostStable2D(object, objKeypoints, 100, gen);
printf("Done.\nStep 2. Training ferns-based planar object detector ...\n");
detector.setVerbose(true);
detector.train(objpyr, objKeypoints, patchSize.width, 100, 11, 10000, ldetector, gen);
printf("Done.\nStep 3. Saving the model to %s ...\n", model_filename.c_str());
if( fs.open(model_filename, FileStorage::WRITE) )
detector.write(fs, "ferns_model");
}
printf("Now find the keypoints in the image, try recognize them and compute the homography matrix\n");
fs.release();
// Build the visualization canvas: object image on top, scene image below.
vector<Point2f> dst_corners;
Mat correspond( object.rows + image.rows, std::max(object.cols, image.cols), CV_8UC3);
correspond = Scalar(0.);
Mat part(correspond, Rect(0, 0, object.cols, object.rows));
cvtColor(object, part, CV_GRAY2BGR);
part = Mat(correspond, Rect(0, object.rows, image.cols, image.rows));
cvtColor(image, part, CV_GRAY2BGR);
vector<int> pairs;
Mat H;
double t = (double)getTickCount();
objKeypoints = detector.getModelPoints();
ldetector(imgpyr, imgKeypoints, 300);
std::cout << "Object keypoints: " << objKeypoints.size() << "\n";
std::cout << "Image keypoints: " << imgKeypoints.size() << "\n";
bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);
t = (double)getTickCount() - t;
printf("%gms\n", t*1000/getTickFrequency());
if( found )
{
// draw the detected object's quadrilateral in the scene half
for( i = 0; i < 4; i++ )
{
Point r1 = dst_corners[i%4];
Point r2 = dst_corners[(i+1)%4];
line( correspond, Point(r1.x, r1.y+object.rows),
Point(r2.x, r2.y+object.rows), Scalar(0,0,255) );
}
}
// draw a line for every keypoint correspondence (object -> scene)
for( i = 0; i < (int)pairs.size(); i += 2 )
{
line( correspond, objKeypoints[pairs[i]].pt,
imgKeypoints[pairs[i+1]].pt + Point2f(0,object.rows),
Scalar(0,255,0) );
}
imshow( "Object Correspondence", correspond );
Mat objectColor;
cvtColor(object, objectColor, CV_GRAY2BGR);
// mark model keypoints; ring radius grows with the pyramid octave
for( i = 0; i < (int)objKeypoints.size(); i++ )
{
circle( objectColor, objKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
circle( objectColor, objKeypoints[i].pt, (1 << objKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
}
Mat imageColor;
cvtColor(image, imageColor, CV_GRAY2BGR);
for( i = 0; i < (int)imgKeypoints.size(); i++ )
{
circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
}
imwrite("correspond.png", correspond );
imshow( "Object", objectColor );
imshow( "Image", imageColor );
waitKey(0);
return 0;
}

134
samples/c/fitellipse.cpp Normal file
View File

@@ -0,0 +1,134 @@
/********************************************************************************
*
*
*    This program demonstrates ellipse fitting. It finds
*    contours and approximates them by ellipses.
*
*    The trackbar specifies the threshold parameter.
*
*    White lines are contours; red lines are the fitted ellipses.
*
*
*    Author: Denis Burenkov.
*
*
*
********************************************************************************/
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#endif
int slider_pos = 70;
// Load the source image. HighGUI use.
IplImage *image02 = 0, *image03 = 0, *image04 = 0;
void process_image(int h);
// Entry point: loads a grayscale image, shows it, and wires a threshold
// trackbar to process_image(), which redraws the fitted ellipses.
// Returns 0 on success, -1 if the image cannot be loaded.
int main( int argc, char** argv )
{
    const char* filename = argc == 2 ? argv[1] : (char*)"stuff.jpg";

    // Load the image and force it to be grayscale.
    if( (image03 = cvLoadImage(filename, 0)) == 0 )
        return -1;

    // Create the working/destination images.
    image02 = cvCloneImage( image03 );
    image04 = cvCloneImage( image03 );

    // Create windows.
    cvNamedWindow("Source", 1);
    cvNamedWindow("Result", 1);

    // Show the source image.
    cvShowImage("Source", image03);

    // Create the threshold trackbar and run the processing once with the
    // default slider position.
    cvCreateTrackbar( "Threshold", "Result", &slider_pos, 255, process_image );
    process_image(0);

    // Wait for a key stroke; the same function arranges events processing.
    cvWaitKey(0);

    cvReleaseImage(&image02);
    cvReleaseImage(&image03);
    cvReleaseImage(&image04);   // BUGFIX: image04 was never released (leak)

    cvDestroyWindow("Source");
    cvDestroyWindow("Result");
    return 0;
}
// Define trackbar callback functon. This function find contours,
// draw it and approximate it by ellipses.
void process_image(int h)
{
CvMemStorage* storage;
CvSeq* contour;
// Create dynamic structure and sequence.
storage = cvCreateMemStorage(0);
contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint) , storage);
// Threshold the source image. This needful for cvFindContours().
cvThreshold( image03, image02, slider_pos, 255, CV_THRESH_BINARY );
// Find all contours.
cvFindContours( image02, storage, &contour, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));
// Clear images. IPL use.
cvZero(image02);
cvZero(image04);
// This cycle draw all contours and approximate it by ellipses.
for(;contour;contour = contour->h_next)
{
int count = contour->total; // This is number point in contour
CvPoint center;
CvSize size;
CvBox2D box;
// Number point must be more than or equal to 6 (for cvFitEllipse_32f).
if( count < 6 )
continue;
CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
cvConvert( &points_i, points_f );
// Fits ellipse to current contour.
box = cvFitEllipse2( points_f );
// Draw current contour.
cvDrawContours(image04,contour,CV_RGB(255,255,255),CV_RGB(255,255,255),0,1,8,cvPoint(0,0));
// Convert ellipse data from float to integer representation.
center = cvPointFrom32f(box.center);
size.width = cvRound(box.size.width*0.5);
size.height = cvRound(box.size.height*0.5);
// Draw ellipse.
cvEllipse(image04, center, size,
-box.angle, 0, 360,
CV_RGB(0,0,255), 1, CV_AA, 0);
cvReleaseMat(&points_f);
}
// Show image. HighGUI use.
cvShowImage( "Result", image04 );
}
#ifdef _EiC
main(1,"fitellipse.c");
#endif

BIN
samples/c/fruits.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

317
samples/c/grabcut.cpp Normal file
View File

@@ -0,0 +1,317 @@
#include <highgui.h>
#include <cv.h>
#include <iostream>
using namespace std;
using namespace cv;
const Scalar RED = Scalar(0,0,255);
const Scalar PINK = Scalar(230,130,255);
const Scalar BLUE = Scalar(255,0,0);
const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0);
const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY;
// Extract the binary foreground mask (bit 0 of the GrabCut labels) from a
// composite GrabCut mask. GC_FGD/GC_PR_FGD map to 1, the rest to 0.
void getBinMask( const Mat& comMask, Mat& binMask )
{
    if( comMask.empty() || comMask.type()!=CV_8UC1 )
        CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );

    const bool needAlloc = binMask.empty()
        || binMask.rows != comMask.rows
        || binMask.cols != comMask.cols;
    if( needAlloc )
        binMask.create( comMask.size(), CV_8UC1 );

    binMask = comMask & 1;
}
// Interactive GrabCut application state: owns the segmentation mask, the
// GMM models, the user-drawn rectangle and seed-pixel lists, and the
// drawing/iteration logic driven by main() and the mouse callback.
class GCApplication
{
public:
    // States shared by the rectangle and each kind of seed labelling.
    enum{ NOT_SET = 0, IN_PROCESS = 1, SET = 2 };
    static const int radius = 2;       // radius for drawing seed points
    static const int thickness = -1;   // filled circles
    void reset();
    void setImageAndWinName( const Mat& _image, const string& _winName );
    void showImage() const;
    void mouseClick( int event, int x, int y, int flags, void* param );
    int nextIter();
    int getIterCount() const { return iterCount; }
private:
    void setRectInMask();
    void setLblsInMask( int flags, Point p, bool isPr );

    const string* winName;   // output window name (not owned)
    const Mat* image;        // input image (not owned)
    Mat mask;                // GrabCut mask: GC_BGD/GC_FGD/GC_PR_BGD/GC_PR_FGD
    Mat bgdModel, fgdModel;  // GMM models reused across iterations
    uchar rectState, lblsState, prLblsState;  // interaction state machine
    bool isInitialized;      // true after the first grabCut() run
    Rect rect;               // user-selected rectangle
    vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;  // painted seeds
    int iterCount;           // number of grabCut() iterations performed
};
// Return the application to its initial state: a background-only mask,
// no seed pixels, no rectangle, and a zero iteration count.
void GCApplication::reset()
{
    if( !mask.empty() )
        mask.setTo(Scalar::all(GC_BGD));

    bgdPxls.clear();
    fgdPxls.clear();
    prBgdPxls.clear();
    prFgdPxls.clear();

    isInitialized = false;
    rectState = lblsState = prLblsState = NOT_SET;
    iterCount = 0;
}
// Bind the application to an image and a display window, then reset all
// interaction state. Silently ignores empty arguments.
void GCApplication::setImageAndWinName( const Mat& _image, const string& _winName )
{
    if( _image.empty() || _winName.empty() )
        return;

    image = &_image;
    winName = &_winName;

    // Allocate the mask before reset() so reset() can clear it.
    mask.create( image->size(), CV_8UC1 );
    reset();
}
void GCApplication::showImage() const
{
if( image->empty() || winName->empty() )
return;
Mat res;
Mat binMask;
if( !isInitialized )
image->copyTo( res );
else
{
getBinMask( mask, binMask );
image->copyTo( res, binMask );
}
vector<Point>::const_iterator it;
for( it = bgdPxls.begin(); it != bgdPxls.end(); ++it )
circle( res, *it, radius, BLUE, thickness );
for( it = fgdPxls.begin(); it != fgdPxls.end(); ++it )
circle( res, *it, radius, RED, thickness );
for( it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it )
circle( res, *it, radius, LIGHTBLUE, thickness );
for( it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it )
circle( res, *it, radius, PINK, thickness );
if( rectState == IN_PROCESS || rectState == SET )
rectangle( res, Point( rect.x, rect.y ), Point(rect.x + rect.width, rect.y + rect.height ), GREEN, 2);
imshow( *winName, res );
}
// Seed the mask from the finished rectangle: everything outside becomes
// definite background, everything inside "probably foreground".
void GCApplication::setRectInMask()
{
    assert( !mask.empty() );
    mask.setTo( GC_BGD );

    // Clamp the rectangle to the image boundaries.
    rect.x = max(0, rect.x);
    rect.y = max(0, rect.y);
    rect.width = min(rect.width, image->cols - rect.x);
    rect.height = min(rect.height, image->rows - rect.y);

    mask(rect).setTo( Scalar(GC_PR_FGD) );
}
void GCApplication::setLblsInMask( int flags, Point p, bool isPr )
{
vector<Point> *bpxls, *fpxls;
uchar bvalue, fvalue;
if( !isPr )
{
bpxls = &bgdPxls;
fpxls = &fgdPxls;
bvalue = GC_BGD;
fvalue = GC_FGD;
}
else
{
bpxls = &prBgdPxls;
fpxls = &prFgdPxls;
bvalue = GC_PR_BGD;
fvalue = GC_PR_FGD;
}
if( flags & BGD_KEY )
{
bpxls->push_back(p);
circle( mask, p, radius, bvalue, thickness );
}
if( flags & FGD_KEY )
{
fpxls->push_back(p);
circle( mask, p, radius, fvalue, thickness );
}
}
// HighGUI mouse handler: drives the interaction state machine.
// - Left drag (no modifier): rubber-band the selection rectangle.
// - CTRL/SHIFT + left button: paint definite GC_BGD / GC_FGD seeds.
// - CTRL/SHIFT + right button: paint probable GC_PR_BGD / GC_PR_FGD seeds.
void GCApplication::mouseClick( int event, int x, int y, int flags, void* param )
{
    // TODO add bad args check
    switch( event )
    {
    case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            // Start the rectangle only if no modifier key is held.
            if( rectState == NOT_SET && !isb && !isf )
            {
                rectState = IN_PROCESS;
                rect = Rect( x, y, 1, 1 );
            }
            // Seed painting requires the rectangle to be finished first.
            if ( (isb || isf) && rectState == SET )
                lblsState = IN_PROCESS;
        }
        break;
    case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            if ( (isb || isf) && rectState == SET )
                prLblsState = IN_PROCESS;
        }
        break;
    case CV_EVENT_LBUTTONUP:
        if( rectState == IN_PROCESS )
        {
            // Finish the rectangle and seed the mask from it.
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );
            rectState = SET;
            setRectInMask();
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
            showImage();
        }
        if( lblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), false);
            lblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_RBUTTONUP:
        if( prLblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), true);
            prLblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_MOUSEMOVE:
        if( rectState == IN_PROCESS )
        {
            // Rubber-band the rectangle while dragging.
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
            showImage();
        }
        else if( lblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), false);
            showImage();
        }
        else if( prLblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), true);
            showImage();
        }
        break;
    }
}
// Run one more GrabCut iteration. On the first call the algorithm is
// initialized from the user mask (if seeds were painted) or from the
// rectangle; without a finished rectangle nothing happens. Clears the
// seed-point lists and returns the updated iteration count.
int GCApplication::nextIter()
{
    if( !isInitialized )
    {
        if( rectState != SET )
            return iterCount;

        int initMode = ( lblsState == SET || prLblsState == SET )
            ? GC_INIT_WITH_MASK : GC_INIT_WITH_RECT;
        grabCut( *image, mask, rect, bgdModel, fgdModel, 1, initMode );
        isInitialized = true;
    }
    else
        grabCut( *image, mask, rect, bgdModel, fgdModel, 1 );

    iterCount++;

    bgdPxls.clear();
    fgdPxls.clear();
    prBgdPxls.clear();
    prFgdPxls.clear();

    return iterCount;
}
// Global application object and the HighGUI mouse-callback trampoline
// (HighGUI takes a plain function pointer, so the call is forwarded here).
GCApplication gcapp;

void on_mouse( int event, int x, int y, int flags, void* param )
{
    gcapp.mouseClick( event, x, y, flags, param );
}
// Interactive GrabCut demo entry point. Expects an image path as argv[1];
// prints usage, wires the mouse callback, and loops on hot keys:
// ESC quits, 'r' resets, 'n' runs the next GrabCut iteration.
int main( int argc, char** argv )
{
    if( argc==1 )
        return 1;
    string filename = argv[1];
    if( filename.empty() )
        return 1;
    Mat image = imread( filename, 1 );
    if( image.empty() )
        return 1;

    // BUGFIX: the help text said "CG_FGD"/"CG_PR_FGD" — the constants are
    // GC_FGD and GC_PR_FGD.
    cout << "First, select the rectangular area\n" <<
        "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tr - restore the original image\n"
        "\tn - next iteration\n"
        "\n"
        "\tleft mouse button - set rectangle\n"
        "\n"
        "\tCTRL+left mouse button - set GC_BGD pixels\n"
        "\tSHIFT+left mouse button - set GC_FGD pixels\n"
        "\n"
        "\tCTRL+right mouse button - set GC_PR_BGD pixels\n"
        "\tSHIFT+right mouse button - set GC_PR_FGD pixels\n";

    const string winName = "image";
    cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
    cvSetMouseCallback( winName.c_str(), on_mouse, 0 );

    gcapp.setImageAndWinName( image, winName );
    gcapp.showImage();

    for(;;)
    {
        int c = cvWaitKey(0);
        switch( (char) c )
        {
        case '\x1b':
            cout << "Exiting ..." << endl;
            goto exit_main;
        case 'r':
            cout << endl;
            gcapp.reset();
            gcapp.showImage();
            break;
        case 'n':
            // Braces scope the locals so jumps to other case labels cannot
            // cross their initialization.
            {
                int iterCount = gcapp.getIterCount();
                cout << "<" << iterCount << "... ";
                int newIterCount = gcapp.nextIter();
                if( newIterCount > iterCount )
                {
                    gcapp.showImage();
                    cout << iterCount << ">" << endl;
                }
                else
                    cout << "rect must be determined>" << endl;
            }
            break;
        }
    }

exit_main:
    cvDestroyWindow( winName.c_str() );
    return 0;
}

62
samples/c/houghlines.c Normal file
View File

@@ -0,0 +1,62 @@
#define CV_NO_BACKWARD_COMPATIBILITY
/* This is a standalone program. Pass an image name as a first parameter of the program.
Switch between standard and probabilistic Hough transform by changing "#if 1" to "#if 0" and back */
#include <cv.h>
#include <highgui.h>
#include <math.h>
/* Finds lines in an edge image via the Hough transform and overlays them in
   red on the source image. Switch "#if 0" to "#if 1" to use the standard
   (non-probabilistic) transform. Returns 0 on success, -1 on load failure. */
int main(int argc, char** argv)
{
    const char* filename = argc >= 2 ? argv[1] : "pic1.png";
    IplImage* src = cvLoadImage( filename, 0 );
    IplImage* dst;
    IplImage* color_dst;
    CvMemStorage* storage;
    CvSeq* lines = 0;
    int i;

    if( !src )
        return -1;

    /* storage is created after the load check so it cannot leak on failure */
    storage = cvCreateMemStorage(0);
    dst = cvCreateImage( cvGetSize(src), 8, 1 );
    color_dst = cvCreateImage( cvGetSize(src), 8, 3 );

    cvCanny( src, dst, 50, 200, 3 );
    cvCvtColor( dst, color_dst, CV_GRAY2BGR );
#if 0
    lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );

    for( i = 0; i < MIN(lines->total,100); i++ )
    {
        float* line = (float*)cvGetSeqElem(lines,i);
        float rho = line[0];
        float theta = line[1];
        CvPoint pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        /* draw a long segment through (x0,y0) perpendicular to (a,b) */
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, CV_AA, 0 );
    }
#else
    lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 50, 10 );
    for( i = 0; i < lines->total; i++ )
    {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, CV_AA, 0 );
    }
#endif
    cvNamedWindow( "Source", 1 );
    cvShowImage( "Source", src );

    cvNamedWindow( "Hough", 1 );
    cvShowImage( "Hough", color_dst );

    cvWaitKey(0);

    /* BUGFIX: the images and the line storage were never released */
    cvReleaseMemStorage( &storage );
    cvReleaseImage( &src );
    cvReleaseImage( &dst );
    cvReleaseImage( &color_dst );
    return 0;
}

110
samples/c/image.cpp Normal file
View File

@@ -0,0 +1,110 @@
#include "cv.h" // include standard OpenCV headers, same as before
#include "highgui.h"
#include <stdio.h>
using namespace cv; // all the new API is put into "cv" namespace. Export its content
// enable/disable use of mixed API in the code below.
#define DEMO_MIXED_API_USE 1
// Demonstrates the C++ (cv::Mat) API, optionally mixed with the legacy
// C API: converts an image to YCrCb, adds grain to the luma plane,
// halves chroma contrast, and shows the result.
int main( int argc, char** argv )
{
    const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#if DEMO_MIXED_API_USE
    Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
    if(iplimg.empty())
    {
        fprintf(stderr, "Can not load image %s\n", imagename);
        return -1;
    }
    Mat img(iplimg); // cv::Mat replaces the CvMat and IplImage, but it's easy to convert
    // between the old and the new data structures (by default, only the header
    // is converted, while the data is shared)
#else
    Mat img = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
    if(img.empty())
    {
        fprintf(stderr, "Can not load image %s\n", imagename);
        return -1;
    }
#endif

    if( !img.data ) // check if the image has been loaded properly
        return -1;

    Mat img_yuv;
    cvtColor(img, img_yuv, CV_BGR2YCrCb); // convert image to YUV color space. The output image will be created automatically

    vector<Mat> planes; // Vector is template vector class, similar to STL's vector. It can store matrices too.
    split(img_yuv, planes); // split the image into separate color planes

#if 1
    // method 1. process Y plane using an iterator
    MatIterator_<uchar> it = planes[0].begin<uchar>(), it_end = planes[0].end<uchar>();
    for(; it != it_end; ++it)
    {
        double v = *it*1.7 + rand()%21-10;
        *it = saturate_cast<uchar>(v*v/255.);
    }

    // method 2. process the first chroma plane using pre-stored row pointer.
    // method 3. process the second chroma plane using individual element access
    for( int y = 0; y < img_yuv.rows; y++ )
    {
        uchar* Uptr = planes[1].ptr<uchar>(y);
        for( int x = 0; x < img_yuv.cols; x++ )
        {
            Uptr[x] = saturate_cast<uchar>((Uptr[x]-128)/2 + 128);
            uchar& Vxy = planes[2].at<uchar>(y, x);
            Vxy = saturate_cast<uchar>((Vxy-128)/2 + 128);
        }
    }
#else
    Mat noise(img.size(), CV_8U); // another Mat constructor; allocates a matrix of the specified size and type
    randn(noise, Scalar::all(128), Scalar::all(20)); // fills the matrix with normally distributed random values;
    // there is also randu() for uniformly distributed random number generation
    GaussianBlur(noise, noise, Size(3, 3), 0.5, 0.5); // blur the noise a bit, kernel size is 3x3 and both sigma's are set to 0.5

    const double brightness_gain = 0;
    const double contrast_gain = 1.7;
#if DEMO_MIXED_API_USE
    // it's easy to pass the new matrices to the functions that only work with IplImage or CvMat:
    // step 1) - convert the headers, data will not be copied
    IplImage cv_planes_0 = planes[0], cv_noise = noise;
    // step 2) call the function; do not forget unary "&" to form pointers
    cvAddWeighted(&cv_planes_0, contrast_gain, &cv_noise, 1, -128 + brightness_gain, &cv_planes_0);
#else
    // BUGFIX: was "constrast_gain" — an undeclared identifier, so this
    // branch did not compile.
    addWeighted(planes[0], contrast_gain, noise, 1, -128 + brightness_gain, planes[0]);
#endif
    const double color_scale = 0.5;
    // Mat::convertTo() replaces cvConvertScale. One must explicitly specify the output matrix type (we keep it intact - planes[1].type())
    planes[1].convertTo(planes[1], planes[1].type(), color_scale, 128*(1-color_scale));
    // alternative form of cv::convertScale if we know the datatype at compile time ("uchar" here).
    // This expression will not create any temporary arrays and should be almost as fast as the above variant
    planes[2] = Mat_<uchar>(planes[2]*color_scale + 128*(1-color_scale));

    // Mat::mul replaces cvMul(). Again, no temporary arrays are created in case of simple expressions.
    planes[0] = planes[0].mul(planes[0], 1./255);
#endif

    // now merge the results back
    merge(planes, img_yuv);
    // and produce the output RGB image
    cvtColor(img_yuv, img, CV_YCrCb2BGR);

    // this is counterpart for cvNamedWindow
    namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
#if DEMO_MIXED_API_USE
    // this is to demonstrate that img and iplimg really share the data - the result of the above
    // processing is stored in img and thus in iplimg too.
    cvShowImage("image with grain", iplimg);
#else
    imshow("image with grain", img);
#endif
    waitKey();

    return 0;
    // all the memory will automatically be released by Vector<>, Mat and Ptr<> destructors.
}

84
samples/c/inpaint.cpp Normal file
View File

@@ -0,0 +1,84 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
IplImage* inpaint_mask = 0;
IplImage* img0 = 0, *img = 0, *inpainted = 0;
CvPoint prev_pt = {-1,-1};
// Mouse callback: paints strokes into the inpainting mask (and onto the
// displayed image) while the left button is held down; forgets the previous
// point when the button is released.
void on_mouse( int event, int x, int y, int flags, void* )
{
    CvPoint pt;

    if( !img )
        return;

    // Button released (or not held): reset the stroke.
    if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) )
    {
        prev_pt = cvPoint(-1,-1);
        return;
    }

    // Button pressed: start a new stroke at the cursor.
    if( event == CV_EVENT_LBUTTONDOWN )
    {
        prev_pt = cvPoint(x,y);
        return;
    }

    // Any event other than a drag does nothing (the left button is known
    // to be down at this point, so only the event kind needs checking).
    if( event != CV_EVENT_MOUSEMOVE )
        return;

    pt = cvPoint(x,y);
    if( prev_pt.x < 0 )
        prev_pt = pt;

    cvLine( inpaint_mask, prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
    cvLine( img, prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
    prev_pt = pt;
    cvShowImage( "image", img );
}
int main( int argc, char** argv )
{
char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
if( (img0 = cvLoadImage(filename,-1)) == 0 )
return 0;
printf( "Hot keys: \n"
"\tESC - quit the program\n"
"\tr - restore the original image\n"
"\ti or SPACE - run inpainting algorithm\n"
"\t\t(before running it, paint something on the image)\n" );
cvNamedWindow( "image", 1 );
img = cvCloneImage( img0 );
inpainted = cvCloneImage( img0 );
inpaint_mask = cvCreateImage( cvGetSize(img), 8, 1 );
cvZero( inpaint_mask );
cvZero( inpainted );
cvShowImage( "image", img );
cvShowImage( "inpainted image", inpainted );
cvSetMouseCallback( "image", on_mouse, 0 );
for(;;)
{
int c = cvWaitKey(0);
if( (char)c == 27 )
break;
if( (char)c == 'r' )
{
cvZero( inpaint_mask );
cvCopy( img0, img );
cvShowImage( "image", img );
}
if( (char)c == 'i' || (char)c == ' ' )
{
cvNamedWindow( "inpainted image", 1 );
cvInpaint( img, inpaint_mask, inpainted, 3, CV_INPAINT_TELEA );
cvShowImage( "inpainted image", inpainted );
}
}
return 1;
}

113
samples/c/kalman.c Normal file
View File

@@ -0,0 +1,113 @@
/*
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
*/
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <math.h>
#endif
/* Kalman-filter demo: tracks a rotating point with constant angular speed.
   State = (angle, angular velocity); the measurement is the noisy angle.
   White cross: true state; red: measurement; green: prediction.
   Any key restarts with a new random speed; ESC/q quits. */
int main(int argc, char** argv)
{
    /* transition matrix: phi' = phi + delta_phi; delta_phi' = delta_phi */
    const float A[] = { 1, 1, 0, 1 };

    IplImage* img = cvCreateImage( cvSize(500,500), 8, 3 );
    CvKalman* kalman = cvCreateKalman( 2, 1, 0 );
    CvMat* state = cvCreateMat( 2, 1, CV_32FC1 ); /* (phi, delta_phi) */
    CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 );
    CvMat* measurement = cvCreateMat( 1, 1, CV_32FC1 );
    CvRNG rng = cvRNG(-1);
    char code = -1;

    cvZero( measurement );
    cvNamedWindow( "Kalman", 1 );

    for(;;)
    {
        /* (re)start with a random initial state and angular speed */
        cvRandArr( &rng, state, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) );

        memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
        cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) );
        cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) );
        cvSetIdentity( kalman->error_cov_post, cvRealScalar(1));
        cvRandArr( &rng, kalman->state_post, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) );

        for(;;)
        {
            /* map an angle onto a circle centered in the image */
            #define calc_point(angle)                                      \
                cvPoint( cvRound(img->width/2 + img->width/3*cos(angle)),  \
                         cvRound(img->height/2 - img->width/3*sin(angle)))

            float state_angle = state->data.fl[0];
            CvPoint state_pt = calc_point(state_angle);

            /* predict the point position */
            const CvMat* prediction = cvKalmanPredict( kalman, 0 );
            float predict_angle = prediction->data.fl[0];
            CvPoint predict_pt = calc_point(predict_angle);

            float measurement_angle;
            CvPoint measurement_pt;

            cvRandArr( &rng, measurement, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman->measurement_noise_cov->data.fl[0])) );

            /* generate measurement: z = H*x + measurement noise */
            cvMatMulAdd( kalman->measurement_matrix, state, measurement, measurement );

            measurement_angle = measurement->data.fl[0];
            measurement_pt = calc_point(measurement_angle);

            /* plot points */
            #define draw_cross( center, color, d )                                 \
                cvLine( img, cvPoint( center.x - d, center.y - d ),                \
                             cvPoint( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
                cvLine( img, cvPoint( center.x + d, center.y - d ),                \
                             cvPoint( center.x - d, center.y + d ), color, 1, CV_AA, 0 )

            cvZero( img );
            draw_cross( state_pt, CV_RGB(255,255,255), 3 );
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 );
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 );
            cvLine( img, state_pt, measurement_pt, CV_RGB(255,0,0), 3, CV_AA, 0 );
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, CV_AA, 0 );

            /* correct the filter with the new measurement */
            cvKalmanCorrect( kalman, measurement );

            /* evolve the true state: x' = A*x + process noise */
            cvRandArr( &rng, process_noise, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman->process_noise_cov->data.fl[0])));
            cvMatMulAdd( kalman->transition_matrix, state, process_noise, state );

            cvShowImage( "Kalman", img );

            code = (char) cvWaitKey( 100 );
            if( code > 0 )
                break;
        }
        if( code == 27 || code == 'q' || code == 'Q' )
            break;
    }

    cvDestroyWindow("Kalman");
    return 0;
}
#ifdef _EiC
main(1, "kalman.c");
#endif

93
samples/c/kmeans.c Normal file
View File

@@ -0,0 +1,93 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#endif
/* k-means demo: generates a random mixture of 2D Gaussian clusters, runs
   cvKMeans2, and draws each sample colored by its assigned cluster.
   Any key regenerates the data; ESC/q quits. */
int main( int argc, char** argv )
{
#define MAX_CLUSTERS 5
    CvScalar color_tab[MAX_CLUSTERS];  /* one drawing color per cluster index */
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
    CvRNG rng = cvRNG(-1);
    CvPoint ipt;

    color_tab[0] = CV_RGB(255,0,0);
    color_tab[1] = CV_RGB(0,255,0);
    color_tab[2] = CV_RGB(100,100,255);
    color_tab[3] = CV_RGB(255,0,255);
    color_tab[4] = CV_RGB(255,255,0);

    cvNamedWindow( "clusters", 1 );

    for(;;)
    {
        char key;
        int k, cluster_count = cvRandInt(&rng)%MAX_CLUSTERS + 1;
        int i, sample_count = cvRandInt(&rng)%1000 + 1;
        CvMat* points = cvCreateMat( sample_count, 1, CV_32FC2 );
        CvMat* clusters = cvCreateMat( sample_count, 1, CV_32SC1 );
        cluster_count = MIN(cluster_count, sample_count);

        /* generate random sample from multigaussian distribution */
        for( k = 0; k < cluster_count; k++ )
        {
            CvPoint center;
            CvMat point_chunk;
            center.x = cvRandInt(&rng)%img->width;
            center.y = cvRandInt(&rng)%img->height;
            /* slice out this cluster's share of the sample rows */
            cvGetRows( points, &point_chunk, k*sample_count/cluster_count,
                       k == cluster_count - 1 ? sample_count :
                       (k+1)*sample_count/cluster_count, 1 );

            cvRandArr( &rng, &point_chunk, CV_RAND_NORMAL,
                       cvScalar(center.x,center.y,0,0),
                       cvScalar(img->width*0.1,img->height*0.1,0,0));
        }

        /* shuffle samples */
        for( i = 0; i < sample_count/2; i++ )
        {
            CvPoint2D32f* pt1 = (CvPoint2D32f*)points->data.fl + cvRandInt(&rng)%sample_count;
            CvPoint2D32f* pt2 = (CvPoint2D32f*)points->data.fl + cvRandInt(&rng)%sample_count;
            CvPoint2D32f temp;
            CV_SWAP( *pt1, *pt2, temp );
        }

        printf( "iterations=%d\n", cvKMeans2( points, cluster_count, clusters,
                cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ),
                5, 0, 0, 0, 0 ));

        cvZero( img );

        /* draw each sample colored by its assigned cluster */
        for( i = 0; i < sample_count; i++ )
        {
            int cluster_idx = clusters->data.i[i];
            /* points is CV_32FC2: x and y are interleaved floats */
            ipt.x = (int)points->data.fl[i*2];
            ipt.y = (int)points->data.fl[i*2+1];
            cvCircle( img, ipt, 2, color_tab[cluster_idx], CV_FILLED, CV_AA, 0 );
        }

        cvReleaseMat( &points );
        cvReleaseMat( &clusters );

        cvShowImage( "clusters", img );

        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;
    }

    cvDestroyWindow( "clusters" );
    return 0;
}
#ifdef _EiC
main(1,"kmeans.c");
#endif

83
samples/c/laplace.c Normal file
View File

@@ -0,0 +1,83 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <ctype.h>
#include <stdio.h>
#endif
int sigma = 3;
int smoothType = CV_GAUSSIAN;
/* Grabs frames from a camera or video file, smooths each frame, and shows a
   per-channel Laplacian. SPACE cycles the smoothing type (Gaussian -> blur
   -> median); q/Q/ESC quits. */
int main( int argc, char** argv )
{
    IplImage* laplace = 0;
    IplImage* colorlaplace = 0;
    IplImage* planes[3] = { 0, 0, 0 };
    CvCapture* capture = 0;

    /* a single-digit argument selects a camera; anything else is a file */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    cvNamedWindow( "Laplacian", 0 );
    cvCreateTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );

    for(;;)
    {
        IplImage* frame = cvQueryFrame( capture );
        int ch, key, ksize;

        if( !frame )
            break;

        /* lazily allocate the work images once the frame size is known */
        if( !laplace )
        {
            for( ch = 0; ch < 3; ch++ )
                planes[ch] = cvCreateImage( cvGetSize(frame), 8, 1 );
            laplace = cvCreateImage( cvGetSize(frame), IPL_DEPTH_16S, 1 );
            colorlaplace = cvCreateImage( cvGetSize(frame), 8, 3 );
        }

        ksize = (sigma*5)|1;   /* the kernel size must be odd */
        cvSmooth( frame, colorlaplace, smoothType, ksize, ksize, sigma, sigma );
        cvSplit( colorlaplace, planes[0], planes[1], planes[2], 0 );

        for( ch = 0; ch < 3; ch++ )
        {
            cvLaplace( planes[ch], laplace, 5 );
            cvConvertScaleAbs( laplace, planes[ch], (sigma+1)*0.25, 0 );
        }

        cvMerge( planes[0], planes[1], planes[2], 0, colorlaplace );
        colorlaplace->origin = frame->origin;

        cvShowImage("Laplacian", colorlaplace );

        key = cvWaitKey(30);
        if( key == ' ' )
            smoothType = smoothType == CV_GAUSSIAN ? CV_BLUR :
                         smoothType == CV_BLUR ? CV_MEDIAN : CV_GAUSSIAN;
        if( key == 'q' || key == 'Q' || (key & 255) == 27 )
            break;
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("Laplacian");
    return 0;
}
#ifdef _EiC
main(1,"laplace.c");
#endif

BIN
samples/c/left01.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/left02.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/left03.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

BIN
samples/c/left04.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/left05.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/left06.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/left07.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

BIN
samples/c/left08.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/left09.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/left11.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/left12.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/left13.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/left14.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/lena.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

File diff suppressed because it is too large Load Diff

530
samples/c/letter_recog.cpp Normal file
View File

@@ -0,0 +1,530 @@
#include "ml.h"
#include <stdio.h>
/*
The sample demonstrates how to train Random Trees classifier
(or Boosting classifier, or MLP - see main()) using the provided dataset.
We use the sample database letter-recognition.data
from UCI Repository, here is the link:
Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
UCI Repository of machine learning databases
[http://www.ics.uci.edu/~mlearn/MLRepository.html].
Irvine, CA: University of California, Department of Information and Computer Science.
The dataset consists of 20000 feature vectors along with the
responses - capital latin letters A..Z.
The first 16000 (10000 for boosting)) samples are used for training
and the remaining 4000 (10000 for boosting) - to test the classifier.
*/
// This function reads data and responses from the file <filename>
// Reads a comma-separated dataset: the first field of each line is a class
// label (a letter), followed by var_count numeric features. On success
// allocates *data (rows x var_count, CV_32F) and *responses (rows x 1,
// CV_32F where each value is the label character) and returns 1; returns 0
// if the file cannot be opened.
static int
read_num_class_data( const char* filename, int var_count,
                     CvMat** data, CvMat** responses )
{
    const int M = 1024;
    FILE* f = fopen( filename, "rt" );
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M+2];
    float* el_ptr;
    CvSeqReader reader;
    int i, j;

    if( !f )
        return 0;

    el_ptr = new float[var_count+1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );

    for(;;)
    {
        char* ptr;
        if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
            break;
        // element 0 holds the class label (the leading letter)
        el_ptr[0] = buf[0];
        ptr = buf+2;
        for( i = 1; i <= var_count; i++ )
        {
            int n = 0;
            sscanf( ptr, "%f%n", el_ptr + i, &n );
            ptr += n + 1;  // skip the value and the following comma
        }
        if( i <= var_count )  // malformed line: stop reading
            break;
        cvSeqPush( seq, el_ptr );
    }
    fclose(f);

    *data = cvCreateMat( seq->total, var_count, CV_32F );
    *responses = cvCreateMat( seq->total, 1, CV_32F );

    cvStartReadSeq( seq, &reader );

    for( i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr + 1;
        float* ddata = data[0]->data.fl + var_count*i;
        float* dr = responses[0]->data.fl + i;

        for( j = 0; j < var_count; j++ )
            ddata[j] = sdata[j];
        *dr = sdata[-1];  // the label stored at element 0
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }

    cvReleaseMemStorage( &storage );
    delete[] el_ptr;  // BUGFIX: was `delete el_ptr` for a new[] array (undefined behavior)
    return 1;
}
// Trains (or loads) a Random Trees classifier on the letter dataset and
// reports train/test recognition rates, variable importance, and sample
// proximities. data_filename: CSV database; filename_to_save: optional
// path to store the trained model; filename_to_load: optional path to a
// pre-trained model (skips training). Returns 0 on success, -1 on error.
static
int build_rtrees_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* sample_idx = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i = 0;
    double train_hr = 0, test_hr = 0;
    CvRTrees forest;
    CvMat* var_importance = 0;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);  // 80/20 train/test split

    // Create or load Random Trees classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        forest.load( filename_to_load );
        ntrain_samples = 0;  // every sample becomes test data
        if( forest.get_tree_count() == 0 )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", data_filename );
    }
    else
    {
        // create classifier by using <data> and <responses>
        printf( "Training the classifier ...\n");

        // 1. create type mask: all features ordered, the response categorical
        var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        cvSetReal1D( var_type, data->cols, CV_VAR_CATEGORICAL );

        // 2. create sample_idx: 1 marks the training rows, 0 the test rows
        sample_idx = cvCreateMat( 1, nsamples_all, CV_8UC1 );
        {
            CvMat mat;
            cvGetCols( sample_idx, &mat, 0, ntrain_samples );
            cvSet( &mat, cvRealScalar(1) );

            cvGetCols( sample_idx, &mat, ntrain_samples, nsamples_all );
            cvSetZero( &mat );
        }

        // 3. train classifier
        forest.train( data, CV_ROW_SAMPLE, responses, 0, sample_idx, var_type, 0,
            CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER));
        printf( "\n");
    }

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );

        r = forest.predict( &sample );
        // r = 1 when the prediction matches the recorded response, else 0
        r = fabs((double)r - responses->data.fl[i]) <= FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", forest.get_tree_count() );

    // Print variable importance
    var_importance = (CvMat*)forest.get_var_importance();
    if( var_importance )
    {
        double rt_imp_sum = cvSum( var_importance ).val[0];
        printf("var#\timportance (in %%):\n");
        for( i = 0; i < var_importance->cols; i++ )
            printf( "%-2d\t%-4.1f\n", i,
                100.f*var_importance->data.fl[i]/rt_imp_sum);
    }

    // Print some proximities
    printf( "Proximities between some samples corresponding to the letter 'T':\n" );
    {
        CvMat sample1, sample2;
        const int pairs[][2] = {{0,103}, {0,106}, {106,103}, {-1,-1}};

        for( i = 0; pairs[i][0] >= 0; i++ )
        {
            cvGetRow( data, &sample1, pairs[i][0] );
            cvGetRow( data, &sample2, pairs[i][1] );
            printf( "proximity(%d,%d) = %.1f%%\n", pairs[i][0], pairs[i][1],
                forest.get_proximity( &sample1, &sample2 )*100. );
        }
    }

    // Save Random Trees classifier to file if needed
    if( filename_to_save )
        forest.save( filename_to_save );

    cvReleaseMat( &sample_idx );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
// Trains a boosted-tree classifier on the 26-class letter-recognition
// database (or loads a pre-trained one from `filename_to_load`), prints
// the recognition rate on the train/test split and optionally saves the
// model to `filename_to_save`. Returns 0 on success, -1 if the data or
// the classifier cannot be read.
static
int build_boost_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* temp_sample = 0;
    CvMat* weak_responses = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    int i, j, k;
    double train_hr = 0, test_hr = 0;
    CvBoost boost;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.5);   // 50/50 train/test split
    var_count = data->cols;

    // Create or load Boosted Tree classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        boost.load( filename_to_load );
        ntrain_samples = 0;   // everything becomes test data
        if( !boost.get_weak_predictors() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        // NOTE(review): prints data_filename rather than filename_to_load
        printf( "The classifier %s is loaded.\n", data_filename );
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // As currently boosted tree classifier in MLL can only be trained
        // for 2-class problems, we transform the training database by
        // "unrolling" each training sample as many times as the number of
        // classes (26) that we have.
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        CvMat* new_data = cvCreateMat( ntrain_samples*class_count, var_count + 1, CV_32F );
        CvMat* new_responses = cvCreateMat( ntrain_samples*class_count, 1, CV_32S );

        // 1. unroll the database type mask
        printf( "Unrolling the database...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            float* data_row = (float*)(data->data.ptr + data->step*i);
            for( j = 0; j < class_count; j++ )
            {
                // copy of the sample with the candidate class index
                // appended as an extra feature
                float* new_data_row = (float*)(new_data->data.ptr +
                    new_data->step*(i*class_count+j));
                for( k = 0; k < var_count; k++ )
                    new_data_row[k] = data_row[k];
                new_data_row[var_count] = (float)j;
                // binary response: 1 iff the candidate class is the true one
                new_responses->data.i[i*class_count + j] = responses->data.fl[i] == j+'A';
            }
        }

        // 2. create type mask
        var_type = cvCreateMat( var_count + 2, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        // the last indicator variable, as well
        // as the new (binary) response are categorical
        cvSetReal1D( var_type, var_count, CV_VAR_CATEGORICAL );
        cvSetReal1D( var_type, var_count+1, CV_VAR_CATEGORICAL );

        // 3. train classifier
        printf( "Training the classifier (may take a few minutes)...\n");
        boost.train( new_data, CV_ROW_SAMPLE, new_responses, 0, 0, var_type, 0,
            CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 ));
        cvReleaseMat( &new_data );
        cvReleaseMat( &new_responses );
        printf("\n");
    }

    temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );
        for( k = 0; k < var_count; k++ )
            temp_sample->data.fl[k] = sample.data.fl[k];

        // try every candidate class; the class with the largest sum of
        // weak-classifier responses wins
        for( j = 0; j < class_count; j++ )
        {
            temp_sample->data.fl[var_count] = (float)j;
            boost.predict( temp_sample, 0, weak_responses );
            double sum = cvSum( weak_responses ).val[0];
            if( max_sum < sum )
            {
                max_sum = sum;
                best_class = j + 'A';
            }
        }

        r = fabs(best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", boost.get_weak_predictors()->total );

    // Save classifier to file if needed
    if( filename_to_save )
        boost.save( filename_to_save );

    cvReleaseMat( &temp_sample );
    cvReleaseMat( &weak_responses );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
// Trains a multi-layer perceptron on the 26-class letter-recognition
// database (or loads a pre-trained network from `filename_to_load`), prints
// the recognition rate on the 80/20 train/test split and optionally saves
// the network. Returns 0 on success, -1 if data/classifier cannot be read.
static
int build_mlp_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat train_data;
    CvMat* responses = 0;
    CvMat* mlp_response = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i, j;
    double train_hr = 0, test_hr = 0;
    CvANN_MLP mlp;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);   // 80/20 train/test split

    // Create or load MLP classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        mlp.load( filename_to_load );
        ntrain_samples = 0;   // everything becomes test data
        if( !mlp.get_layer_count() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        // NOTE(review): prints data_filename rather than filename_to_load
        printf( "The classifier %s is loaded.\n", data_filename );
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // MLP does not support categorical variables by explicitly.
        // So, instead of the output class label, we will use
        // a binary vector of <class_count> components for training and,
        // therefore, MLP will give us a vector of "probabilities" at the
        // prediction stage
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        CvMat* new_responses = cvCreateMat( ntrain_samples, class_count, CV_32F );

        // 1. unroll the responses (one-hot encode the class labels)
        printf( "Unrolling the responses...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            int cls_label = cvRound(responses->data.fl[i]) - 'A';
            float* bit_vec = (float*)(new_responses->data.ptr + i*new_responses->step);
            for( j = 0; j < class_count; j++ )
                bit_vec[j] = 0.f;
            bit_vec[cls_label] = 1.f;
        }
        cvGetRows( data, &train_data, 0, ntrain_samples );

        // 2. train classifier
        // network topology: inputs -> two hidden layers of 100 -> 26 outputs
        int layer_sz[] = { data->cols, 100, 100, class_count };
        CvMat layer_sizes =
            cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
        mlp.create( &layer_sizes );
        printf( "Training the classifier (may take a few minutes)...\n");
        mlp.train( &train_data, new_responses, 0, 0,
            CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
#if 1
            CvANN_MLP_TrainParams::BACKPROP,0.001));
#else
            CvANN_MLP_TrainParams::RPROP,0.05));
#endif
        cvReleaseMat( &new_responses );
        printf("\n");
    }

    mlp_response = cvCreateMat( 1, class_count, CV_32F );

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class;
        CvMat sample;
        cvGetRow( data, &sample, i );
        CvPoint max_loc = {0,0};
        mlp.predict( &sample, mlp_response );
        // the predicted class is the output with the largest "probability"
        cvMinMaxLoc( mlp_response, 0, 0, 0, &max_loc, 0 );
        best_class = max_loc.x + 'A';

        int r = fabs((double)best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100. );

    // Save classifier to file if needed
    if( filename_to_save )
        mlp.save( filename_to_save );

    cvReleaseMat( &mlp_response );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
// Entry point of the letter-recognition sample: parses the command line and
// dispatches to the selected classifier builder (Random Trees by default,
// -boost for boosted trees, -mlp for a multi-layer perceptron).
int main( int argc, char *argv[] )
{
    char* filename_to_save = 0;
    char* filename_to_load = 0;
    char default_data_filename[] = "./letter-recognition.data";
    char* data_filename = default_data_filename;
    int method = 0;   // 0 - random trees, 1 - boosted trees, 2 - MLP
    int i;

    // Parse the command line. Fix: the original read argv[++i] without a
    // bounds check, so an option given as the last argument picked up
    // argv[argc] (a null pointer) as its value and crashed later.
    // A missing value now leaves i < argc, which triggers the usage message.
    for( i = 1; i < argc; i++ )
    {
        if( strcmp(argv[i],"-data") == 0 ) // flag "-data letter_recognition.xml"
        {
            if( i+1 >= argc )
                break;   // missing value -> usage
            data_filename = argv[++i];
        }
        else if( strcmp(argv[i],"-save") == 0 ) // flag "-save filename.xml"
        {
            if( i+1 >= argc )
                break;   // missing value -> usage
            filename_to_save = argv[++i];
        }
        else if( strcmp(argv[i],"-load") == 0) // flag "-load filename.xml"
        {
            if( i+1 >= argc )
                break;   // missing value -> usage
            filename_to_load = argv[++i];
        }
        else if( strcmp(argv[i],"-boost") == 0)
        {
            method = 1;
        }
        else if( strcmp(argv[i],"-mlp") == 0 )
        {
            method = 2;
        }
        else
            break;   // unknown argument -> usage
    }

    // Any unparsed argument (i < argc) or a builder failure prints usage.
    if( i < argc ||
        (method == 0 ?
        build_rtrees_classifier( data_filename, filename_to_save, filename_to_load ) :
        method == 1 ?
        build_boost_classifier( data_filename, filename_to_save, filename_to_load ) :
        method == 2 ?
        build_mlp_classifier( data_filename, filename_to_save, filename_to_load ) :
        -1) < 0)
    {
        printf("This is letter recognition sample.\n"
            "The usage: letter_recog [-data <path to letter-recognition.data>] \\\n"
            "  [-save <output XML file for the classifier>] \\\n"
            "  [-load <XML file with the pre-trained classifier>] \\\n"
            "  [-boost|-mlp] # to use boost/mlp classifier instead of default Random Trees\n" );
    }
    return 0;
}

197
samples/c/lkdemo.c Normal file
View File

@@ -0,0 +1,197 @@
/* Demo of modified Lucas-Kanade optical flow algorithm.
See the printf below */
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <ctype.h>
#endif
// current frame (color), grayscale pair and pyramid pair used by LK tracking
IplImage *image = 0, *grey = 0, *prev_grey = 0, *pyramid = 0, *prev_pyramid = 0, *swap_temp;

int win_size = 10;           // search-window half-size for LK / subpixel refinement
const int MAX_COUNT = 500;   // maximum number of tracked features
CvPoint2D32f* points[2] = {0,0}, *swap_points;  // previous/current point sets
char* status = 0;            // per-point tracking status from cvCalcOpticalFlowPyrLK
int count = 0;               // number of currently tracked points
int need_to_init = 0;        // request automatic (re)initialization of features
int night_mode = 0;          // when set, points are drawn on a black background
int flags = 0;               // flags passed to cvCalcOpticalFlowPyrLK
int add_remove_pt = 0;       // set by on_mouse: add clicked point (or remove a nearby one)
CvPoint pt;                  // last clicked position (set by on_mouse)
/* Mouse callback for the LkDemo window: a left-button click records the
   clicked position in `pt` and raises `add_remove_pt`, so the main loop
   either removes a nearby tracked point or starts tracking a new one. */
void on_mouse( int event, int x, int y, int flags, void* param )
{
    if( !image || event != CV_EVENT_LBUTTONDOWN )
        return;

    /* for bottom-left-origin images flip the y coordinate */
    if( image->origin )
        y = image->height - y;

    pt = cvPoint( x, y );
    add_remove_pt = 1;
}
// Lucas-Kanade point-tracking demo: grabs frames from a camera (single
// one-digit argument = camera index, default 0) or a video file, tracks up
// to MAX_COUNT corner features with pyramidal LK optical flow and lets the
// user add/remove points with the mouse.
int main( int argc, char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    /* print a welcome message, and the OpenCV version */
    printf ("Welcome to lkdemo, using OpenCV version %s (%d.%d.%d)\n",
        CV_VERSION,
        CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tr - auto-initialize tracking\n"
        "\tc - delete all the points\n"
        "\tn - switch the \"night\" mode on/off\n"
        "To add/remove a feature point click it\n" );

    cvNamedWindow( "LkDemo", 0 );
    cvSetMouseCallback( "LkDemo", on_mouse, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, k, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers; the first frame defines the size */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );

        if( night_mode )
            cvZero( image );   /* draw only the points, on a black background */

        if( need_to_init )
        {
            /* automatic initialization: pick strong corners, refine them */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;

            count = MAX_COUNT;
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
                                   quality, min_distance, 0, 3, 0, 0.04 );
            cvFindCornerSubPix( grey, points[1], count,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );

            add_remove_pt = 0;
        }
        else if( count > 0 )
        {
            /* track points[0] from the previous frame into points[1] */
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;   /* reuse the previous pyramid */
            for( i = k = 0; i < count; i++ )
            {
                /* a click near an existing point removes that point */
                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                if( !status[i] )   /* compact away points LK lost */
                    continue;

                points[1][k++] = points[1][i];
                cvCircle( image, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
            }
            count = k;
        }

        /* a click away from every tracked point adds a new refined point */
        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

        /* swap current and previous buffers for the next iteration */
        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c )
        {
        case 'r':
            need_to_init = 1;
            break;
        case 'c':
            count = 0;
            break;
        case 'n':
            night_mode ^= 1;
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("LkDemo");

    return 0;
}
#ifdef _EiC
/* entry point when the sample is run under the EiC C interpreter */
main(1,"lkdemo.c");
#endif

109
samples/c/minarea.c Normal file
View File

@@ -0,0 +1,109 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdlib.h>
#endif
// ARRAY != 0: store the random points in a plain array wrapped by a CvMat;
// ARRAY == 0: store them in a CvSeq backed by a memory storage
#define ARRAY 1
// Generates random point sets and draws, for each set, the minimum-area
// bounding rectangle and the minimum enclosing circle. The point container
// (plain array vs. CvSeq) is selected at compile time by the ARRAY macro.
int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif

    cvNamedWindow( "rect & circle", 1 );

    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1;   /* 1..100 random points per round */
        CvPoint pt0, pt;
        CvBox2D box;
        CvPoint2D32f box_vtx[4];
        CvPoint2D32f center;
        CvPoint icenter;
        float radius;
#if !ARRAY
        /* points stored in a sequence */
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                    sizeof(CvPoint), storage );
        for( i = 0; i < count; i++ )
        {
            /* keep points inside the central quarter of the image */
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
#ifndef _EiC /* unfortunately, here EiC crashes */
        box = cvMinAreaRect2( ptseq, 0 );
#endif
        cvMinEnclosingCircle( ptseq, &center, &radius );
#else
        /* points stored in a plain array wrapped by a CvMat header */
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );

        for( i = 0; i < count; i++ )
        {
            /* keep points inside the central quarter of the image */
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
#ifndef _EiC
        box = cvMinAreaRect2( &pointMat, 0 );
#endif
        cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
        cvBoxPoints( box, box_vtx );   /* rotated rectangle -> 4 corners */
        cvZero( img );
        for( i = 0; i < count; i++ )
        {
#if !ARRAY
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }

#ifndef _EiC
        /* draw the rotated rectangle as a closed polyline */
        pt0.x = cvRound(box_vtx[3].x);
        pt0.y = cvRound(box_vtx[3].y);
        for( i = 0; i < 4; i++ )
        {
            pt.x = cvRound(box_vtx[i].x);
            pt.y = cvRound(box_vtx[i].y);
            cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
            pt0 = pt;
        }
#endif
        icenter.x = cvRound(center.x);
        icenter.y = cvRound(center.y);
        cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );

        cvShowImage( "rect & circle", img );

        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;

#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
#endif
    }

    cvDestroyWindow( "rect & circle" );
    return 0;
}
#ifdef _EiC
/* entry point when run under the EiC C interpreter;
   fix: was "convexhull.c" - a copy/paste leftover from another sample */
main(1,"minarea.c");
#endif

110
samples/c/morphology.c Normal file
View File

@@ -0,0 +1,110 @@
#define CV_NO_BACKWARD_COMPATIBILITY
#include <cv.h>
#include <highgui.h>
#include <stdlib.h>
#include <stdio.h>
IplImage* src = 0;    // source image, loaded in main()
IplImage* dst = 0;    // destination buffer shared by both demo windows
IplConvKernel* element = 0;          // structuring element, rebuilt in every callback
int element_shape = CV_SHAPE_RECT;   // current structuring-element shape
//the address of variable which receives trackbar position update
int max_iters = 10;                  // trackbar midpoint; positions map to [-max_iters, max_iters]
int open_close_pos = 0;              // "Open/Close" trackbar position
int erode_dilate_pos = 0;            // "Erode/Dilate" trackbar position
// callback for the "Open/Close" trackbar: positions below the midpoint
// perform a morphological opening, positions above perform a closing;
// the distance from the midpoint sets the structuring-element radius
void OpenClose(int pos)
{
    int offset = open_close_pos - max_iters;
    int radius = offset >= 0 ? offset : -offset;

    element = cvCreateStructuringElementEx( radius*2+1, radius*2+1, radius, radius, element_shape, 0 );

    if( offset < 0 )
    {
        /* opening: erosion followed by dilation */
        cvErode( src, dst, element, 1 );
        cvDilate( dst, dst, element, 1 );
    }
    else
    {
        /* closing: dilation followed by erosion */
        cvDilate( src, dst, element, 1 );
        cvErode( dst, dst, element, 1 );
    }

    cvReleaseStructuringElement( &element );
    cvShowImage( "Open/Close", dst );
}
// callback for the "Erode/Dilate" trackbar: positions below the midpoint
// erode, positions above dilate; the distance from the midpoint sets the
// structuring-element radius
void ErodeDilate(int pos)
{
    int offset = erode_dilate_pos - max_iters;
    int radius = offset >= 0 ? offset : -offset;

    element = cvCreateStructuringElementEx( radius*2+1, radius*2+1, radius, radius, element_shape, 0 );

    if( offset < 0 )
        cvErode( src, dst, element, 1 );
    else
        cvDilate( src, dst, element, 1 );

    cvReleaseStructuringElement( &element );
    cvShowImage( "Erode/Dilate", dst );
}
// Interactive morphology demo: two windows show opening/closing and
// erosion/dilation of the input image (argv[1], default "baboon.jpg");
// trackbars control the structuring-element size, keys switch its shape.
int main( int argc, char** argv )
{
    char* filename = argc == 2 ? argv[1] : (char*)"baboon.jpg";
    if( (src = cvLoadImage(filename,1)) == 0 )
        return -1;

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tr - use rectangle structuring element\n"
        "\te - use elliptic structuring element\n"
        "\tc - use cross-shaped structuring element\n"
        "\tSPACE - loop through all the options\n" );

    dst = cvCloneImage(src);

    //create windows for output images
    cvNamedWindow("Open/Close",1);
    cvNamedWindow("Erode/Dilate",1);

    // start both trackbars at the midpoint (zero effective iterations)
    open_close_pos = erode_dilate_pos = max_iters;
    cvCreateTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
    cvCreateTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);

    for(;;)
    {
        int c;

        // refresh both windows with the current settings
        OpenClose(open_close_pos);
        ErodeDilate(erode_dilate_pos);
        c = cvWaitKey(0);

        if( (char)c == 27 )
            break;
        if( (char)c == 'e' )
            element_shape = CV_SHAPE_ELLIPSE;
        else if( (char)c == 'r' )
            element_shape = CV_SHAPE_RECT;
        else if( (char)c == 'c' )
            element_shape = CV_SHAPE_CROSS;
        else if( (char)c == ' ' )
            // NOTE(review): cycling assumes the three CV_SHAPE_* constants
            // are consecutive small integers starting at 0 - confirm
            element_shape = (element_shape + 1) % 3;
    }

    //release images
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    //destroy windows
    cvDestroyWindow("Open/Close");
    cvDestroyWindow("Erode/Dilate");

    return 0;
}

199
samples/c/motempl.c Normal file
View File

@@ -0,0 +1,199 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
// motion templates sample code
#include "cv.h"
#include "highgui.h"
#include <time.h>
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#endif
// various tracking parameters (in seconds)
const double MHI_DURATION = 1;       // how long a motion trace persists in the MHI
const double MAX_TIME_DELTA = 0.5;   // max time delta for cvCalcMotionGradient / cvSegmentMotion
const double MIN_TIME_DELTA = 0.05;  // min time delta for cvCalcMotionGradient
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
const int N = 4;

// ring image buffer
IplImage **buf = 0;   // N grayscale frames; differencing uses oldest vs. newest
int last = 0;         // index of the most recent frame in buf

// temporary images
IplImage *mhi = 0; // MHI
IplImage *orient = 0; // orientation
IplImage *mask = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
// Updates the motion history image (MHI) with the new frame, segments the
// motion into components and draws, for each component (and once for the
// whole image), a "clock" whose hand points in the motion direction.
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    // the oldest buffer slot is overwritten by the per-pixel difference
    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// Motion-templates demo: reads frames from a camera (single one-digit
// argument = camera index, default 0) or a video file and shows the
// motion components detected by update_mhi().
int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    if( capture )
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image = cvQueryFrame( capture );
            if( !image )
                break;

            if( !motion )
            {
                // allocated lazily once the first frame defines the size
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin;
            }

            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            if( cvWaitKey(10) >= 0 )
                break;
        }
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}
#ifdef _EiC
/* entry point when the sample is run under the EiC C interpreter */
main(1,"motempl.c");
#endif

124
samples/c/mser_sample.cpp Normal file
View File

@@ -0,0 +1,124 @@
/* This sample code was originally provided by Liu Liu
* Copyright<68> 2009, Liu Liu All rights reserved.
*/
#include <iostream>
#include <cstdio>
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
// palette used to draw the fitted ellipses; colors[9]/colors[2] mark the
// two polarities of MSER regions in main()
static CvScalar colors[] =
{
    {{0,0,255}},
    {{0,128,255}},
    {{0,255,255}},
    {{0,255,0}},
    {{255,128,0}},
    {{255,255,0}},
    {{255,0,0}},
    {{255,0,255}},
    {{255,255,255}},
    {{196,255,255}},
    {{255,255,196}}
};

// 9-entry palette used to colorize MSER regions; channels are written
// individually (per-channel indexing) in main()
static uchar bcolors[][3] =
{
    {0,0,255},
    {0,128,255},
    {0,255,255},
    {0,255,0},
    {255,128,0},
    {255,255,0},
    {255,0,0},
    {255,0,255},
    {255,255,255}
};
// Extracts MSER regions from an image (argv[1], or puzzle.png by default),
// colorizes each region in the response image, draws a fitted ellipse per
// region, saves the response as rsp.png and shows all three views.
int main( int argc, char** argv )
{
    char path[1024];
    IplImage* img;
    if (argc!=2)
    {
        strcpy(path,"puzzle.png");
        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
        if (!img)
        {
            printf("\nUsage: mser_sample <path_to_image>\n");
            return 0;
        }
    }
    else
    {
        strcpy(path,argv[1]);
        img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
    }
    if (!img)
    {
        printf("Unable to load image %s\n",path);
        return 0;
    }
    IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
    IplImage* ellipses = cvCloneImage(rsp);
    cvCvtColor(img,ellipses,CV_GRAY2BGR);
    CvSeq* contours;
    CvMemStorage* storage= cvCreateMemStorage();
    // MSER is run on the YCrCb version of the color image
    IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
    cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
    CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );

    double t = (double)cvGetTickCount();
    cvExtractMSER( hsv, NULL, &contours, storage, params );
    t = cvGetTickCount() - t;
    printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );

    uchar* rsptr = (uchar*)rsp->imageData;
    // draw mser with different color
    for ( int i = contours->total-1; i >= 0; i-- )
    {
        CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
        for ( int j = 0; j < r->total; j++ )
        {
            CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
            rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
            rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
            rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
        }
    }
    // find ellipse ( it seems cvfitellipse2 have error or sth?
    for ( int i = 0; i < contours->total; i++ )
    {
        CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
        CvBox2D box = cvFitEllipse2( r );
        box.angle=(float)CV_PI/2-box.angle;

        if ( r->color > 0 )
            cvEllipseBox( ellipses, box, colors[9], 2 );
        else
            cvEllipseBox( ellipses, box, colors[2], 2 );
    }

    cvSaveImage( "rsp.png", rsp );

    cvNamedWindow( "original", 0 );
    cvShowImage( "original", img );

    cvNamedWindow( "response", 0 );
    cvShowImage( "response", rsp );

    cvNamedWindow( "ellipses", 0 );
    cvShowImage( "ellipses", ellipses );

    cvWaitKey(0);

    cvDestroyWindow( "original" );
    cvDestroyWindow( "response" );
    cvDestroyWindow( "ellipses" );
    cvReleaseImage(&rsp);
    cvReleaseImage(&img);
    cvReleaseImage(&ellipses);
    // fix: these two were leaked (the storage also owns `contours`)
    cvReleaseImage(&hsv);
    cvReleaseMemStorage(&storage);

    return 0;   // fix: make main's return value explicit
}

314
samples/c/mushroom.cpp Normal file
View File

@@ -0,0 +1,314 @@
#include "ml.h"
#include <stdio.h>
/*
The sample demonstrates how to build a decision tree for classifying mushrooms.
It uses the sample base agaricus-lepiota.data from UCI Repository, here is the link:
Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
UCI Repository of machine learning databases
[http://www.ics.uci.edu/~mlearn/MLRepository.html].
Irvine, CA: University of California, Department of Information and Computer Science.
*/
// loads the mushroom database, which is a text file, containing
// one training sample per row, all the input variables and the output variable are categorical,
// the values are encoded by characters.
// On success allocates and fills *data (one sample per row), *missing
// (mask of '?' values) and *responses (class-label characters as floats)
// and returns 1; returns 0 if the file cannot be opened or read.
int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
{
    const int M = 1024;
    FILE* f = fopen( filename, "rt" );
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M+2], *ptr;
    float* el_ptr;
    CvSeqReader reader;
    int i, j, var_count = 0;

    if( !f )
        return 0;

    // read the first line and determine the number of variables
    if( !fgets( buf, M, f ))
    {
        fclose(f);
        return 0;
    }

    for( ptr = buf; *ptr != '\0'; ptr++ )
        var_count += *ptr == ',';
    // file format: one character per field, comma separated
    assert( ptr - buf == (var_count+1)*2 );

    // create temporary memory storage to store the whole database
    el_ptr = new float[var_count+1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );

    for(;;)
    {
        // element 0 is the response, the rest are the input variables;
        // '?' encodes a missing value and is stored as -1
        for( i = 0; i <= var_count; i++ )
        {
            int c = buf[i*2];
            el_ptr[i] = c == '?' ? -1.f : (float)c;
        }
        if( i != var_count+1 )
            break;
        cvSeqPush( seq, el_ptr );
        if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
            break;
    }
    fclose(f);

    // allocate the output matrices and copy the base there
    *data = cvCreateMat( seq->total, var_count, CV_32F );
    *missing = cvCreateMat( seq->total, var_count, CV_8U );
    *responses = cvCreateMat( seq->total, 1, CV_32F );

    cvStartReadSeq( seq, &reader );

    for( i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr + 1;   // skip the response
        float* ddata = data[0]->data.fl + var_count*i;
        float* dr = responses[0]->data.fl + i;
        uchar* dm = missing[0]->data.ptr + var_count*i;

        for( j = 0; j < var_count; j++ )
        {
            ddata[j] = sdata[j];
            dm[j] = sdata[j] < 0;   // negative value <=> missing
        }
        *dr = sdata[-1];
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }

    cvReleaseMemStorage( &storage );
    // fix: el_ptr was allocated with new[], so plain `delete` was
    // undefined behavior; it must be released with delete[]
    delete[] el_ptr;
    return 1;
}
// Trains a decision tree on the mushroom database and prints its hit-rate
// on the training data. `p_weight` is the prior weight of the second
// ("poisonous") class: the bigger it is, the more the tree leans towards
// judging a mushroom poisonous. The caller owns the returned tree (new).
CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
                                const CvMat* responses, float p_weight )
{
    CvDTree* dtree;
    CvMat* var_type;
    int i, hr1 = 0, hr2 = 0, p_total = 0;
    float priors[] = { 1, p_weight };

    var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
    cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical

    dtree = new CvDTree;

    dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
                  CvDTreeParams( 8, // max depth
                                 10, // min sample count
                                 0, // regression accuracy: N/A here
                                 true, // compute surrogate split, as we have missing data
                                 15, // max number of categories (use sub-optimal algorithm for larger numbers)
                                 10, // the number of cross-validation folds
                                 true, // use 1SE rule => smaller tree
                                 true, // throw away the pruned tree branches
                                 priors // the array of priors, the bigger p_weight, the more attention
                                 // to the poisonous mushrooms
                                 // (a mushroom will be judjed to be poisonous with bigger chance)
                                 ));

    // compute hit-rate on the training database, demonstrates predict usage.
    for( i = 0; i < data->rows; i++ )
    {
        CvMat sample, mask;
        cvGetRow( data, &sample, i );
        cvGetRow( missing, &mask, i );
        double r = dtree->predict( &sample, &mask )->value;
        int d = fabs(r - responses->data.fl[i]) >= FLT_EPSILON;   // misclassified?
        if( d )
        {
            if( r != 'p' )
                hr1++;   // poisonous mushroom predicted edible (dangerous miss)
            else
                hr2++;   // edible mushroom predicted poisonous (false alarm)
        }
        p_total += responses->data.fl[i] == 'p';
    }

    printf( "Results on the training database:\n"
            "\tPoisonous mushrooms mis-predicted: %d (%g%%)\n"
            "\tFalse-alarms: %d (%g%%)\n", hr1, (double)hr1*100/p_total,
            hr2, (double)hr2*100/(data->rows - p_total) );

    cvReleaseMat( &var_type );

    return dtree;
}
// human-readable descriptions of the agaricus-lepiota attributes, in the
// column order of the database; NULL-terminated. The parenthesized part of
// each entry lists the single-character codes used in the data file.
static const char* var_desc[] =
{
    "cap shape (bell=b,conical=c,convex=x,flat=f)",
    "cap surface (fibrous=f,grooves=g,scaly=y,smooth=s)",
    "cap color (brown=n,buff=b,cinnamon=c,gray=g,green=r,\n\tpink=p,purple=u,red=e,white=w,yellow=y)",
    "bruises? (bruises=t,no=f)",
    "odor (almond=a,anise=l,creosote=c,fishy=y,foul=f,\n\tmusty=m,none=n,pungent=p,spicy=s)",
    "gill attachment (attached=a,descending=d,free=f,notched=n)",
    "gill spacing (close=c,crowded=w,distant=d)",
    "gill size (broad=b,narrow=n)",
    "gill color (black=k,brown=n,buff=b,chocolate=h,gray=g,\n\tgreen=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y)",
    "stalk shape (enlarging=e,tapering=t)",
    "stalk root (bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r)",
    "stalk surface above ring (ibrous=f,scaly=y,silky=k,smooth=s)",
    "stalk surface below ring (ibrous=f,scaly=y,silky=k,smooth=s)",
    "stalk color above ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
    "stalk color below ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
    "veil type (partial=p,universal=u)",
    "veil color (brown=n,orange=o,white=w,yellow=y)",
    "ring number (none=n,one=o,two=t)",
    "ring type (cobwebby=c,evanescent=e,flaring=f,large=l,\n\tnone=n,pendant=p,sheathing=s,zone=z)",
    "spore print color (black=k,brown=n,buff=b,chocolate=h,green=r,\n\torange=o,purple=u,white=w,yellow=y)",
    "population (abundant=a,clustered=c,numerous=n,\n\tscattered=s,several=v,solitary=y)",
    "habitat (grasses=g,leaves=l,meadows=m,paths=p\n\turban=u,waste=w,woods=d)",
    0
};
// After a y/n prompt, prints the relative importance (in %) of every input
// variable as estimated by the trained decision tree. When `var_desc` is
// given, only the attribute name (the text before the parenthesized code
// legend) is printed for each variable.
void print_variable_importance( CvDTree* dtree, const char** var_desc )
{
    const CvMat* importance = dtree->get_var_importance();
    char answer[1000];
    int idx = 0, total;

    if( !importance )
    {
        printf( "Error: Variable importance can not be retrieved\n" );
        return;
    }

    printf( "Print variable importance information? (y/n) " );
    scanf( "%1s", answer );
    if( answer[0] != 'y' && answer[0] != 'Y' )
        return;

    total = importance->cols*importance->rows;
    while( idx < total )
    {
        double imp = importance->data.db[idx];

        if( !var_desc )
            printf( "var #%d", idx );
        else
        {
            // strip the parenthesized legend from the description
            char name[100];
            int name_len = strchr( var_desc[idx], '(' ) - var_desc[idx] - 1;
            strncpy( name, var_desc[idx], name_len );
            name[name_len] = '\0';
            printf( "%s", name );
        }

        printf( ": %g%%\n", imp*100. );
        idx++;
    }
}
// Interactive mushroom classifier: repeatedly asks the user for attribute
// values (one letter each, '?' for unknown) and walks the decision tree by
// hand, mimicking CvDTree::predict but reading the split answers from stdin.
// dtree    - trained decision tree (may be 0; then the function returns).
// var_desc - attribute descriptions indexed by variable index; used as prompts.
void interactive_classification( CvDTree* dtree, const char** var_desc )
{
    char input[1000];
    const CvDTreeNode* root;
    CvDTreeTrainData* data;

    if( !dtree )
        return;

    root = dtree->get_root();
    data = dtree->get_data();

    for(;;)
    {
        const CvDTreeNode* node;
        printf( "Start/Proceed with interactive mushroom classification (y/n): " );
        scanf( "%1s", input );
        if( input[0] != 'y' && input[0] != 'Y' )
            break;
        printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );

        // custom version of predict
        node = root;
        for(;;)
        {
            CvDTreeSplit* split = node->split;
            int dir = 0;

            // stop at a leaf, at a node cut off by pruning, or when there
            // is no split to evaluate
            if( !node->left || node->Tn <= dtree->get_pruned_tree_idx() || !node->split )
                break;

            // try the primary split first, then surrogate splits (split->next)
            // whenever the user answers '?' for the current variable
            for( ; split != 0; )
            {
                int vi = split->var_idx, j;
                int count = data->cat_count->data.i[vi];
                // cat_map holds the category labels of variable vi starting
                // at offset cat_ofs[vi]
                const int* map = data->cat_map->data.i + data->cat_ofs->data.i[vi];

                printf( "%s: ", var_desc[vi] );
                scanf( "%1s", input );

                if( input[0] == '?' )
                {
                    split = split->next;
                    continue;
                }

                // convert the input character to the normalized value of the variable
                for( j = 0; j < count; j++ )
                    if( map[j] == input[0] )
                        break;

                if( j < count )
                {
                    // bit j of the split subset decides left (-1) vs right (+1)
                    dir = (split->subset[j>>5] & (1 << (j&31))) ? -1 : 1;
                    if( split->inversed )
                        dir = -dir;
                    break;
                }
                else
                    printf( "Error: unrecognized value\n" );
            }

            if( !dir )
            {
                // every available split was skipped -> cannot descend further
                printf( "Impossible to classify the sample\n");
                node = 0;
                break;
            }
            node = dir < 0 ? node->left : node->right;
        }

        if( node )
            printf( "Prediction result: the mushroom is %s\n",
                    node->class_idx == 0 ? "EDIBLE" : "POISONOUS" );
        printf( "\n-----------------------------\n" );
    }
}
// Loads the mushroom database, trains a decision tree with poisonous samples
// weighted 10x, then reports variable importance and runs the interactive
// classifier. Returns 0 on success, -1 when the database cannot be loaded.
int main( int argc, char** argv )
{
    CvMat *data = 0, *missing = 0, *responses = 0;
    CvDTree* dtree;
    const char* base_path = argc >= 2 ? argv[1] : "agaricus-lepiota.data";

    if( !mushroom_read_database( base_path, &data, &missing, &responses ) )
    {
        printf( "Unable to load the training database\n"
                "Pass it as a parameter: dtree <path to agaricus-lepiota.data>\n" );
        // fix: the original did `return 0;` here, making the intended
        // `return -1;` unreachable and reporting success on failure
        return -1;
    }

    dtree = mushroom_create_dtree( data, missing, responses,
        10 // poisonous mushrooms will have 10x higher weight in the decision tree
        );
    cvReleaseMat( &data );
    cvReleaseMat( &missing );
    cvReleaseMat( &responses );

    print_variable_importance( dtree, var_desc );
    interactive_classification( dtree, var_desc );
    delete dtree;

    return 0;
}

View File

@@ -0,0 +1,256 @@
/*
* one_way_sample.cpp
* outlet_detection
*
* Created by Victor Eruhimov on 8/5/09.
* Copyright 2009 Argus Corp. All rights reserved.
*
*/
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <string>
using namespace cv;
// Draws img1 and img2 side by side, marks the keypoints and connects each
// keypoint of img2 with its match in img1 (as given by desc_idx).
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1,
                              IplImage* img2, const vector<KeyPoint>& features2, const vector<int>& desc_idx);
// Trains the low/high-resolution PCA bases and the PCA descriptors from the
// training images under img_path and saves them to the given files.
void generatePCADescriptors(const char* img_path, const char* pca_low_filename, const char* pca_high_filename,
                            const char* pca_desc_filename, CvSize patch_size);
// One-way descriptor matching demo: trains (or loads) PCA data, extracts SURF
// keypoints from two images, matches keypoints of the second image against
// one-way descriptors built from the first, and displays the correspondences.
// Usage: one_way_sample <path_to_samples> <image1> <image2>
int main(int argc, char** argv)
{
    const char pca_high_filename[] = "pca_hr.yml";
    const char pca_low_filename[] = "pca_lr.yml";
    const char pca_desc_filename[] = "pca_descriptors.yml";
    const CvSize patch_size = cvSize(24, 24);
    const int pose_count = 50;

    // fix: the original accepted argc == 3 as well, but then read argv[3]
    // (past the argument list) below; exactly 3 user arguments are required
    if(argc != 4)
    {
        printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
        printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
        return 0;
    }

    std::string path_name = argv[1];
    std::string img1_name = path_name + "/" + std::string(argv[2]);
    std::string img2_name = path_name + "/" + std::string(argv[3]);

    // train PCA data on first use (use the named constant, not a literal)
    CvFileStorage* fs = cvOpenFileStorage(pca_high_filename, NULL, CV_STORAGE_READ);
    if(fs == NULL)
    {
        printf("PCA data is not found, starting training...\n");
        generatePCADescriptors(path_name.c_str(), pca_low_filename, pca_high_filename, pca_desc_filename, patch_size);
    }
    else
    {
        cvReleaseFileStorage(&fs);
    }

    printf("Reading the images...\n");
    IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* img2 = cvLoadImage(img2_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
    // fix: cvLoadImage returns 0 on failure; the original dereferenced the
    // images without checking
    if(!img1 || !img2)
    {
        printf("Error: unable to read %s or %s\n", img1_name.c_str(), img2_name.c_str());
        cvReleaseImage(&img1);
        cvReleaseImage(&img2);
        return -1;
    }

    // extract keypoints from the first image
    vector<KeyPoint> keypoints1;
    SURF surf_extractor(5.0e3);
    surf_extractor(img1, Mat(), keypoints1);
    printf("Extracted %d keypoints...\n", (int)keypoints1.size());

    printf("Training one way descriptors...");
    // create descriptors
    OneWayDescriptorBase descriptors(patch_size, pose_count, ".", pca_low_filename, pca_high_filename, pca_desc_filename);
    descriptors.CreateDescriptorsFromImage(img1, keypoints1);
    printf("done\n");

    // extract keypoints from the second image
    vector<KeyPoint> keypoints2;
    surf_extractor(img2, Mat(), keypoints2);
    printf("Extracted %d keypoints from the second image...\n", (int)keypoints2.size());

    printf("Finding nearest neighbors...");
    // find NN for each of keypoints2 in keypoints1
    vector<int> desc_idx;
    desc_idx.resize(keypoints2.size());
    for(size_t i = 0; i < keypoints2.size(); i++)
    {
        int pose_idx = 0;
        float distance = 0;
        descriptors.FindDescriptor(img2, keypoints2[i].pt, desc_idx[i], pose_idx, distance);
    }
    printf("done\n");

    IplImage* img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, desc_idx);

    cvNamedWindow("correspondences", 1);
    cvShowImage("correspondences", img_corr);
    cvWaitKey(0);

    cvReleaseImage(&img1);
    cvReleaseImage(&img2);
    cvReleaseImage(&img_corr);
    return 0; // fix: the original fell off the end of main
}
// Renders the two grayscale images side by side on one color canvas, marks
// every keypoint with a red circle, and links each keypoint of img2 to its
// matched keypoint of img1 (match indices in desc_idx) with a green line.
// The caller owns the returned image.
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2, const vector<KeyPoint>& features2, const vector<int>& desc_idx)
{
    CvSize canvas_size = cvSize(img1->width + img2->width, MAX(img1->height, img2->height));
    IplImage* canvas = cvCreateImage(canvas_size, IPL_DEPTH_8U, 3);

    // paste the inputs into the left and right halves of the canvas
    cvSetImageROI(canvas, cvRect(0, 0, img1->width, img1->height));
    cvCvtColor(img1, canvas, CV_GRAY2RGB);
    cvSetImageROI(canvas, cvRect(img1->width, 0, img2->width, img2->height));
    cvCvtColor(img2, canvas, CV_GRAY2RGB);
    cvResetImageROI(canvas);

    // keypoints of the first image stay at their original positions
    for(size_t k = 0; k < features1.size(); k++)
    {
        cvCircle(canvas, features1[k].pt, 3, CV_RGB(255, 0, 0));
    }

    // keypoints of the second image are shifted right by img1->width;
    // each is connected to its match in the first image
    for(size_t k = 0; k < features2.size(); k++)
    {
        CvPoint shifted = cvPoint(features2[k].pt.x + img1->width, features2[k].pt.y);
        cvCircle(canvas, shifted, 3, CV_RGB(255, 0, 0));
        cvLine(canvas, features1[desc_idx[k]].pt, shifted, CV_RGB(0, 255, 0));
    }
    return canvas;
}
/*
* pca_features
*
*
*/
// Writes the PCA mean vector ("avg") and eigenvector matrix ("eigenvectors")
// to a YAML/XML file with the given name.
void savePCAFeatures(const char* filename, CvMat* avg, CvMat* eigenvectors)
{
    CvMemStorage* storage = cvCreateMemStorage();
    CvFileStorage* fs = cvOpenFileStorage(filename, storage, CV_STORAGE_WRITE);
    // fix: cvOpenFileStorage returns 0 on failure; the original passed the
    // null handle straight into cvWrite
    if(fs)
    {
        cvWrite(fs, "avg", avg);
        cvWrite(fs, "eigenvectors", eigenvectors);
        cvReleaseFileStorage(&fs);
    }
    else
    {
        printf("Error: unable to open %s for writing\n", filename);
    }
    cvReleaseMemStorage(&storage);
}
// Computes a PCA basis from a set of equally-sized 8-bit grayscale patches.
// Each patch is normalized by the sum of its pixel values, the patches are
// stacked as rows, and cvCalcPCA produces the mean and the eigenvectors,
// which are also saved to `filename` via savePCAFeatures.
// *avg and *eigenvectors are allocated here; the caller must release them.
// On empty input the outputs are set to 0 and nothing is computed.
void calcPCAFeatures(vector<IplImage*>& patches, const char* filename, CvMat** avg, CvMat** eigenvectors)
{
    // fix: the original dereferenced patches[0] unconditionally and crashed
    // when no patches had been extracted
    if(patches.empty())
    {
        printf("Error: no patches to compute PCA from\n");
        *avg = 0;
        *eigenvectors = 0;
        return;
    }

    int width = patches[0]->width;
    int height = patches[0]->height;
    int length = width*height;
    int patch_count = (int)patches.size();

    CvMat* data = cvCreateMat(patch_count, length, CV_32FC1);
    *avg = cvCreateMat(1, length, CV_32FC1);
    CvMat* eigenvalues = cvCreateMat(1, length, CV_32FC1);
    *eigenvectors = cvCreateMat(length, length, CV_32FC1);

    for(int i = 0; i < patch_count; i++)
    {
        // normalize every patch by its total intensity before PCA
        float sum = cvSum(patches[i]).val[0];
        for(int y = 0; y < height; y++)
        {
            for(int x = 0; x < width; x++)
            {
                *((float*)(data->data.ptr + data->step*i) + y*width + x) = (float)(unsigned char)patches[i]->imageData[y*patches[i]->widthStep + x]/sum;
            }
        }
    }

    printf("Calculating PCA...");
    cvCalcPCA(data, *avg, eigenvalues, *eigenvectors, CV_PCA_DATA_AS_ROW);
    printf("done\n");

    // save pca data
    savePCAFeatures(filename, *avg, *eigenvectors);

    cvReleaseMat(&data);
    cvReleaseMat(&eigenvalues);
}
// Reads the training images "one_way_train_%04d.jpg" from `path`, detects
// SURF keypoints in each, and appends a patch_size patch centered at every
// keypoint (that fits fully inside the image) to `patches`.
// The caller owns the appended images and must release them.
void loadPCAFeatures(const char* path, vector<IplImage*>& patches, CvSize patch_size)
{
    const int file_count = 2;
    for(int i = 0; i < file_count; i++)
    {
        char buf[1024];
        sprintf(buf, "%s/one_way_train_%04d.jpg", path, i);
        printf("Reading image %s...", buf);
        IplImage* img = cvLoadImage(buf, CV_LOAD_IMAGE_GRAYSCALE);
        // fix: cvLoadImage returns 0 on failure; the original dereferenced
        // the image without checking and crashed on a missing file
        if(!img)
        {
            printf("failed\n");
            continue;
        }
        printf("done\n");

        vector<KeyPoint> features;
        SURF surf_extractor(1.0f);
        printf("Extracting SURF features...");
        surf_extractor(img, Mat(), features);
        printf("done\n");

        for(int j = 0; j < (int)features.size(); j++)
        {
            int patch_width = patch_size.width;
            int patch_height = patch_size.height;

            CvPoint center = features[j].pt;

            CvRect roi = cvRect(center.x - patch_width/2, center.y - patch_height/2, patch_width, patch_height);
            cvSetImageROI(img, roi);
            roi = cvGetImageROI(img);
            // skip keypoints whose patch would be clipped at the image border
            if(roi.width != patch_width || roi.height != patch_height)
            {
                cvResetImageROI(img); // fix: don't leave a stale ROI set on the skip path
                continue;
            }

            IplImage* patch = cvCreateImage(cvSize(patch_width, patch_height), IPL_DEPTH_8U, 1);
            cvCopy(img, patch);
            patches.push_back(patch);
            cvResetImageROI(img);
        }
        printf("Completed file %d, extracted %d features\n", i, (int)features.size());
        cvReleaseImage(&img);
    }
}
void generatePCAFeatures(const char* img_filename, const char* pca_filename, CvSize patch_size, CvMat** avg, CvMat** eigenvectors)
{
vector<IplImage*> patches;
loadPCAFeatures(img_filename, patches, patch_size);
calcPCAFeatures(patches, pca_filename, avg, eigenvectors);
}
// Full PCA training pipeline for one-way descriptors:
// 1) trains a high-resolution PCA basis at patch_size (saved to pca_high_filename),
// 2) trains a low-resolution basis at half the patch size (saved to pca_low_filename),
// 3) builds pose-transformed PCA descriptors and saves them to pca_desc_filename.
void generatePCADescriptors(const char* img_path, const char* pca_low_filename, const char* pca_high_filename,
                            const char* pca_desc_filename, CvSize patch_size)
{
    CvMat* avg_hr;
    CvMat* eigenvectors_hr;
    generatePCAFeatures(img_path, pca_high_filename, patch_size, &avg_hr, &eigenvectors_hr);

    CvMat* avg_lr;
    CvMat* eigenvectors_lr;
    // the low-resolution basis is trained on half-size patches
    generatePCAFeatures(img_path, pca_low_filename, cvSize(patch_size.width/2, patch_size.height/2),
                        &avg_lr, &eigenvectors_lr);

    // NOTE(review): 500 poses here vs. 50 in main -- presumably a denser
    // sampling is wanted for offline descriptor generation; confirm upstream.
    const int pose_count = 500;
    OneWayDescriptorBase descriptors(patch_size, pose_count);
    descriptors.SetPCAHigh(avg_hr, eigenvectors_hr);
    descriptors.SetPCALow(avg_lr, eigenvectors_lr);

    printf("Calculating %d PCA descriptors (you can grab a coffee, this will take a while)...\n", descriptors.GetPCADimHigh());
    descriptors.InitializePoseTransforms();
    descriptors.CreatePCADescriptors();
    descriptors.SavePCADescriptors(pca_desc_filename);

    cvReleaseMat(&avg_hr);
    cvReleaseMat(&eigenvectors_hr);
    cvReleaseMat(&avg_lr);
    cvReleaseMat(&eigenvectors_lr);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

101
samples/c/peopledetect.cpp Normal file
View File

@@ -0,0 +1,101 @@
#include "cvaux.h"
#include "highgui.h"
#include <stdio.h>
#include <string.h>
#include <ctype.h>
using namespace cv;
using namespace std;
// HOG people detector demo. Accepts either a single image file or a text
// file listing image paths (lines starting with '#' are skipped). For every
// image it runs HOGDescriptor::detectMultiScale with the default people
// detector, drops detections fully contained in a larger one, shrinks the
// remaining boxes slightly, and displays them. 'q'/'Q' quits a list run.
int main(int argc, char** argv)
{
    Mat img;
    FILE* f = 0;
    char _filename[1024];

    if( argc == 1 )
    {
        printf("Usage: peopledetect (<image_filename> | <image_list>.txt)\n");
        return 0;
    }
    img = imread(argv[1]);

    if( img.data )
    {
        // fix: bound the copy; argv[1] may exceed the 1024-byte buffer
        strncpy(_filename, argv[1], sizeof(_filename) - 1);
        _filename[sizeof(_filename) - 1] = '\0';
    }
    else
    {
        f = fopen(argv[1], "rt");
        if(!f)
        {
            fprintf( stderr, "ERROR: the specified file could not be loaded\n");
            return -1;
        }
    }

    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    namedWindow("people detector", 1);

    for(;;)
    {
        char* filename = _filename;
        if(f)
        {
            if(!fgets(filename, (int)sizeof(_filename)-2, f))
                break;
            if(filename[0] == '#')
                continue;
            // strip trailing whitespace/newline
            int l = strlen(filename);
            while(l > 0 && isspace(filename[l-1]))
                --l;
            filename[l] = '\0';
            img = imread(filename);
        }
        printf("%s:\n", filename);
        if(!img.data)
            continue;

        fflush(stdout);
        vector<Rect> found, found_filtered;
        double t = (double)getTickCount();
        // run the detector with default parameters. to get a higher hit-rate
        // (and more false alarms, respectively), decrease the hitThreshold and
        // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        t = (double)getTickCount() - t;
        // fix: the format string was missing the backslash ("tdetection")
        printf("\tdetection time = %gms\n", t*1000./cv::getTickFrequency());
        size_t i, j;

        // keep only detections that are not nested inside a bigger one
        for( i = 0; i < found.size(); i++ )
        {
            Rect r = found[i];
            for( j = 0; j < found.size(); j++ )
                if( j != i && (r & found[j]) == r)
                    break;
            if( j == found.size() )
                found_filtered.push_back(r);
        }
        for( i = 0; i < found_filtered.size(); i++ )
        {
            Rect r = found_filtered[i];
            // the HOG detector returns slightly larger rectangles than the real objects.
            // so we slightly shrink the rectangles to get a nicer output.
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.07);
            r.height = cvRound(r.height*0.8);
            rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
        }
        imshow("people detector", img);
        int c = waitKey(0) & 255;
        if( c == 'q' || c == 'Q' || !f)
            break;
    }
    if(f)
        fclose(f);
    return 0;
}

BIN
samples/c/pic1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

BIN
samples/c/pic2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 254 KiB

BIN
samples/c/pic3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/pic4.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 108 KiB

BIN
samples/c/pic5.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

BIN
samples/c/pic6.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

View File

@@ -0,0 +1,84 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <ctype.h>
#include <stdio.h>
#endif
/* Polar transform demo: captures frames from a camera (numeric argument) or
   a video file, shows the log-polar and linear-polar transforms of each frame
   plus the image recovered by the inverse transform. Any key press quits. */
int main( int argc, char** argv )
{
    CvCapture* capture = 0;
    IplImage* log_polar_img = 0;
    IplImage* lin_polar_img = 0;
    IplImage* recovered_img = 0;

    /* a single digit selects a camera; anything else is a video file name */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        fprintf(stderr,"Usage: %s <CAMERA_NUMBER> , or \n %s <VIDEO_FILE>\n",argv[0],argv[0]);
        return -1;
    }

    cvNamedWindow( "Linear-Polar", 0 );
    cvNamedWindow( "Log-Polar", 0 );
    cvNamedWindow( "Recovered image", 0 );

    cvMoveWindow( "Linear-Polar", 20,20 );
    cvMoveWindow( "Log-Polar", 700,20 );
    cvMoveWindow( "Recovered image", 20,700 );

    for(;;)
    {
        IplImage* frame = 0;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        /* allocate the work images once, on the first frame */
        if( !log_polar_img )
        {
            log_polar_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
            lin_polar_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
            recovered_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
        }

        cvLogPolar(frame,log_polar_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);
        cvLinearPolar(frame,lin_polar_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);

#if 0
        cvLogPolar(log_polar_img,recovered_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_WARP_INVERSE_MAP+CV_INTER_LINEAR);
#else
        cvLinearPolar(lin_polar_img,recovered_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_WARP_INVERSE_MAP+CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);
#endif

        cvShowImage("Log-Polar", log_polar_img );
        cvShowImage("Linear-Polar", lin_polar_img );
        cvShowImage("Recovered image", recovered_img );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    /* fix: the three work images were leaked (cvReleaseImage is a no-op on
       a pointer to NULL, so this is safe even if no frame was captured) */
    cvReleaseImage( &log_polar_img );
    cvReleaseImage( &lin_polar_img );
    cvReleaseImage( &recovered_img );

    cvReleaseCapture( &capture );
    cvDestroyWindow("Linear-Polar");
    cvDestroyWindow("Log-Polar");
    cvDestroyWindow("Recovered image");
    return 0;
}
#ifdef _EiC
main(1,"laplace.c");
#endif

BIN
samples/c/puzzle.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 631 KiB

View File

@@ -0,0 +1,102 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <math.h>
#endif
/* Global state shared between main() and the trackbar callback ON_SEGMENT().
   NOTE(review): several of these (w0, h0, i, size, parameter, threshold,
   rezult, min_rezult, filter, pt1, pt2, l_comp, cur_comp, min_comp) are not
   referenced by the code visible in this file -- presumably leftovers. */
IplImage* image[2] = { 0, 0 }, *image0 = 0, *image1 = 0; /* source / working / segmented */
CvSize size;
int w0, h0,i;
int threshold1, threshold2;      /* trackbar-controlled segmentation thresholds */
int l,level = 4;                 /* number of pyramid levels used by cvPyrSegmentation */
int sthreshold1, sthreshold2;    /* return values of cvCreateTrackbar */
int l_comp;
int block_size = 1000;           /* initial block size of the memory storage */
float parameter;
double threshold;
double rezult, min_rezult;
int filter = CV_GAUSSIAN_5x5;
CvConnectedComp *cur_comp, min_comp;
CvSeq *comp;                     /* connected components found by segmentation */
CvMemStorage *storage;           /* storage backing `comp` */
CvPoint pt1, pt2;
/* Trackbar callback: re-runs pyramid segmentation of image0 into image1 with
   the current threshold values and refreshes the "Segmentation" window.
   The parameter `a` (the trackbar position) is unused -- the thresholds are
   read from the globals instead. The +1 keeps the thresholds positive when
   a slider is at 0. */
void ON_SEGMENT(int a)
{
    cvPyrSegmentation(image0, image1, storage, &comp,
                      level, threshold1+1, threshold2+1);

    /* dead code kept from the original sample: scans the connected components
       for the one closest to white -- the result is never used */
    /*l_comp = comp->total;
    i = 0;
    min_comp.value = cvScalarAll(0);
    while(i<l_comp)
    {
        cur_comp = (CvConnectedComp*)cvGetSeqElem ( comp, i );
        if(fabs(255- min_comp.value.val[0])>
           fabs(255- cur_comp->value.val[0]) &&
           fabs(min_comp.value.val[1])>
           fabs(cur_comp->value.val[1]) &&
           fabs(min_comp.value.val[2])>
           fabs(cur_comp->value.val[2]) )
           min_comp = *cur_comp;
        i++;
    }*/
    cvShowImage("Segmentation", image1);
}
/* Pyramid segmentation demo: loads an image (fruits.jpg by default), crops
   its size to a multiple of 2^level, segments it once and then lets the user
   tune the two thresholds with trackbars. Exits on any key press. */
int main( int argc, char** argv )
{
    char* filename = argc == 2 ? argv[1] : (char*)"fruits.jpg";

    if( (image[0] = cvLoadImage( filename, 1)) == 0 )
        return -1;

    cvNamedWindow("Source", 0);
    cvShowImage("Source", image[0]);
    cvNamedWindow("Segmentation", 0);

    storage = cvCreateMemStorage ( block_size );

    /* cvPyrSegmentation requires dimensions divisible by 2^level, so the
       image is cropped in place by masking the low bits of width/height */
    image[0]->width &= -(1<<level);
    image[0]->height &= -(1<<level);

    image0 = cvCloneImage( image[0] );
    image1 = cvCloneImage( image[0] );

    // segmentation of the color image
    l = 1;
    threshold1 =255;
    threshold2 =30;
    /* run once with the initial thresholds; the callback argument is unused */
    ON_SEGMENT(1);

    /* the trackbars re-run ON_SEGMENT whenever a threshold slider moves */
    sthreshold1 = cvCreateTrackbar("Threshold1", "Segmentation", &threshold1, 255, ON_SEGMENT);
    sthreshold2 = cvCreateTrackbar("Threshold2", "Segmentation", &threshold2, 255, ON_SEGMENT);

    cvShowImage("Segmentation", image1);
    cvWaitKey(0);

    cvDestroyWindow("Segmentation");
    cvDestroyWindow("Source");

    cvReleaseMemStorage(&storage );

    cvReleaseImage(&image[0]);
    cvReleaseImage(&image0);
    cvReleaseImage(&image1);

    return 0;
}
#ifdef _EiC
main(1,"pyramid_segmentation.c");
#endif

BIN
samples/c/right01.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

BIN
samples/c/right02.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/right03.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/right04.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

BIN
samples/c/right05.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

BIN
samples/c/right06.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/right07.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/right08.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
samples/c/right09.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/right11.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/right12.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

BIN
samples/c/right13.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

BIN
samples/c/right14.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
samples/c/scene_l.bmp Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

BIN
samples/c/scene_r.bmp Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

226
samples/c/squares.c Normal file
View File

@@ -0,0 +1,226 @@
//
// The full "Square Detector" program.
// It loads several images subsequentally and tries to find squares in
// each image
//
#ifdef _CH_
#pragma package <opencv>
#endif
#define CV_NO_BACKWARD_COMPATIBILITY
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
int thresh = 50;            /* upper Canny threshold used in findSquares4 */
IplImage* img = 0;          /* working copy of the current test image */
IplImage* img0 = 0;         /* the originally loaded image */
CvMemStorage* storage = 0;  /* storage for contour and square sequences */
const char* wndname = "Square Detection Demo";
// helper function:
// computes the cosine of the angle between the vectors pt0->pt1 and
// pt0->pt2; the small epsilon added under the square root protects
// against division by zero for degenerate (zero-length) vectors
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
    double ux = pt1->x - pt0->x, uy = pt1->y - pt0->y;
    double vx = pt2->x - pt0->x, vy = pt2->y - pt0->y;
    double dot = ux*vx + uy*vy;
    double norm_product = (ux*ux + uy*uy)*(vx*vx + vy*vy);
    return dot/sqrt(norm_product + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage.
// Pipeline: denoise with pyr-down/up, then for each of the 3 color planes
// try Canny plus N-1 binary threshold levels, extract contours, approximate
// each to a polygon and keep convex quadrilaterals with near-90-degree
// corners. Each accepted square contributes 4 consecutive CvPoints.
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;                 // N-1 threshold levels + the Canny pass
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( result->total == 4 &&
                    cvContourArea(result,CV_WHOLE_SEQ,0) > 1000 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;

                    for( i = 0; i < 5; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                            (CvPoint*)cvGetSeqElem( result, i ),
                            (CvPoint*)cvGetSeqElem( result, i-2 ),
                            (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quandrange
                    // vertices to resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares,
                                (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return squares;
}
// the function draws all the squares in the image:
// every 4 consecutive points in `squares` form one quadrilateral, which is
// drawn as a closed green polyline on a copy of the image; the copy is then
// shown in the demo window and released
void drawSquares( IplImage* img, CvSeq* squares )
{
    IplImage* canvas = cvCloneImage( img );
    CvSeqReader reader;
    int idx;

    // initialize reader of the sequence
    cvStartReadSeq( squares, &reader, 0 );

    // read 4 sequence elements at a time (all vertices of a square)
    for( idx = 0; idx < squares->total; idx += 4 )
    {
        CvPoint corners[4];
        CvPoint* poly = corners;
        int vertex_count = 4;

        // read 4 vertices
        CV_READ_SEQ_ELEM( corners[0], reader );
        CV_READ_SEQ_ELEM( corners[1], reader );
        CV_READ_SEQ_ELEM( corners[2], reader );
        CV_READ_SEQ_ELEM( corners[3], reader );

        // draw the square as a closed polyline
        cvPolyLine( canvas, &poly, &vertex_count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
    }

    // show the resultant image
    cvShowImage( wndname, canvas );
    cvReleaseImage( &canvas );
}
/* test images processed by main(), terminated by a 0 entry */
char* names[] = { "pic1.png", "pic2.png", "pic3.png",
                  "pic4.png", "pic5.png", "pic6.png", 0 };
/* Square-detection demo driver: loads each image in `names`, finds and draws
   its squares, and waits for a key between images; ESC aborts the loop. */
int main(int argc, char** argv)
{
    int i, c;

    // create memory storage that will contain all the dynamic data
    storage = cvCreateMemStorage(0);

    for( i = 0; names[i] != 0; i++ )
    {
        // load i-th image
        img0 = cvLoadImage( names[i], 1 );
        if( !img0 )
        {
            printf("Couldn't load %s\n", names[i] );
            continue;
        }
        img = cvCloneImage( img0 );

        // create window and a trackbar (slider) with parent "image" and set callback
        // (the slider regulates upper threshold, passed to Canny edge detector)
        cvNamedWindow( wndname, 1 );

        // find and draw the squares
        drawSquares( img, findSquares4( img, storage ) );

        // wait for key.
        // Also the function cvWaitKey takes care of event processing
        c = cvWaitKey(0);

        // release both images
        cvReleaseImage( &img );
        cvReleaseImage( &img0 );

        // clear memory storage - reset free space position
        cvClearMemStorage( storage );

        if( (char)c == 27 )
            break;
    }

    cvDestroyWindow( wndname );
    // fix: the memory storage itself was never released
    cvReleaseMemStorage( &storage );

    return 0;
}

389
samples/c/stereo_calib.cpp Normal file
View File

@@ -0,0 +1,389 @@
/* This is sample from the OpenCV book. The copyright notice is below */
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warrenty, support or any guarentee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
#undef _GLIBCXX_DEBUG
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <ctype.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag: useCalibrated for calibrated (0) or
// uncalibrated (1: use cvStereoCalibrate(), 2: compute fundamental
// matrix separately) stereo. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* path, const char* imageList, int useUncalibrated)
{
CvRect roi1, roi2;
int nx = 0, ny = 0;
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes = 0, n, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<CvPoint2D32f> temp_points[2];
vector<int> npoints;
// vector<uchar> active[2];
int is_found[2] = {0, 0};
vector<CvPoint2D32f> temp;
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat matR = cvMat(3, 3, CV_64F, R );
CvMat matT = cvMat(3, 1, CV_64F, T );
CvMat matE = cvMat(3, 3, CV_64F, E );
CvMat matF = cvMat(3, 3, CV_64F, F );
CvMat matQ = cvMat(4, 4, CV_64FC1, Q);
char buf[1024];
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
return;
}
if( !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &nx, &ny) != 2 )
return;
n = nx*ny;
temp.resize(n);
temp_points[0].resize(n);
temp_points[1].resize(n);
for(i=0;;i++)
{
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = temp_points[lr];//points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
char fullpath[1024];
sprintf(fullpath, "%s/%s", path, buf);
IplImage* img = cvLoadImage( fullpath, 0 );
if( !img )
{
printf("Cannot read file %s\n", fullpath);
return;
}
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(cvSize(img->width*s,img->height*s),
img->depth, img->nChannels );
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners( timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners( cimg, cvSize(nx, ny), &temp[0],
count, result );
IplImage* cimg1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
cvResize(cimg, cimg1);
cvShowImage( "corners", cimg1 );
cvReleaseImage( &cimg );
cvReleaseImage( &cimg1 );
int c = cvWaitKey(1000);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
//N = pts.size();
//pts.resize(N + n, cvPoint2D32f(0,0));
//active[lr].push_back((uchar)result);
is_found[lr] = result > 0 ? 1 : 0;
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix( img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01) );
copy( temp.begin(), temp.end(), pts.begin() );
}
cvReleaseImage( &img );
if(lr)
{
if(is_found[0] == 1 && is_found[1] == 1)
{
assert(temp_points[0].size() == temp_points[1].size());
int current_size = points[0].size();
points[0].resize(current_size + temp_points[0].size(), cvPoint2D32f(0.0, 0.0));
points[1].resize(current_size + temp_points[1].size(), cvPoint2D32f(0.0, 0.0));
copy(temp_points[0].begin(), temp_points[0].end(), points[0].begin() + current_size);
copy(temp_points[1].begin(), temp_points[1].end(), points[1].begin() + current_size);
nframes++;
printf("Pair successfully detected...\n");
}
is_found[0] = 0;
is_found[1] = 0;
}
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] =
cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy( objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n );
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate( &_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &matR, &matT, &matE, &matF,
cvTermCriteria(CV_TERMCRIT_ITER+
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_FIX_K3);
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints( &_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1 );
cvUndistortPoints( &_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2 );
cvComputeCorrespondEpilines( &_imagePoints1, 1, &matF, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &matF, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err = fabs(points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
// save intrinsic parameters
CvFileStorage* fstorage = cvOpenFileStorage("intrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(fstorage, "M1", &_M1);
cvWrite(fstorage, "D1", &_D1);
cvWrite(fstorage, "M2", &_M2);
cvWrite(fstorage, "D2", &_D2);
cvReleaseFileStorage(&fstorage);
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height,
imageSize.width, CV_16S );
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify( &_M1, &_M2, &_D1, &_D2, imageSize,
&matR, &matT,
&_R1, &_R2, &_P1, &_P2, &matQ,
CV_CALIB_ZERO_DISPARITY,
1, imageSize, &roi1, &roi2);
CvFileStorage* file = cvOpenFileStorage("extrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(file, "R", &matR);
cvWrite(file, "T", &matT);
cvWrite(file, "R1", &_R1);
cvWrite(file, "R2", &_R2);
cvWrite(file, "P1", &_P1);
cvWrite(file, "P2", &_P2);
cvWrite(file, "Q", &matQ);
cvReleaseFileStorage(&file);
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
if(!isVerticalStereo)
roi2.x += imageSize.width;
else
roi2.y += imageSize.height;
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat( &_imagePoints1,
&_imagePoints2, &matF);
cvStereoRectifyUncalibrated( &_imagePoints1,
&_imagePoints2, &matF,
imageSize,
&_H1, &_H2, 3);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D1,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
int main(int argc, char** argv)
{
    // Entry point for the stereo calibration sample.
    // Optional args: argv[1] = directory containing the chessboard images,
    //                argv[2] = text file listing the left/right image pairs.
    if(argc > 1 && !strcmp(argv[1], "--help"))
    {
        // Fixed typo in user-facing message: "wtih" -> "with".
        printf("Usage:\n ./stereo_calib <path to images> <file with image list>\n");
        return 0;
    }
    // Defaults: current directory and "stereo_calib.txt".
    // NOTE(review): the final 0 presumably selects the calibrated (Bouguet)
    // rectification path (useUncalibrated == 0) inside StereoCalib — confirm
    // against StereoCalib's signature, which is outside this view.
    StereoCalib(argc > 1 ? argv[1] : ".", argc > 2 ? argv[2] : "stereo_calib.txt", 0);
    return 0;
}

Some files were not shown because too many files have changed in this diff Show More