Normalize line endings and whitespace

OpenCV Buildbot authored 2012-10-17 11:12:04 +04:00, committed by Andrey Kamaev
parent 0442bca235
commit 81f826db2b
1511 changed files with 258678 additions and 258624 deletions

View File

@@ -1,12 +1,12 @@
#http://www.cmake.org/cmake/help/cmake2.6docs.html
cmake_minimum_required (VERSION 2.6)
project (OpenGL_Qt_Binding)
FIND_PACKAGE( OpenCV REQUIRED )
find_package (OpenGL REQUIRED)
ADD_EXECUTABLE(OpenGL_Qt_Binding main.cpp)
TARGET_LINK_LIBRARIES(OpenGL_Qt_Binding ${OpenCV_LIBS} ${OPENGL_LIBRARIES} )
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cube4.avi ${CMAKE_CURRENT_BINARY_DIR}/cube4.avi COPYONLY)

View File

@@ -1,261 +1,261 @@
//Yannick Verdie 2010
//--- Please read help() below: ---
#include <iostream>
#include <vector>
#include <opencv/highgui.h>
#include <GL/gl.h>
#include <opencv/cxcore.h>
#include <opencv/cv.h>
using namespace std;
using namespace cv;
void help()
{
cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
" and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
"It works off of the video: cube4.avi\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n\n"
" 1). This demo is mainly based on work from Javier Barandiaran Martirena\n"
" See this page http://opencv.willowgarage.com/wiki/Posit.\n"
" 2). This is a demo to illustrate how to use **OpenGL Callback**.\n"
" 3). You need Qt binding to compile this sample with OpenGL support enabled.\n"
" 4). The features' detection is very basic and could highly be improved \n"
" (basic thresholding tuned for the specific video) but 2).\n"
" 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
}
#define FOCAL_LENGTH 600
#define CUBE_SIZE 10
void renderCube(float size)
{
glBegin(GL_QUADS);
// Front Face
glNormal3f( 0.0f, 0.0f, 1.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
// Back Face
glNormal3f( 0.0f, 0.0f,-1.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( 0.0f, size, size);
glVertex3f( size, size, size);
glVertex3f( size, 0.0f, size);
// Top Face
glNormal3f( 0.0f, 1.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( size, size, size);
glVertex3f( 0.0f, size, size);
// Bottom Face
glNormal3f( 0.0f,-1.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( size, 0.0f, size);
glVertex3f( size, 0.0f, 0.0f);
// Right face
glNormal3f( 1.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, 0.0f, size);
glVertex3f( size, size, size);
glVertex3f( size, size, 0.0f);
// Left Face
glNormal3f(-1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( 0.0f, size, size);
glVertex3f( 0.0f, 0.0f, size);
glEnd();
}
void on_opengl(void* param)
{
//Draw the object with the estimated pose
glLoadIdentity();
glScalef( 1.0f, 1.0f, -1.0f);
glMultMatrixf( (float*)param );
glEnable( GL_LIGHTING );
glEnable( GL_LIGHT0 );
glEnable( GL_BLEND );
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
renderCube( CUBE_SIZE );
glDisable(GL_BLEND);
glDisable( GL_LIGHTING );
}
void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
{
//Create the model points
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
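//Note: cvPOSIT treats the first model point as the origin of the object frame,
//so the estimated pose below is expressed relative to that corner of the cube.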
void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source, IplImage* grayImage)
{
cvCvtColor(source,grayImage,CV_RGB2GRAY);
cvSmooth( grayImage, grayImage,CV_GAUSSIAN,11);
cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);
cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV);//25
Mat MgrayImage = grayImage;
//For debug
//MgrayImage = MgrayImage.clone();//deep copy
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));
if (contours.size() == srcImagePoints_temp.size())
{
for(int i = 0 ; i<contours.size(); i++ )
{
p.x = p.y = 0;
for(int j = 0 ; j<contours[i].size(); j++ )
p+=contours[i][j];
srcImagePoints_temp.at(i)=cvPoint2D32f(float(p.x)/contours[i].size(),float(p.y)/contours[i].size());
}
//Keep the 4 corners in a fixed order:
//  index 0 - largest y, index 1 - largest x, index 2 - smallest x, index 3 - smallest y
//get point 0;
int index = 0;
for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(0) = srcImagePoints_temp.at(index);
//get point 1;
index = 0;
for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(1) = srcImagePoints_temp.at(index);
//get point 2;
index = 0;
for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(2) = srcImagePoints_temp.at(index);
//get point 3;
index = 0;
for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(3) = srcImagePoints_temp.at(index);
Mat Msource = source;
stringstream ss;
for(int i = 0 ; i<srcImagePoints_temp.size(); i++ )
{
ss<<i;
circle(Msource,srcImagePoints->at(i),5,CV_RGB(255,0,0));
putText( Msource, ss.str(), srcImagePoints->at(i),CV_FONT_HERSHEY_SIMPLEX,1,CV_RGB(255,0,0));
ss.str("");
//new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source->width/2,source->height/2-srcImagePoints_temp.at(i).y);
}
}
}
void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
{
//coordinate system returned is relative to the first 3D input point
for (int f=0; f<3; f++)
{
for (int c=0; c<3; c++)
{
posePOSIT[c*4+f] = rotationMatrix[f*3+c]; //transposed
}
}
posePOSIT[3] = 0.0;
posePOSIT[7] = 0.0;
posePOSIT[11] = 0.0;
posePOSIT[12] = translationVector[0];
posePOSIT[13] = translationVector[1];
posePOSIT[14] = translationVector[2];
posePOSIT[15] = 1.0;
}
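//Illustrative helper, not part of the original sample: the 16-float array built
//above is column-major as OpenGL expects, with the rotation in the upper-left
//3x3 block and the translation in elements 12..14. Printing it row by row makes
//that layout easy to check.
static void printPose(const float pose[16])
{
    for (int row = 0; row < 4; row++)
        cout << pose[row] << " " << pose[4 + row] << " "
             << pose[8 + row] << " " << pose[12 + row] << endl;
}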
int main(int argc, char *argv[])
{
CvCapture* video = cvCaptureFromFile("cube4.avi");
CV_Assert(video);
IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);
IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
//For debug
//cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
cvCreateOpenGLCallback("POSIT",on_opengl,OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
//Create the POSIT object with the model points
CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );
CvMatr32f rotation_matrix = new float[9];
CvVect32f translation_vector = new float[3];
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
while(cvWaitKey(33) != 27)
{
source=cvQueryFrame(video);
cvShowImage("original",source);
foundCorners(&srcImagePoints,source,grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
cvShowImage("POSIT",source);
//For debug
//cvShowImage("tempGray",grayImage);
if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);
}
cvDestroyAllWindows();
cvReleaseImage(&grayImage);
cvReleaseCapture(&video);
cvReleasePOSITObject(&positObject);
return 0;
}

View File

@@ -31,10 +31,10 @@ const string plotsDir = "/plots";
static void help(char** argv)
{
cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
<< "It shows how to use detectors, descriptors and recognition methods \n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< "Call: \n"
cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
<< "It shows how to use detectors, descriptors and recognition methods \n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< "Call: \n"
<< "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n"
<< " or: \n"
<< " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n"
@@ -2514,7 +2514,7 @@ int main(int argc, char** argv)
{
if( argc != 3 && argc != 6 )
{
help(argv);
return -1;
}

View File

@@ -16,7 +16,7 @@ static void help()
" ./bgfg_segm [--camera]=<use camera, if this key is present>, [--file_name]=<path to movie file> \n\n");
}
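//Example invocations, assuming the sample binary is called bgfg_segm as in the
//usage string above (the option syntax follows the keys table below):
//  ./bgfg_segm --camera
//  ./bgfg_segm --file_name=tree.avi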
const char* keys =
{
"{c camera | | use camera or not}"
"{fn file_name|tree.avi | movie file }"
@@ -25,18 +25,18 @@ const char* keys =
//this is a sample for foreground detection functions
int main(int argc, const char** argv)
{
help();
CommandLineParser parser(argc, argv, keys);
bool useCamera = parser.has("camera");
string file = parser.get<string>("file_name");
VideoCapture cap;
bool update_bg_model = true;
if( useCamera )
cap.open(0);
else
cap.open(file.c_str());
parser.printMessage();
@@ -45,25 +45,25 @@ int main(int argc, const char** argv)
printf("can not open camera or video file\n");
return -1;
}
namedWindow("image", CV_WINDOW_NORMAL);
namedWindow("foreground mask", CV_WINDOW_NORMAL);
namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL);
BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);
Mat img, fgmask, fgimg;
for(;;)
{
cap >> img;
if( img.empty() )
break;
//cvtColor(_img, img, COLOR_BGR2GRAY);
if( fgimg.empty() )
fgimg.create(img.size(), img.type());
@@ -88,9 +88,9 @@ int main(int argc, const char** argv)
{
update_bg_model = !update_bg_model;
if(update_bg_model)
printf("Background update is on\n");
printf("Background update is on\n");
else
printf("Background update is off\n");
printf("Background update is off\n");
}
}

View File

@@ -51,7 +51,7 @@ static void help()
"box.png box_in_scene.png " << endl;
}
const char* keys =
{
"{@first_image | box.png | the first image}"
"{@second_image | box_in_scene.png | the second image}"

View File

@@ -8,13 +8,13 @@ using namespace std;
static void help()
{
cout
<< "\nThis program illustrates the use of findContours and drawContours\n"
<< "The original image is put up along with the image of drawn contours\n"
<< "Usage:\n"
<< "./contours2\n"
<< "\nA trackbar is put up which controls the contour level from -3 to 3\n"
<< endl;
}
const int w = 500;
@@ -25,7 +25,7 @@ vector<Vec4i> hierarchy;
static void on_trackbar(int, void*)
{
Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
int _levels = levels - 3;
drawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128,255,255),
3, CV_AA, hierarchy, std::abs(_levels) );
@@ -38,8 +38,8 @@ int main( int argc, char**)
Mat img = Mat::zeros(w, w, CV_8UC1);
if(argc > 1)
{
help();
return -1;
}
//Draw 6 faces
for( int i = 0; i < 6; i++ )

View File

@@ -233,7 +233,7 @@ int main(int argc, char** argv)
{
if( argc != 7 && argc != 8 )
{
help(argv);
return -1;
}
@@ -256,7 +256,7 @@ int main(int argc, char** argv)
cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
return -1;
}
cout << "< Reading the images..." << endl;
Mat img1 = imread( argv[5] ), img2;
if( !isWarpPerspective )

View File

@@ -59,40 +59,40 @@ using namespace cv;
The algorithm:
for each tested combination of detector+descriptor+matcher:
create detector, descriptor and matcher,
load their params if they are there, otherwise use the default ones and save them
for each dataset:
load reference image
detect keypoints in it, compute descriptors
for each transformed image:
load the image
load the transformation matrix
detect keypoints in it too, compute descriptors
find matches
transform keypoints from the first image using the ground-truth matrix
compute the number of matched keypoints, i.e. for each pair (i,j) found by a matcher compare
j-th keypoint from the second image with the transformed i-th keypoint. If they are close, +1.
so, we have:
N - number of keypoints in the first image that are also visible
(after transformation) on the second image
N1 - number of keypoints in the first image that have been matched.
n - number of the correct matches found by the matcher
n/N1 - precision
n/N - recall (?)
we store (N, n/N1, n/N) (where N is stored primarily for tuning the detector's thresholds,
in order to semi-equalize their keypoints counts)
*/
typedef Vec3f TVec; // (N, n/N1, n/N) - see above
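//Illustrative only, not part of the original file: how the three stored values
//are read back as precision/recall figures when post-processing the results.
static void printTVec(const TVec& v)
{
    cout << "visible keypoints N = " << v[0]
         << ", precision n/N1 = " << v[1]
         << ", recall n/N = " << v[2] << endl;
}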
@@ -138,7 +138,7 @@ static void transformKeypoints( const vector<KeyPoint>& kp,
size_t i, n = kp.size();
contours.resize(n);
vector<Point> temp;
for( i = 0; i < n; i++ )
{
ellipse2Poly(Point2f(kp[i].pt.x*scale, kp[i].pt.y*scale),
@@ -157,7 +157,7 @@ static TVec proccessMatches( Size imgsize,
double overlapThreshold )
{
const double visibilityThreshold = 0.6;
// 1. [preprocessing] find bounding rect for each element of kp1t_contours and kp_contours.
// 2. [cross-check] for each DMatch (iK, i1)
// update best_match[i1] using DMatch::distance.
@@ -167,18 +167,18 @@ static TVec proccessMatches( Size imgsize,
// if best_match[i1] is initialized, increment N1
// if kp_contours[best_match[i1]] and kp1t_contours[i1] overlap by overlapThreshold*100%,
// increment n. Use bounding rects to speedup this step
int i, size1 = (int)kp1t_contours.size(), size = (int)kp_contours.size(), msize = (int)matches.size();
vector<DMatch> best_match(size1);
vector<Rect> rects1(size1), rects(size);
// preprocess
for( i = 0; i < size1; i++ )
rects1[i] = boundingRect(kp1t_contours[i]);
for( i = 0; i < size; i++ )
rects[i] = boundingRect(kp_contours[i]);
// cross-check
for( i = 0; i < msize; i++ )
{
@@ -188,35 +188,35 @@ static TVec proccessMatches( Size imgsize,
if( best_match[i1].trainIdx < 0 || best_match[i1].distance > m.distance )
best_match[i1] = m;
}
int N = 0, N1 = 0, n = 0;
// overlapping
for( i = 0; i < size1; i++ )
{
int i1 = i, iK = best_match[i].queryIdx;
if( iK >= 0 )
N1++;
Rect r = rects1[i] & Rect(0, 0, imgsize.width, imgsize.height);
if( r.area() < visibilityThreshold*rects1[i].area() )
continue;
N++;
if( iK < 0 || (rects1[i1] & rects[iK]).area() == 0 )
continue;
double n_area = intersectConvexConvex(kp1t_contours[i1], kp_contours[iK], noArray(), true);
if( n_area == 0 )
continue;
double area1 = contourArea(kp1t_contours[i1], false);
double area = contourArea(kp_contours[iK], false);
double ratio = n_area/(area1 + area - n_area);
n += ratio >= overlapThreshold;
}
return TVec((float)N, (float)n/std::max(N1, 1), (float)n/std::max(N, 1));
}
@@ -228,7 +228,7 @@ static void saveResults(const string& dir, const string& name, const string& dsn
string fname2 = format("%s%s_%s_recall.csv", dir.c_str(), name.c_str(), dsname.c_str());
FILE* f1 = fopen(fname1.c_str(), "wt");
FILE* f2 = fopen(fname2.c_str(), "wt");
for( size_t i = 0; i < results.size(); i++ )
{
fprintf(f1, "%d, %.1f\n", xvals[i], results[i][1]*100);
@@ -251,31 +251,31 @@ int main(int argc, char** argv)
//"SURF_BF", "SURF", "SURF", "BruteForce",
0
};
static const char* datasets[] =
{
"bark", "bikes", "boat", "graf", "leuven", "trees", "ubc", "wall", 0
};
static const int imgXVals[] = { 2, 3, 4, 5, 6 }; // if scale, blur or light changes
static const int viewpointXVals[] = { 20, 30, 40, 50, 60 }; // if viewpoint changes
static const int jpegXVals[] = { 60, 80, 90, 95, 98 }; // if jpeg compression
const double overlapThreshold = 0.6;
vector<vector<vector<TVec> > > results; // indexed as results[ddm][dataset][testcase]
string dataset_dir = string(getenv("OPENCV_TEST_DATA_PATH")) +
"/cv/detectors_descriptors_evaluation/images_datasets";
string dir=argc > 1 ? argv[1] : ".";
if( dir[dir.size()-1] != '\\' && dir[dir.size()-1] != '/' )
dir += "/";
int result = system(("mkdir " + dir).c_str());
CV_Assert(result == 0);
for( int i = 0; ddms[i*4] != 0; i++ )
{
const char* name = ddms[i*4];
@@ -283,40 +283,40 @@ int main(int argc, char** argv)
const char* descriptor_name = ddms[i*4+2];
const char* matcher_name = ddms[i*4+3];
string params_filename = dir + string(name) + "_params.yml";
cout << "Testing " << name << endl;
Ptr<FeatureDetector> detector = FeatureDetector::create(detector_name);
Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create(descriptor_name);
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(matcher_name);
saveloadDDM( params_filename, detector, descriptor, matcher );
results.push_back(vector<vector<TVec> >());
for( int j = 0; datasets[j] != 0; j++ )
{
const char* dsname = datasets[j];
cout << "\ton " << dsname << " ";
cout.flush();
const int* xvals = strcmp(dsname, "ubc") == 0 ? jpegXVals :
strcmp(dsname, "graf") == 0 || strcmp(dsname, "wall") == 0 ? viewpointXVals : imgXVals;
vector<KeyPoint> kp1, kp;
vector<DMatch> matches;
vector<vector<Point2f> > kp1t_contours, kp_contours;
Mat desc1, desc;
Mat img1 = imread(format("%s/%s/img1.png", dataset_dir.c_str(), dsname), 0);
CV_Assert( !img1.empty() );
detector->detect(img1, kp1);
descriptor->compute(img1, kp1, desc1);
results[i].push_back(vector<TVec>());
for( int k = 2; ; k++ )
{
cout << ".";
@@ -324,20 +324,20 @@ int main(int argc, char** argv)
Mat imgK = imread(format("%s/%s/img%d.png", dataset_dir.c_str(), dsname, k), 0);
if( imgK.empty() )
break;
detector->detect(imgK, kp);
descriptor->compute(imgK, kp, desc);
matcher->match( desc, desc1, matches );
Mat H = loadMat(format("%s/%s/H1to%dp.xml", dataset_dir.c_str(), dsname, k));
transformKeypoints( kp1, kp1t_contours, H );
transformKeypoints( kp, kp_contours, Mat::eye(3, 3, CV_64F));
TVec r = proccessMatches( imgK.size(), matches, kp1t_contours, kp_contours, overlapThreshold );
results[i][j].push_back(r);
}
saveResults(dir, name, dsname, results[i][j], xvals);
cout << endl;
}

View File

@@ -9,7 +9,7 @@ int main( int /*argc*/, char** /*argv*/ )
const int N1 = (int)sqrt((double)N);
const Scalar colors[] =
{
Scalar(0,0,255), Scalar(0,255,0),
Scalar(0,255,255),Scalar(255,255,0)
};
@@ -27,7 +27,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
// form the training samples
Mat samples_part = samples.rowRange(i*nsamples/N, (i+1)*nsamples/N );
Scalar mean(((i%N1)+1)*img.rows/(N1+1),
((i/N1)+1)*img.rows/(N1+1));
Scalar sigma(30,30);

View File

@@ -59,19 +59,19 @@ using namespace std;
int main(int argc, char * argv[]) {
/*
Note: the vocabulary and training data is specifically made for this openCV
example. It is not recommended for use with other datasets as it is
intentionally small to reduce baggage in the openCV project.
A new vocabulary can be generated using the supplied BOWMSCtrainer (or other
clustering method such as K-means
New training data can be generated by extracting bag-of-words using the
openCV BOWImgDescriptorExtractor class.
vocabulary, chow-liu tree, training data, and test data can all be saved and
loaded using openCV's FileStorage class and it is not necessary to generate
data each time as done in this example
*/
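/* A minimal sketch of the FileStorage round trip mentioned above; the file and
node names are placeholders, not the ones this sample uses:

    cv::FileStorage fs("vocabulary.yml", cv::FileStorage::WRITE);
    fs << "vocabulary" << vocabulary;   // cv::Mat of BOW cluster centres
    fs.release();

    fs.open("vocabulary.yml", cv::FileStorage::READ);
    fs["vocabulary"] >> vocabulary;     // reload on later runs instead of re-clustering
    fs.release();
*/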

View File

@@ -155,7 +155,7 @@ int main(int ac, char** av)
//read from string
{
cout << "Read data from string\n";
string dataString =
"%YAML:1.0\n"
"mdata:\n"
" A: 97\n"

View File

@@ -9,10 +9,10 @@ using namespace cv;
static void help()
{
cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n"
"and then grabcut will attempt to segment it out.\n"
"Call:\n"
"./grabcut <image_name>\n"
"\nSelect a rectangular area around the object you want to segment\n" <<
"and then grabcut will attempt to segment it out.\n"
"Call:\n"
"./grabcut <image_name>\n"
"\nSelect a rectangular area around the object you want to segment\n" <<
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tr - restore the original image\n"
@@ -277,20 +277,20 @@ int main( int argc, char** argv )
{
if( argc!=2 )
{
help();
return 1;
}
string filename = argv[1];
if( filename.empty() )
{
cout << "\nDurn, couldn't read in " << argv[1] << endl;
cout << "\nDurn, couldn't read in " << argv[1] << endl;
return 1;
}
Mat image = imread( filename, 1 );
if( image.empty() )
{
cout << "\n Durn, couldn't read image filename " << filename << endl;
return 1;
}
help();

View File

@@ -437,7 +437,7 @@ int build_mlp_classifier( char* data_filename,
cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
mlp.create( &layer_sizes );
printf( "Training the classifier (may take a few minutes)...\n");
#if 1
int method = CvANN_MLP_TrainParams::BACKPROP;
double method_param = 0.001;
@@ -447,7 +447,7 @@ int build_mlp_classifier( char* data_filename,
double method_param = 0.1;
int max_iter = 1000;
#endif
mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,max_iter,0.01),
method, method_param));

View File

@@ -1,7 +1,7 @@
#include "opencv2/core/core.hpp"
#include <iostream>
const char* keys =
{
"{ b build | | print complete build info }"
"{ h help | | print this help }"
@@ -25,7 +25,7 @@ int main(int argc, const char* argv[])
}
else
{
std::cout << "OpenCV " << CV_VERSION << std::endl;
std::cout << "OpenCV " << CV_VERSION << std::endl;
}
return 0;

View File

@@ -1,357 +1,357 @@
#include <cstring>
#include <cmath>
#include <iostream>
#include <sstream>
#include "opencv2/core/core.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/contrib/contrib.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
class PointCloudRenderer
{
public:
PointCloudRenderer(const Mat& points, const Mat& img, double scale);
void onMouseEvent(int event, int x, int y, int flags);
void draw();
void update(int key, double aspect);
int fov_;
private:
int mouse_dx_;
int mouse_dy_;
double yaw_;
double pitch_;
Point3d pos_;
TickMeter tm_;
static const int step_;
int frame_;
GlCamera camera_;
GlArrays pointCloud_;
string fps_;
};
bool stop = false;
static void mouseCallback(int event, int x, int y, int flags, void* userdata)
{
if (stop)
return;
PointCloudRenderer* renderer = static_cast<PointCloudRenderer*>(userdata);
renderer->onMouseEvent(event, x, y, flags);
}
static void openGlDrawCallback(void* userdata)
{
if (stop)
return;
PointCloudRenderer* renderer = static_cast<PointCloudRenderer*>(userdata);
renderer->draw();
}
int main(int argc, const char* argv[])
{
const char* keys =
"{ l left | | left image file name }"
"{ r right | | right image file name }"
"{ i intrinsic | | intrinsic camera parameters file name }"
"{ e extrinsic | | extrinsic camera parameters file name }"
"{ d ndisp | 256 | number of disparities }"
"{ s scale | 1.0 | scale factor for point cloud }"
"{ h help | | print help message }";
CommandLineParser cmd(argc, argv, keys);
if (cmd.has("help"))
{
cmd.printMessage();
return 0;
}
string left = cmd.get<string>("left");
string right = cmd.get<string>("right");
string intrinsic = cmd.get<string>("intrinsic");
string extrinsic = cmd.get<string>("extrinsic");
int ndisp = cmd.get<int>("ndisp");
double scale = cmd.get<double>("scale");
if (!cmd.check())
{
cmd.printErrors();
return 0;
}
if (left.empty() || right.empty())
{
cout << "Missed input images" << endl;
cout << "Avaible options:" << endl;
cmd.printMessage();
return 0;
}
if (intrinsic.empty() ^ extrinsic.empty())
{
cout << "Boss camera parameters must be specified" << endl;
cout << "Avaible options:" << endl;
cmd.printMessage();
return 0;
}
Mat imgLeftColor = imread(left, IMREAD_COLOR);
Mat imgRightColor = imread(right, IMREAD_COLOR);
if (imgLeftColor.empty())
{
cout << "Can't load image " << left << endl;
return -1;
}
if (imgRightColor.empty())
{
cout << "Can't load image " << right << endl;
return -1;
}
Mat Q = Mat::eye(4, 4, CV_32F);
if (!intrinsic.empty() && !extrinsic.empty())
{
FileStorage fs;
// reading intrinsic parameters
fs.open(intrinsic, CV_STORAGE_READ);
if (!fs.isOpened())
{
cout << "Failed to open file " << intrinsic << endl;
return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
// reading extrinsic parameters
fs.open(extrinsic, CV_STORAGE_READ);
if (!fs.isOpened())
{
cout << "Failed to open file " << extrinsic << endl;
return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
Size img_size = imgLeftColor.size();
Rect roi1, roi2;
stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
Mat img1r, img2r;
remap(imgLeftColor, img1r, map11, map12, INTER_LINEAR);
remap(imgRightColor, img2r, map21, map22, INTER_LINEAR);
imgLeftColor = img1r(roi1);
imgRightColor = img2r(roi2);
}
Mat imgLeftGray, imgRightGray;
cvtColor(imgLeftColor, imgLeftGray, COLOR_BGR2GRAY);
cvtColor(imgRightColor, imgRightGray, COLOR_BGR2GRAY);
cvtColor(imgLeftColor, imgLeftColor, COLOR_BGR2RGB);
Mat disp, points;
StereoBM bm(0, ndisp);
bm(imgLeftGray, imgRightGray, disp);
disp.convertTo(disp, CV_8U, 1.0 / 16.0);
disp = disp(Range(21, disp.rows - 21), Range(ndisp, disp.cols - 21)).clone();
imgLeftColor = imgLeftColor(Range(21, imgLeftColor.rows - 21), Range(ndisp, imgLeftColor.cols - 21)).clone();
reprojectImageTo3D(disp, points, Q);
const string windowName = "OpenGL Sample";
namedWindow(windowName, WINDOW_OPENGL);
resizeWindow(windowName, 400, 400);
PointCloudRenderer renderer(points, imgLeftColor, scale);
createTrackbar("Fov", windowName, &renderer.fov_, 100);
setMouseCallback(windowName, mouseCallback, &renderer);
setOpenGlDrawCallback(windowName, openGlDrawCallback, &renderer);
for(;;)
{
int key = waitKey(10);
if (key >= 0)
key = key & 0xff;
if (key == 27)
{
stop = true;
break;
}
double aspect = getWindowProperty(windowName, WND_PROP_ASPECT_RATIO);
key = tolower(key);
renderer.update(key, aspect);
updateWindow(windowName);
}
return 0;
}
const int PointCloudRenderer::step_ = 20;
PointCloudRenderer::PointCloudRenderer(const Mat& points, const Mat& img, double scale)
{
mouse_dx_ = 0;
mouse_dy_ = 0;
fov_ = 0;
yaw_ = 0.0;
pitch_ = 0.0;
frame_ = 0;
camera_.setScale(Point3d(scale, scale, scale));
pointCloud_.setVertexArray(points);
pointCloud_.setColorArray(img, false);
tm_.start();
}
inline int clamp(int val, int minVal, int maxVal)
{
return max(min(val, maxVal), minVal);
}
void PointCloudRenderer::onMouseEvent(int event, int x, int y, int /*flags*/)
{
static int oldx = x;
static int oldy = y;
static bool moving = false;
if (event == EVENT_LBUTTONDOWN)
{
oldx = x;
oldy = y;
moving = true;
}
else if (event == EVENT_LBUTTONUP)
{
moving = false;
}
if (moving)
{
mouse_dx_ = oldx - x;
mouse_dy_ = oldy - y;
}
else
{
mouse_dx_ = 0;
mouse_dy_ = 0;
}
const int mouseClamp = 300;
mouse_dx_ = clamp(mouse_dx_, -mouseClamp, mouseClamp);
mouse_dy_ = clamp(mouse_dy_, -mouseClamp, mouseClamp);
}
static Point3d rotate(Point3d v, double yaw, double pitch)
{
Point3d t1;
t1.x = v.x * cos(-yaw / 180.0 * CV_PI) - v.z * sin(-yaw / 180.0 * CV_PI);
t1.y = v.y;
t1.z = v.x * sin(-yaw / 180.0 * CV_PI) + v.z * cos(-yaw / 180.0 * CV_PI);
Point3d t2;
t2.x = t1.x;
t2.y = t1.y * cos(pitch / 180.0 * CV_PI) - t1.z * sin(pitch / 180.0 * CV_PI);
t2.z = t1.y * sin(pitch / 180.0 * CV_PI) + t1.z * cos(pitch / 180.0 * CV_PI);
return t2;
}
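//rotate() applies a yaw rotation about the Y axis followed by a pitch rotation
//about the X axis; both angles are given in degrees and converted with CV_PI/180.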
void PointCloudRenderer::update(int key, double aspect)
{
const Point3d dirVec(0.0, 0.0, -1.0);
const Point3d upVec(0.0, 1.0, 0.0);
const Point3d leftVec(-1.0, 0.0, 0.0);
const double posStep = 0.1;
const double mouseStep = 0.001;
camera_.setPerspectiveProjection(30.0 + fov_ / 100.0 * 40.0, aspect, 0.1, 1000.0);
yaw_ += mouse_dx_ * mouseStep;
pitch_ += mouse_dy_ * mouseStep;
if (key == 'w')
pos_ += posStep * rotate(dirVec, yaw_, pitch_);
else if (key == 's')
pos_ -= posStep * rotate(dirVec, yaw_, pitch_);
else if (key == 'a')
pos_ += posStep * rotate(leftVec, yaw_, pitch_);
else if (key == 'd')
pos_ -= posStep * rotate(leftVec, yaw_, pitch_);
else if (key == 'q')
pos_ += posStep * rotate(upVec, yaw_, pitch_);
else if (key == 'e')
pos_ -= posStep * rotate(upVec, yaw_, pitch_);
camera_.setCameraPos(pos_, yaw_, pitch_, 0.0);
tm_.stop();
if (frame_++ >= step_)
{
ostringstream ostr;
ostr << "FPS: " << step_ / tm_.getTimeSec();
fps_ = ostr.str();
frame_ = 0;
tm_.reset();
}
tm_.start();
}
void PointCloudRenderer::draw()
{
camera_.setupProjectionMatrix();
camera_.setupModelViewMatrix();
render(pointCloud_);
render(fps_, GlFont::get("Courier New", 16), Scalar::all(255), Point2d(3.0, 0.0));
}

View File

@@ -29,9 +29,9 @@ static void help()
static void writeOpticalFlowToFile(const Mat& flow, FILE* file) {
int cols = flow.cols;
int rows = flow.rows;
fprintf(file, "PIEH");
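// "PIEH" is the magic tag of the Middlebury .flo format: it is followed by the
// width and height as 32-bit integers and then the flow field as interleaved
// (u, v) float pairs.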
if (fwrite(&cols, sizeof(int), 1, file) != 1 ||
fwrite(&rows, sizeof(int), 1, file) != 1) {
printf(APP_NAME "writeOpticalFlowToFile : problem writing header\n");
@@ -53,7 +53,7 @@ static void writeOpticalFlowToFile(const Mat& flow, FILE* file) {
static void run(int argc, char** argv) {
if (argc < 3) {
printf(APP_NAME "Wrong number of command line arguments for mode `run`: %d (expected %d)\n",
printf(APP_NAME "Wrong number of command line arguments for mode `run`: %d (expected %d)\n",
argc, 3);
exit(1);
}
@@ -81,20 +81,20 @@ static void run(int argc, char** argv) {
exit(1);
}
printf(APP_NAME "Read two images of size [rows = %d, cols = %d]\n",
printf(APP_NAME "Read two images of size [rows = %d, cols = %d]\n",
frame1.rows, frame1.cols);
Mat flow;
float start = (float)getTickCount();
calcOpticalFlowSF(frame1, frame2,
flow,
3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10);
printf(APP_NAME "calcOpticalFlowSF : %lf sec\n", (getTickCount() - start) / getTickFrequency());
FILE* file = fopen(argv[2], "wb");
if (file == NULL) {
printf(APP_NAME "Unable to open file '%s' for writing\n", argv[2]);
printf(APP_NAME "Unable to open file '%s' for writing\n", argv[2]);
exit(1);
}
printf(APP_NAME "Writing to file\n");
@@ -161,11 +161,11 @@ static float calc_rmse(Mat flow1, Mat flow2) {
static void eval(int argc, char** argv) {
if (argc < 2) {
printf(APP_NAME "Wrong number of command line arguments for mode `eval` : %d (expected %d)\n",
printf(APP_NAME "Wrong number of command line arguments for mode `eval` : %d (expected %d)\n",
argc, 2);
exit(1);
}
Mat flow1, flow2;
FILE* flow_file_1 = fopen(argv[0], "rb");
@@ -189,7 +189,7 @@ static void eval(int argc, char** argv) {
exit(1);
}
fclose(flow_file_2);
float rmse = calc_rmse(flow1, flow2);
printf("%lf\n", rmse);
}
@@ -208,12 +208,12 @@ int main(int argc, char** argv) {
run(new_argc, new_argv);
} else if ("eval" == mode) {
eval(new_argc, new_argv);
} else if ("help" == mode)
} else if ("help" == mode)
help();
else {
printf(APP_NAME "Unknown mode : %s\n", argv[1]);
printf(APP_NAME "Unknown mode : %s\n", argv[1]);
help();
}
return 0;
}

View File

@@ -21,12 +21,12 @@ namespace
void help(char** av)
{
cout << "\nThis program gets you started being able to read images from a list in a file\n"
"Usage:\n./" << av[0] << " image_list.yaml\n"
"Usage:\n./" << av[0] << " image_list.yaml\n"
<< "\tThis is a starter sample, to get you up and going in a copy pasta fashion.\n"
<< "\tThe program reads in an list of images from a yaml or xml file and displays\n"
<< "one at a time\n"
<< "\tTry running imagelist_creator to generate a list of images.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
}
bool readStringList(const string& filename, vector<string>& l)
@@ -71,7 +71,7 @@ int main(int ac, char** av)
vector<string> imagelist;
if (!readStringList(arg,imagelist))
{
cerr << "Failed to read image list\n" << endl;
help(av);
return 1;

View File

@@ -32,8 +32,8 @@ namespace {
}
int process(VideoCapture& capture) {
int n = 0;
char filename[200];
string window_name = "video | q or esc to quit";
cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
@@ -50,10 +50,10 @@ namespace {
case 27: //escape key
return 0;
case ' ': //Save an image
sprintf(filename,"filename%.3d.jpg",n++);
imwrite(filename,frame);
cout << "Saved " << filename << endl;
break;
default:
break;
}

View File

@@ -189,7 +189,7 @@ int main(int argc, char** argv)
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
M1 *= scale;
M2 *= scale;

View File

@@ -11,9 +11,9 @@ using namespace cv;
/** Global Variables */
const int alpha_slider_max = 100;
int alpha_slider;
double alpha;
double beta;
/** Matrices to store images */
Mat src1;
@@ -25,20 +25,20 @@ Mat dst;
* @brief Callback for trackbar
*/
void on_trackbar( int, void* )
{
alpha = (double) alpha_slider/alpha_slider_max ;
beta = ( 1.0 - alpha );
addWeighted( src1, alpha, src2, beta, 0.0, dst);
imshow( "Linear Blend", dst );
}
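//The callback realizes dst = alpha*src1 + (1 - alpha)*src2, so dragging the
//slider from 0 to alpha_slider_max cross-fades the display from src2 to src1.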
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
@@ -49,7 +49,7 @@ int main( int argc, char** argv )
if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
/// Initialize values
alpha_slider = 0;
/// Create Windows
@@ -57,12 +57,12 @@ int main( int argc, char** argv )
/// Create Trackbars
char TrackbarName[50];
sprintf( TrackbarName, "Alpha x %d", alpha_slider_max );
createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
/// Show some stuff
on_trackbar( alpha_slider, 0 );
/// Wait until user press some key
waitKey(0);
return 0;

View File

@@ -25,16 +25,16 @@ Mat new_image;
* @brief Called whenever any of alpha or beta changes
*/
void on_trackbar( int, void* )
{
Mat new_image = Mat::zeros( image.size(), image.type() );
for( int y = 0; y < image.rows; y++ )
{ for( int x = 0; x < image.cols; x++ )
{ for( int c = 0; c < 3; c++ )
{
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
}
}
}
imshow("New Image", new_image);
}
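//For reference only (not what the tutorial does): the same pointwise transform
//g(y,x) = alpha*f(y,x) + beta can be written without the explicit loops as
//  image.convertTo(new_image, -1, alpha, beta);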
@@ -42,14 +42,14 @@ void on_trackbar( int, void* )
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
/// Read image given by user
image = imread( argv[1] );
/// Initialize values
alpha = 1;
beta = 0;
@@ -59,12 +59,12 @@ int main( int argc, char** argv )
/// Create Trackbars
createTrackbar( "Contrast Trackbar", "New Image", &alpha, alpha_max, on_trackbar );
createTrackbar( "Brightness Trackbar", "New Image", &beta, beta_max, on_trackbar );
createTrackbar( "Brightness Trackbar", "New Image", &beta, beta_max, on_trackbar );
/// Show some stuff
imshow("Original Image", image);
imshow("New Image", image);
/// Wait until user press some key
waitKey();
return 0;

View File

@@ -1,205 +1,205 @@
#include <iostream> // for standard I/O
#include <string> // for strings
#include <iomanip> // for controlling float print precision
#include <sstream> // string to number conversion
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
using namespace std;
using namespace cv;
double getPSNR ( const Mat& I1, const Mat& I2);
Scalar getMSSIM( const Mat& I1, const Mat& I2);
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to read a video file with OpenCV. In addition, it tests the"
<< " similarity of two input videos first with PSNR, and for the frames below a PSNR " << endl
<< "trigger value, also with MSSIM."<< endl
<< "Usage:" << endl
<< "./video-source referenceVideo useCaseTestVideo PSNR_Trigger_Value Wait_Between_Frames " << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[], char *window_name)
{
help();
if (argc != 5)
{
cout << "Not enough parameters" << endl;
return -1;
}
stringstream conv;
const string sourceReference = argv[1],sourceCompareWith = argv[2];
int psnrTriggerValue, delay;
conv << argv[3] << endl << argv[4]; // put in the strings
conv >> psnrTriggerValue >> delay;// take out the numbers
char c;
int frameNum = -1; // Frame counter
VideoCapture captRefrnc(sourceReference),
captUndTst(sourceCompareWith);
if ( !captRefrnc.isOpened())
{
cout << "Could not open reference " << sourceReference << endl;
return -1;
}
if( !captUndTst.isOpened())
{
cout << "Could not open case test " << sourceCompareWith << endl;
return -1;
}
Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
cout << "Inputs have different size!!! Closing." << endl;
return -1;
}
const char* WIN_UT = "Under Test";
const char* WIN_RF = "Reference";
// Windows
namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " <<
setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl;
Mat frameReference, frameUnderTest;
double psnrV;
Scalar mssimV;
while( true) //Show the image captured in the window and repeat
{
captRefrnc >> frameReference;
captUndTst >> frameUnderTest;
if( frameReference.empty() || frameUnderTest.empty())
{
cout << " < < < Game over! > > > ";
break;
}
++frameNum;
cout <<"Frame:" << frameNum <<"# ";
///////////////////////////////// PSNR ////////////////////////////////////////////////////
psnrV = getPSNR(frameReference,frameUnderTest); //get PSNR
cout << setiosflags(ios::fixed) << setprecision(3) << psnrV << "dB";
//////////////////////////////////// MSSIM /////////////////////////////////////////////////
if (psnrV < psnrTriggerValue && psnrV)
{
mssimV = getMSSIM(frameReference,frameUnderTest);
cout << " MSSIM: "
<< " R " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[2] * 100 << "%"
<< " G " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[1] * 100 << "%"
<< " B " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[0] * 100 << "%";
}
cout << endl;
////////////////////////////////// Show Image /////////////////////////////////////////////
imshow( WIN_RF, frameReference);
imshow( WIN_UT, frameUnderTest);
c = cvWaitKey(delay);
if (c == 27) break;
}
return 0;
}
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse =sse /(double)(I1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
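//Minimal usage sketch, not part of the sample (file names are placeholders):
//getPSNR implements 10*log10(255^2 / MSE) and, by the early return above,
//reports 0 instead of +infinity for (nearly) identical images.
static double psnrOfFiles(const string& pathA, const string& pathB)
{
    Mat a = imread(pathA), b = imread(pathB);
    CV_Assert(!a.empty() && a.size() == b.size() && a.type() == b.type());
    return getPSNR(a, b);
}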
Scalar getMSSIM( const Mat& i1, const Mat& i2)
{
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
Mat mu1, mu2; // PRELIMINARY COMPUTING
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
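// Note on getMSSIM() above: per channel it evaluates the structural similarity index
//   SSIM = ((2*mu1*mu2 + C1) * (2*sigma12 + C2)) / ((mu1^2 + mu2^2 + C1) * (sigma1^2 + sigma2^2 + C2))
// over an 11x11 Gaussian window with sigma = 1.5, then averages the per-pixel map with
// mean(). C1 = (0.01*255)^2 and C2 = (0.03*255)^2 are the usual stabilizing constants.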
#include <iostream> // for standard I/O
#include <string> // for strings
#include <iomanip> // for controlling float print precision
#include <sstream> // string to number conversion
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
using namespace std;
using namespace cv;
double getPSNR ( const Mat& I1, const Mat& I2);
Scalar getMSSIM( const Mat& I1, const Mat& I2);
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to read a video file with OpenCV. In addition, it tests the"
<< " similarity of two input videos first with PSNR, and for the frames below a PSNR " << endl
<< "trigger value, also with MSSIM."<< endl
<< "Usage:" << endl
<< "./video-source referenceVideo useCaseTestVideo PSNR_Trigger_Value Wait_Between_Frames " << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[])
{
help();
if (argc != 5)
{
cout << "Not enough parameters" << endl;
return -1;
}
stringstream conv;
const string sourceReference = argv[1],sourceCompareWith = argv[2];
int psnrTriggerValue, delay;
conv << argv[3] << endl << argv[4]; // put in the strings
conv >> psnrTriggerValue >> delay;// take out the numbers
char c;
int frameNum = -1; // Frame counter
VideoCapture captRefrnc(sourceReference),
captUndTst(sourceCompareWith);
if ( !captRefrnc.isOpened())
{
cout << "Could not open reference " << sourceReference << endl;
return -1;
}
if( !captUndTst.isOpened())
{
cout << "Could not open case test " << sourceCompareWith << endl;
return -1;
}
Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
cout << "Inputs have different size!!! Closing." << endl;
return -1;
}
const char* WIN_UT = "Under Test";
const char* WIN_RF = "Reference";
// Windows
namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " <<
setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl;
Mat frameReference, frameUnderTest;
double psnrV;
Scalar mssimV;
while( true) //Show the image captured in the window and repeat
{
captRefrnc >> frameReference;
captUndTst >> frameUnderTest;
if( frameReference.empty() || frameUnderTest.empty())
{
cout << " < < < Game over! > > > ";
break;
}
++frameNum;
cout <<"Frame:" << frameNum <<"# ";
///////////////////////////////// PSNR ////////////////////////////////////////////////////
psnrV = getPSNR(frameReference,frameUnderTest); //get PSNR
cout << setiosflags(ios::fixed) << setprecision(3) << psnrV << "dB";
//////////////////////////////////// MSSIM /////////////////////////////////////////////////
if (psnrV < psnrTriggerValue && psnrV) // psnrV == 0 marks identical frames (infinite PSNR), so skip MSSIM for those
{
mssimV = getMSSIM(frameReference,frameUnderTest);
cout << " MSSIM: "
<< " R " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[2] * 100 << "%"
<< " G " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[1] * 100 << "%"
<< " B " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[0] * 100 << "%";
}
cout << endl;
////////////////////////////////// Show Image /////////////////////////////////////////////
imshow( WIN_RF, frameReference);
imshow( WIN_UT, frameUnderTest);
c = cvWaitKey(delay);
if (c == 27) break;
}
return 0;
}
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse = sse / (double)(I1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
Scalar getMSSIM( const Mat& i1, const Mat& i2)
{
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
Mat mu1, mu2; // PRELIMINARY COMPUTING
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
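// A minimal usage sketch (not part of the original sample; the file names are
// placeholders): the two helpers above work on any pair of same-sized, same-type Mats,
// not only on consecutive video frames.
static void compareTwoImages()
{
    Mat a = imread("frameA.png"), b = imread("frameB.png");
    if (a.empty() || b.empty() || a.size() != b.size() || a.type() != b.type())
        return;                                 // nothing sensible to compare
    double psnrV = getPSNR(a, b);               // in dB; higher means more similar
    Scalar ssimV = getMSSIM(a, b);              // per-channel SSIM in [0,1], stored B,G,R
    cout << "PSNR: " << psnrV << " dB  SSIM(R): " << ssimV.val[2] << endl;
}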

View File

@@ -1,97 +1,97 @@
#include <iostream> // for standard I/O
#include <string> // for strings
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace std;
using namespace cv;
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to write video files. You can extract the R or G or B color channel "
<< " of the input video.write " << endl
<< "Usage:" << endl
<< "./video-write inputvideoName [ R | G | B] [Y | N]" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[])
{
help();
if (argc != 4)
{
cout << "Not enough parameters" << endl;
return -1;
}
const string source = argv[1]; // the source file name
const bool askOutputType = argv[3][0] =='Y'; // If false it will use the inputs codec type
VideoCapture inputVideo(source); // Open input
if ( !inputVideo.isOpened())
{
cout << "Could not open the input video." << source << endl;
return -1;
}
string::size_type pAt = source.find_last_of('.'); // Find extension point
const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form
// Transform from int to char via Bitwise operators
char EXT[] = {ex & 0XFF , (ex & 0XFF00) >> 8,(ex & 0XFF0000) >> 16,(ex & 0XFF000000) >> 24, 0};
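// Note on the line above: the FOURCC reported by VideoCapture is a 32-bit integer with
// one ASCII character packed per byte, so masking with 0xFF and shifting by 8/16/24
// recovers the four characters (e.g. 0x58564944 decodes to "DIVX"). The union-based uEx
// variant further down does the same decoding without explicit shifts.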
Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo; // Open the output
if (askOutputType)
outputVideo.open(NAME , ex=-1, inputVideo.get(CV_CAP_PROP_FPS),S, true);
else
outputVideo.open(NAME , ex, inputVideo.get(CV_CAP_PROP_FPS),S, true);
if (!outputVideo.isOpened())
{
cout << "Could not open the output video for write: " << source << endl;
return -1;
}
union { int v; char c[5];} uEx ;
uEx.v = ex; // From Int to char via union
uEx.c[4]='\0';
cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
<< " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "Input codec type: " << EXT << endl;
int channel = 2; // Select the channel to save
switch(argv[2][0])
{
case 'R' : {channel = 2; break;}
case 'G' : {channel = 1; break;}
case 'B' : {channel = 0; break;}
}
Mat src,res;
vector<Mat> spl;
while( true) //Show the image captured in the window and repeat
{
inputVideo >> src; // read
if( src.empty()) break; // check if at end
split(src, spl); // process - extract only the correct channel
for( int i =0; i < 3; ++i)
if (i != channel)
spl[i] = Mat::zeros(S, spl[0].type());
merge(spl, res);
//outputVideo.write(res); //save or
outputVideo << res;
}
cout << "Finished writing" << endl;
return 0;
}
#include <iostream> // for standard I/O
#include <string> // for strings
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace std;
using namespace cv;
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to write video files. You can extract the R or G or B color channel "
<< " of the input video.write " << endl
<< "Usage:" << endl
<< "./video-write inputvideoName [ R | G | B] [Y | N]" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[])
{
help();
if (argc != 4)
{
cout << "Not enough parameters" << endl;
return -1;
}
const string source = argv[1]; // the source file name
const bool askOutputType = argv[3][0] =='Y'; // If false it will use the inputs codec type
VideoCapture inputVideo(source); // Open input
if ( !inputVideo.isOpened())
{
cout << "Could not open the input video." << source << endl;
return -1;
}
string::size_type pAt = source.find_last_of('.'); // Find extension point
const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form
// Transform from int to char via Bitwise operators
char EXT[] = {ex & 0XFF , (ex & 0XFF00) >> 8,(ex & 0XFF0000) >> 16,(ex & 0XFF000000) >> 24, 0};
Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo; // Open the output
if (askOutputType)
outputVideo.open(NAME , ex=-1, inputVideo.get(CV_CAP_PROP_FPS),S, true);
else
outputVideo.open(NAME , ex, inputVideo.get(CV_CAP_PROP_FPS),S, true);
if (!outputVideo.isOpened())
{
cout << "Could not open the output video for write: " << source << endl;
return -1;
}
union { int v; char c[5];} uEx ;
uEx.v = ex; // From Int to char via union
uEx.c[4]='\0';
cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
<< " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "Input codec type: " << EXT << endl;
int channel = 2; // Select the channel to save
switch(argv[2][0])
{
case 'R' : {channel = 2; break;}
case 'G' : {channel = 1; break;}
case 'B' : {channel = 0; break;}
}
Mat src,res;
vector<Mat> spl;
while( true) //Show the image captured in the window and repeat
{
inputVideo >> src; // read
if( src.empty()) break; // check if at end
split(src, spl); // process - extract only the correct channel
for( int i =0; i < 3; ++i)
if (i != channel)
spl[i] = Mat::zeros(S, spl[0].type());
merge(spl, res);
//outputVideo.write(res); //save or
outputVideo << res;
}
cout << "Finished writing" << endl;
return 0;
}
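// A minimal alternative sketch (not part of the original sample; the output name and
// codec are placeholders): instead of reusing the input codec `ex`, a codec can be
// requested explicitly by packing its FOURCC with the CV_FOURCC macro.
static bool openMjpgWriter(VideoWriter& vw, Size frameSize, double fps)
{
    int codec = CV_FOURCC('M', 'J', 'P', 'G');          // same byte layout as EXT/uEx above
    return vw.open("output.avi", codec, fps, frameSize, true /*isColor*/);
}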

View File

@@ -27,7 +27,7 @@ int main( int argc, char** argv )
if( !src.data )
{ cout<<"Usage: ./Histogram_Demo <path_to_image>"<<endl;
return -1;
return -1;
}
/// Convert to grayscale
@@ -35,15 +35,15 @@ int main( int argc, char** argv )
/// Apply Histogram Equalization
equalizeHist( src, dst );
/// Display results
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( equalized_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );
/// Wait until user exits the program
/// Wait until user exits the program
waitKey(0);
return 0;

View File

@@ -35,7 +35,7 @@ int main( int argc, char** argv )
/// Create windows
namedWindow( image_window, CV_WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar
char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
@@ -55,11 +55,11 @@ void MatchingMethod( int, void* )
/// Source image to display
Mat img_display;
img.copyTo( img_display );
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 ); // Mat::create expects (rows, cols)
/// Do the Matching and Normalize
@@ -69,19 +69,19 @@ void MatchingMethod( int, void* )
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
else
{ matchLoc = maxLoc; }
/// Show me what you got
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
imshow( image_window, img_display );
imshow( result_window, result );
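// Note on the hunk above: matchTemplate produces a result matrix of size
// (W - w + 1) x (H - h + 1) for a W x H image and w x h template; minMaxLoc then finds
// its extrema. For CV_TM_SQDIFF and CV_TM_SQDIFF_NORMED the best match is the minimum,
// for the CCORR/CCOEFF variants it is the maximum, which is exactly what the branch on
// match_method selects before the rectangles are drawn.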

View File

@@ -13,7 +13,7 @@ using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv; Mat hue;
Mat src; Mat hsv; Mat hue;
int bins = 25;
/// Function Headers
@@ -33,7 +33,7 @@ int main( int argc, char** argv )
/// Use only the Hue value
hue.create( hsv.size(), hsv.depth() );
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
/// Create Trackbar to enter the number of bins
char* window_image = "Source image";
@@ -57,7 +57,7 @@ int main( int argc, char** argv )
void Hist_and_Backproj(int, void* )
{
MatND hist;
int histSize = MAX( bins, 2 );
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
@@ -68,16 +68,16 @@ void Hist_and_Backproj(int, void* )
/// Get Backprojection
MatND backproj;
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
/// Draw the backproj
imshow( "BackProj", backproj );
/// Draw the histogram
int w = 400; int h = 400;
int bin_w = cvRound( (double) w / histSize );
int bin_w = cvRound( (double) w / histSize );
Mat histImg = Mat::zeros( w, h, CV_8UC3 );
for( int i = 0; i < bins; i ++ )
for( int i = 0; i < bins; i ++ )
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); }
imshow( "Histogram", histImg );

View File

@@ -13,7 +13,7 @@ using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv;
Mat src; Mat hsv;
Mat mask;
int lo = 20; int up = 20;
@@ -33,7 +33,7 @@ int main( int argc, char** argv )
/// Transform it to HSV
cvtColor( src, hsv, CV_BGR2HSV );
/// Show the image
/// Show the image
namedWindow( window_image, CV_WINDOW_AUTOSIZE );
imshow( window_image, src );
@@ -57,7 +57,7 @@ void pickPoint (int event, int x, int y, int, void* )
// Fill and get the mask
Point seed = Point( x, y );
int newMaskVal = 255;
Scalar newVal = Scalar( 120, 120, 120 );
@@ -65,7 +65,7 @@ void pickPoint (int event, int x, int y, int, void* )
int flags = connectivity + (newMaskVal << 8 ) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8UC1 );
floodFill( src, mask2, seed, newVal, 0, Scalar( lo, lo, lo ), Scalar( up, up, up), flags );
floodFill( src, mask2, seed, newVal, 0, Scalar( lo, lo, lo ), Scalar( up, up, up), flags );
mask = mask2( Range( 1, mask2.rows - 1 ), Range( 1, mask2.cols - 1 ) );
imshow( "Mask", mask );
@@ -80,7 +80,7 @@ void Hist_and_Backproj( )
{
MatND hist;
int h_bins = 30; int s_bins = 32;
int histSize[] = { h_bins, s_bins };
int histSize[] = { h_bins, s_bins };
float h_range[] = { 0, 179 };
float s_range[] = { 0, 255 };
@@ -96,7 +96,7 @@ void Hist_and_Backproj( )
/// Get Backprojection
MatND backproj;
calcBackProject( &hsv, 1, channels, hist, backproj, ranges, 1, true );
/// Draw the backproj
imshow( "BackProj", backproj );

View File

@@ -25,7 +25,7 @@ int main( int argc, char** argv )
/// Load three images with different environment settings
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
return -1;
}
src_base = imread( argv[1], 1 );
@@ -37,7 +37,7 @@ int main( int argc, char** argv )
cvtColor( src_test1, hsv_test1, CV_BGR2HSV );
cvtColor( src_test2, hsv_test2, CV_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 30 bins for hue and 32 for saturation
int h_bins = 50; int s_bins = 60;
@@ -74,14 +74,14 @@ int main( int argc, char** argv )
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
}
printf( "Done \n" );

View File

@@ -12,12 +12,12 @@ using namespace cv;
/**
* @function main
* @brief Main function
* @brief Main function
*/
int main( int argc, char** argv )
{
double alpha = 0.5; double beta; double input;
double alpha = 0.5; double beta; double input;
Mat src1, src2, dst;
@@ -43,7 +43,7 @@ int main( int argc, char** argv )
beta = ( 1.0 - alpha );
addWeighted( src1, alpha, src2, beta, 0.0, dst);
imshow( "Linear Blend", dst );

View File

@@ -15,7 +15,7 @@ int beta; /**< Simple brightness control */
/**
* @function main
* @brief Main function
* @brief Main function
*/
int main( int argc, char** argv )
{
@@ -23,7 +23,7 @@ int main( int argc, char** argv )
Mat image = imread( argv[1] );
Mat new_image = Mat::zeros( image.size(), image.type() );
/// Initialize values
/// Initialize values
std::cout<<" Basic Linear Transforms "<<std::endl;
std::cout<<"-------------------------"<<std::endl;
std::cout<<"* Enter the alpha value [1.0-3.0]: ";std::cin>>alpha;
@@ -38,9 +38,9 @@ int main( int argc, char** argv )
{ for( int x = 0; x < image.cols; x++ )
{ for( int c = 0; c < 3; c++ )
{
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
}
}
}
}
/// Create Windows
@@ -51,7 +51,7 @@ int main( int argc, char** argv )
imshow("Original Image", image);
imshow("New Image", new_image);
/// Wait until user press some key
waitKey();
return 0;

View File

@@ -36,29 +36,29 @@ int main( int argc, char** argv )
if( !src.data )
{ return -1; }
/// Create windows
namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
cvMoveWindow( "Dilation Demo", src.cols, 0 );
/// Create Erosion Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
Erosion );
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
Erosion );
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
&erosion_size, max_kernel_size,
Erosion );
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
&erosion_size, max_kernel_size,
Erosion );
/// Create Dilation Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation );
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation );
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation );
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation );
/// Default start
Erosion( 0, 0 );
@@ -68,7 +68,7 @@ int main( int argc, char** argv )
return 0;
}
/**
/**
* @function Erosion
*/
void Erosion( int, void* )
@@ -78,15 +78,15 @@ void Erosion( int, void* )
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
/// Apply the erosion operation
erode( src, erosion_dst, element );
imshow( "Erosion Demo", erosion_dst );
imshow( "Erosion Demo", erosion_dst );
}
/**
/**
* @function Dilation
*/
void Dilation( int, void* )
@@ -96,10 +96,10 @@ void Dilation( int, void* )
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( dilation_type,
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
Point( dilation_size, dilation_size ) );
Mat element = getStructuringElement( dilation_type,
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
Point( dilation_size, dilation_size ) );
/// Apply the dilation operation
dilate( src, dilation_dst, element );
imshow( "Dilation Demo", dilation_dst );
imshow( "Dilation Demo", dilation_dst );
}

View File

@@ -37,7 +37,7 @@ int main( int argc, char** argv )
if( !src.data )
{ return -1; }
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
@@ -45,14 +45,14 @@ int main( int argc, char** argv )
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
/// Create Trackbar to select kernel type
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
/// Create Trackbar to choose kernel size
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
/// Default start
Morphology_Operations( 0, 0 );
@@ -61,7 +61,7 @@ int main( int argc, char** argv )
return 0;
}
/**
/**
* @function Morphology_Operations
*/
void Morphology_Operations( int, void* )
@@ -70,11 +70,11 @@ void Morphology_Operations( int, void* )
// Since MORPH_X : 2,3,4,5 and 6
int operation = morph_operator + 2;
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
/// Apply the specified morphology operation
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
imshow( window_name, dst );
}
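// Note on Morphology_Operations above: the "+ 2" maps the trackbar positions 0..4 onto
// the MORPH_* constants, since MORPH_OPEN = 2, MORPH_CLOSE = 3, MORPH_GRADIENT = 4,
// MORPH_TOPHAT = 5 and MORPH_BLACKHAT = 6 (erode = 0 and dilate = 1 are covered by the
// separate erosion/dilation demo).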

View File

@@ -36,7 +36,7 @@ int main( int argc, char** argv )
{ printf(" No data! -- Exiting the program \n");
return -1; }
tmp = src;
tmp = src;
dst = tmp;
/// Create window
@@ -45,7 +45,7 @@ int main( int argc, char** argv )
/// Loop
while( true )
{
{
int c;
c = waitKey(10);
@@ -53,7 +53,7 @@ int main( int argc, char** argv )
{ break; }
if( (char)c == 'u' )
{ pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
printf( "** Zoom In: Image x 2 \n" );
printf( "** Zoom In: Image x 2 \n" );
}
else if( (char)c == 'd' )
{ pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
@@ -62,8 +62,8 @@ int main( int argc, char** argv )
imshow( window_name, dst );
tmp = dst;
}
}
return 0;
}

View File

@@ -26,15 +26,15 @@ int display_caption( char* caption );
int display_dst( int delay );
/**
* function main
/**
* function main
*/
int main( int argc, char** argv )
{
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Load the source image
src = imread( "../images/lena.png", 1 );
src = imread( "../images/lena.png", 1 );
if( display_caption( "Original Image" ) != 0 ) { return 0; }
@@ -42,7 +42,7 @@ int main( int argc, char** argv )
if( display_dst( DELAY_CAPTION ) != 0 ) { return 0; }
/// Applying Homogeneous blur
/// Applying Homogeneous blur
if( display_caption( "Homogeneous Blur" ) != 0 ) { return 0; }
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
@@ -50,7 +50,7 @@ int main( int argc, char** argv )
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
/// Applying Gaussian blur
/// Applying Gaussian blur
if( display_caption( "Gaussian Blur" ) != 0 ) { return 0; }
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
@@ -87,8 +87,8 @@ int main( int argc, char** argv )
int display_caption( char* caption )
{
dst = Mat::zeros( src.size(), src.type() );
putText( dst, caption,
Point( src.cols/4, src.rows/2),
putText( dst, caption,
Point( src.cols/4, src.rows/2),
CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
imshow( window_name, dst );
@@ -105,5 +105,5 @@ int display_dst( int delay )
imshow( window_name, dst );
int c = waitKey ( delay );
if( c >= 0 ) { return -1; }
return 0;
return 0;
}

View File

@@ -43,13 +43,13 @@ int main( int argc, char** argv )
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to choose type of Threshold
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_value,
window_name, &threshold_value,
max_value, Threshold_Demo );
window_name, &threshold_value,
max_value, Threshold_Demo );
/// Call the function to initialize
Threshold_Demo( 0, 0 );
@@ -60,7 +60,7 @@ int main( int argc, char** argv )
int c;
c = waitKey( 20 );
if( (char)c == 27 )
{ break; }
{ break; }
}
}

View File

@@ -37,7 +37,7 @@ void CannyThreshold(int, void*)
/// Using Canny's output as a mask, we display our result
dst = Scalar::all(0);
src.copyTo( dst, detected_edges);
imshow( window_name, dst );
}

View File

@@ -32,14 +32,14 @@ int main( int argc, char** argv )
/// Load the image
src = imread( argv[1], 1 );
/// Set the dst image the same type and size as src
/// Set the dst image the same type and size as src
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( src.cols - 1, 0 );
srcTri[2] = Point2f( 0, src.rows - 1 );
dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 );
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 );
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 );
@@ -62,7 +62,7 @@ int main( int argc, char** argv )
/// Rotate the warped image
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
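// Note on the hunk above: the three srcTri -> dstTri point pairs determine a 2x3 affine
// matrix M (built by getAffineTransform in the part of the sample elided here), and
// warpAffine then maps every output pixel through
//   dst(x, y) = src( M11*x + M12*y + M13, M21*x + M22*y + M23 );
// the rot_mat used for the second warp comes from getRotationMatrix2D in the same way.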

View File

@@ -24,7 +24,7 @@ int main(int argc, char** argv)
if( !src.data )
{ return -1; }
/// Convert it to gray
/// Convert it to gray
cvtColor( src, src_gray, CV_BGR2GRAY );
/// Reduce the noise so we avoid false circle detection
@@ -46,7 +46,7 @@ int main(int argc, char** argv)
circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
/// Show your results
/// Show your results
namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform Demo", src );

View File

@@ -15,8 +15,8 @@ using namespace std;
/// Global variables
/** General variables */
Mat src, edges;
Mat src_gray;
Mat src, edges;
Mat src_gray;
Mat standard_hough, probabilistic_hough;
int min_threshold = 50;
int max_trackbar = 150;
@@ -47,7 +47,7 @@ int main( int argc, char** argv )
/// Pass the image to gray
cvtColor( src, src_gray, CV_RGB2GRAY );
/// Apply Canny edge detector
Canny( src_gray, edges, 50, 200, 3 );
@@ -88,7 +88,7 @@ void Standard_Hough( int, void* )
cvtColor( edges, standard_hough, CV_GRAY2BGR );
/// 1. Use Standard Hough Transform
HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold + s_trackbar, 0, 0 );
HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold + s_trackbar, 0, 0 );
/// Show the result
for( int i = 0; i < s_lines.size(); i++ )
@@ -100,8 +100,8 @@ void Standard_Hough( int, void* )
Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) );
Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) );
line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, CV_AA);
}
line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, CV_AA);
}
imshow( standard_name, standard_hough );
}
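// Note on Standard_Hough above: each detected line comes back as (rho, theta), i.e. the
// normal form  x*cos(theta) + y*sin(theta) = rho.  The point (x0, y0) =
// (rho*cos(theta), rho*sin(theta)) is the foot of the perpendicular from the origin, and
// pt1/pt2 are taken a large distance `alpha` along the direction (-sin(theta), cos(theta))
// on either side of it, which is what the arithmetic above computes.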

View File

@@ -18,7 +18,7 @@ int main( int argc, char** argv )
{
Mat src, src_gray, dst;
int kernel_size = 3;
int kernel_size = 3;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;

View File

@@ -32,7 +32,7 @@ int main( int argc, char** argv )
dst.create( src.size(), src.type() );
map_x.create( src.size(), CV_32FC1 );
map_y.create( src.size(), CV_32FC1 );
/// Create window
namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
@@ -47,7 +47,7 @@ int main( int argc, char** argv )
/// Update map_x & map_y. Then apply remap
update_map();
remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );
remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );
// Display results
imshow( remap_window, dst );
@@ -65,34 +65,34 @@ void update_map( void )
for( int j = 0; j < src.rows; j++ )
{ for( int i = 0; i < src.cols; i++ )
{
{
switch( ind )
{
case 0:
if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
{
case 0:
if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
{
map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ;
map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ;
}
else
{ map_x.at<float>(j,i) = 0 ;
map_y.at<float>(j,i) = 0 ;
map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ;
map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ;
}
else
{ map_x.at<float>(j,i) = 0 ;
map_y.at<float>(j,i) = 0 ;
}
break;
case 1:
map_x.at<float>(j,i) = i ;
map_y.at<float>(j,i) = src.rows - j ;
break;
case 1:
map_x.at<float>(j,i) = i ;
map_y.at<float>(j,i) = src.rows - j ;
break;
case 2:
map_x.at<float>(j,i) = src.cols - i ;
map_y.at<float>(j,i) = j ;
break;
map_x.at<float>(j,i) = src.cols - i ;
map_y.at<float>(j,i) = j ;
break;
case 3:
map_x.at<float>(j,i) = src.cols - i ;
map_y.at<float>(j,i) = src.rows - j ;
break;
map_x.at<float>(j,i) = src.cols - i ;
map_y.at<float>(j,i) = src.rows - j ;
break;
} // end of switch
}
}
}
ind++;
}
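// Note on update_map above: remap evaluates dst(j,i) = src( map_y(j,i), map_x(j,i) ), so
// the four cases produce, in turn: the whole image squeezed into the central half of the
// output, an upside-down copy, a horizontally mirrored copy, and a copy flipped in both
// directions; `ind`, incremented at the end, selects the next effect on the following update.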

View File

@@ -18,7 +18,7 @@ int main( int argc, char** argv )
{
Mat src, src_gray;
Mat grad;
Mat grad;
char* window_name = "Sobel Demo - Simple Edge Detector";
int scale = 1;
int delta = 0;
@@ -43,15 +43,15 @@ int main( int argc, char** argv )
/// Generate grad_x and grad_y
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
/// Gradient X
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_x, abs_grad_x );
/// Gradient Y
/// Gradient Y
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_y, abs_grad_y );
/// Total Gradient (approximate)
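// Note on the hunk above: the exact gradient magnitude would be sqrt(Gx^2 + Gy^2); the
// sample approximates it more cheaply as 0.5*|Gx| + 0.5*|Gy|, presumably via
// addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad ) in the line that follows this
// hunk in the full file, after convertScaleAbs has brought each 16-bit gradient back to 8 bits.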

View File

@@ -11,7 +11,7 @@
using namespace cv;
/// Global Variables
/// Global Variables
Mat src, dst;
int top, bottom, left, right;
int borderType;
@@ -29,10 +29,10 @@ int main( int argc, char** argv )
/// Load an image
src = imread( argv[1] );
if( !src.data )
{ printf(" No data entered, please enter the path to an image file \n");
return -1;
}
/// Brief how-to for this program
@@ -46,12 +46,12 @@ int main( int argc, char** argv )
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
left = (int) (0.05*src.cols); right = (int) (0.05*src.cols);
dst = src;
imshow( window_name, dst );
while( true )
{
c = waitKey(500);

View File

@@ -22,7 +22,7 @@ int main ( int argc, char** argv )
Mat kernel;
Point anchor;
double delta;
int ddepth;
int ddepth;
int kernel_size;
char* window_name = "filter2D Demo";
@@ -36,7 +36,7 @@ int main ( int argc, char** argv )
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
anchor = Point( -1, -1 );
delta = 0;
@@ -60,6 +60,6 @@ int main ( int argc, char** argv )
imshow( window_name, dst );
ind++;
}
return 0;
}

View File

@@ -56,15 +56,15 @@ void thresh_callback(int, void* )
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
}
/// Show in a window

View File

@@ -56,7 +56,7 @@ void thresh_callback(int, void* )
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Approximate contours to polygons + get bounding rects and circles
@@ -66,18 +66,18 @@ void thresh_callback(int, void* )
vector<float>radius( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minEnclosingCircle( contours_poly[i], center[i], radius[i] );
}
}
/// Draw polygonal contour + bonding rects + circles
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
}

View File

@@ -56,7 +56,7 @@ void thresh_callback(int, void* )
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles and ellipses for each contour
@@ -64,24 +64,24 @@ void thresh_callback(int, void* )
vector<RotatedRect> minEllipse( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ minRect[i] = minAreaRect( Mat(contours[i]) );
{ minRect[i] = minAreaRect( Mat(contours[i]) );
if( contours[i].size() > 5 )
{ minEllipse[i] = fitEllipse( Mat(contours[i]) ); }
}
}
/// Draw contours + rotated rects + ellipses
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
// contour
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
// ellipse
ellipse( drawing, minEllipse[i], color, 2, 8 );
// rotated rectangle
// rotated rectangle
Point2f rect_points[4]; minRect[i].points( rect_points );
for( int j = 0; j < 4; j++ )
line( drawing, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
line( drawing, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
}
/// Show in a window

View File

@@ -58,21 +58,21 @@ void thresh_callback(int, void* )
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ convexHull( Mat(contours[i]), hull[i], false ); }
{ convexHull( Mat(contours[i]), hull[i], false ); }
/// Draw contours + hull results
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
}
/// Show in a window

View File

@@ -56,7 +56,7 @@ void thresh_callback(int, void* )
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
@@ -64,7 +64,7 @@ void thresh_callback(int, void* )
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
@@ -72,9 +72,9 @@ void thresh_callback(int, void* )
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
@@ -86,9 +86,9 @@ void thresh_callback(int, void* )
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
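// Note on the hunk above: each mass center is computed from the spatial moments as
//   mc = ( m10/m00, m01/m00 ),
// i.e. the centroid of the contour; m00 is the moment-based area estimate, which is why
// it is printed side by side with contourArea() for comparison.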

View File

@@ -34,13 +34,13 @@ int main( int argc, char** argv )
/// Draw it in src
for( int j = 0; j < 6; j++ )
{ line( src, vert[j], vert[(j+1)%6], Scalar( 255 ), 3, 8 ); }
{ line( src, vert[j], vert[(j+1)%6], Scalar( 255 ), 3, 8 ); }
/// Get the contours
vector<vector<Point> > contours; vector<Vec4i> hierarchy;
Mat src_copy = src.clone();
findContours( src_copy, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
findContours( src_copy, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
/// Calculate the distances to the contour
Mat raw_dist( src.size(), CV_32FC1 );
@@ -53,19 +53,19 @@ int main( int argc, char** argv )
double minVal; double maxVal;
minMaxLoc( raw_dist, &minVal, &maxVal, 0, 0, Mat() );
minVal = abs(minVal); maxVal = abs(maxVal);
/// Depicting the distances graphically
Mat drawing = Mat::zeros( src.size(), CV_8UC3 );
for( int j = 0; j < src.rows; j++ )
{ for( int i = 0; i < src.cols; i++ )
{
{
if( raw_dist.at<float>(j,i) < 0 )
{ drawing.at<Vec3b>(j,i)[0] = 255 - (int) abs(raw_dist.at<float>(j,i))*255/minVal; }
else if( raw_dist.at<float>(j,i) > 0 )
{ drawing.at<Vec3b>(j,i)[2] = 255 - (int) raw_dist.at<float>(j,i)*255/maxVal; }
{ drawing.at<Vec3b>(j,i)[2] = 255 - (int) raw_dist.at<float>(j,i)*255/maxVal; }
else
{ drawing.at<Vec3b>(j,i)[0] = 255; drawing.at<Vec3b>(j,i)[1] = 255; drawing.at<Vec3b>(j,i)[2] = 255; }
{ drawing.at<Vec3b>(j,i)[0] = 255; drawing.at<Vec3b>(j,i)[1] = 255; drawing.at<Vec3b>(j,i)[2] = 255; }
}
}

View File

@@ -37,7 +37,7 @@ int main( int argc, char** argv )
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
cornerHarris_demo( 0, 0 );
waitKey(0);
@@ -58,25 +58,25 @@ void cornerHarris_demo( int, void* )
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
/// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
convertScaleAbs( dst_norm, dst_norm_scaled );
/// Drawing a circle around corners
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
{
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
}
}
}
/// Showing the result
}
}
/// Showing the result
namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}
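// Note on cornerHarris_demo above: for every pixel cornerHarris builds the 2x2 gradient
// covariance matrix M over a blockSize x blockSize window (Sobel aperture apertureSize)
// and stores the response  R = det(M) - k * trace(M)^2  with k = 0.04; R is then
// normalized to [0,255] and every location above the trackbar threshold gets a circle.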

View File

@@ -38,7 +38,7 @@ int main( int argc, char** argv )
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
imshow( source_window, src );
@@ -55,7 +55,7 @@ int main( int argc, char** argv )
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 ) { maxCorners = 1; }
/// Parameters for Shi-Tomasi algorithm
vector<Point2f> corners;
double qualityLevel = 0.01;
@@ -69,16 +69,16 @@ void goodFeaturesToTrack_Demo( int, void* )
copy = src.clone();
/// Apply corner detection
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k );
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k );
/// Draw corners detected
cout<<"** Number of corners detected: "<<corners.size()<<endl;
@@ -88,7 +88,7 @@ void goodFeaturesToTrack_Demo( int, void* )
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
imshow( source_window, copy );
/// Set the needed parameters to find the refined corners
Size winSize = Size( 5, 5 );
@@ -100,6 +100,6 @@ void goodFeaturesToTrack_Demo( int, void* )
/// Write them down
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}

View File

@@ -38,7 +38,7 @@ int main( int argc, char** argv )
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
imshow( source_window, src );
@@ -55,7 +55,7 @@ int main( int argc, char** argv )
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 ) { maxCorners = 1; }
/// Parameters for Shi-Tomasi algorithm
vector<Point2f> corners;
double qualityLevel = 0.01;
@@ -69,16 +69,16 @@ void goodFeaturesToTrack_Demo( int, void* )
copy = src.clone();
/// Apply corner detection
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k );
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k );
/// Draw corners detected
cout<<"** Number of corners detected: "<<corners.size()<<endl;
@@ -88,6 +88,6 @@ void goodFeaturesToTrack_Demo( int, void* )
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
imshow( source_window, copy );
}

View File

@@ -1,13 +1,13 @@
<?xml version="1.0"?>
<opencv_storage>
<images>
images/CameraCalibraation/VID5/xx1.jpg
images/CameraCalibraation/VID5/xx2.jpg
images/CameraCalibraation/VID5/xx3.jpg
images/CameraCalibraation/VID5/xx4.jpg
images/CameraCalibraation/VID5/xx5.jpg
images/CameraCalibraation/VID5/xx6.jpg
images/CameraCalibraation/VID5/xx7.jpg
images/CameraCalibraation/VID5/xx8.jpg
</images>
<?xml version="1.0"?>
<opencv_storage>
<images>
images/CameraCalibraation/VID5/xx1.jpg
images/CameraCalibraation/VID5/xx2.jpg
images/CameraCalibraation/VID5/xx3.jpg
images/CameraCalibraation/VID5/xx4.jpg
images/CameraCalibraation/VID5/xx5.jpg
images/CameraCalibraation/VID5/xx6.jpg
images/CameraCalibraation/VID5/xx7.jpg
images/CameraCalibraation/VID5/xx8.jpg
</images>
</opencv_storage>

View File

@@ -1,46 +1,46 @@
<?xml version="1.0"?>
<opencv_storage>
<Settings>
<!-- Number of inner corners per board row and column (square or circle pattern). -->
<BoardSize_Width> 9</BoardSize_Width>
<BoardSize_Height>6</BoardSize_Height>
<!-- The size of a square in some user defined metric system (pixel, millimeter)-->
<Square_Size>50</Square_Size>
<!-- The type of input used for camera calibration. One of: CHESSBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
<Calibrate_Pattern>"CHESSBOARD"</Calibrate_Pattern>
<!-- The input to use for calibration.
To use an input camera -> give the ID of the camera, like "1"
To use an input video -> give the path of the input video, like "/tmp/x.avi"
To use an image list -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
-->
<Input>"images/CameraCalibraation/VID5/VID5.xml"</Input>
<!-- If true (non-zero) we flip the input images around the horizontal axis.-->
<Input_FlipAroundHorizontalAxis>0</Input_FlipAroundHorizontalAxis>
<!-- Time delay between frames in case of camera. -->
<Input_Delay>100</Input_Delay>
<!-- How many frames to use, for calibration. -->
<Calibrate_NrOfFrameToUse>25</Calibrate_NrOfFrameToUse>
<!-- Consider only fy as a free parameter, the ratio fx/fy stays the same as in the input cameraMatrix.
Use or not setting. 0 - False Non-Zero - True-->
<Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio>
<!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.-->
<Calibrate_AssumeZeroTangentialDistortion>1</Calibrate_AssumeZeroTangentialDistortion>
<!-- If true (non-zero) the principal point is not changed during the global optimization.-->
<Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter>
<!-- The name of the output log file. -->
<Write_outputFileName>"out_camera_data.xml"</Write_outputFileName>
<!-- If true (non-zero) we write to the output file the feature points.-->
<Write_DetectedFeaturePoints>1</Write_DetectedFeaturePoints>
<!-- If true (non-zero) we write to the output file the extrinsic camera parameters.-->
<Write_extrinsicParameters>1</Write_extrinsicParameters>
<!-- If true (non-zero) we show after calibration the undistorted images.-->
<Show_UndistortedImage>1</Show_UndistortedImage>
</Settings>
<?xml version="1.0"?>
<opencv_storage>
<Settings>
<!-- Number of inner corners per board row and column (square or circle pattern). -->
<BoardSize_Width> 9</BoardSize_Width>
<BoardSize_Height>6</BoardSize_Height>
<!-- The size of a square in some user defined metric system (pixel, millimeter)-->
<Square_Size>50</Square_Size>
<!-- The type of input used for camera calibration. One of: CHESSBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
<Calibrate_Pattern>"CHESSBOARD"</Calibrate_Pattern>
<!-- The input to use for calibration.
To use an input camera -> give the ID of the camera, like "1"
To use an input video -> give the path of the input video, like "/tmp/x.avi"
To use an image list -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
-->
<Input>"images/CameraCalibraation/VID5/VID5.xml"</Input>
<!-- If true (non-zero) we flip the input images around the horizontal axis.-->
<Input_FlipAroundHorizontalAxis>0</Input_FlipAroundHorizontalAxis>
<!-- Time delay between frames in case of camera. -->
<Input_Delay>100</Input_Delay>
<!-- How many frames to use, for calibration. -->
<Calibrate_NrOfFrameToUse>25</Calibrate_NrOfFrameToUse>
<!-- Consider only fy as a free parameter, the ratio fx/fy stays the same as in the input cameraMatrix.
Use or not setting. 0 - False Non-Zero - True-->
<Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio>
<!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.-->
<Calibrate_AssumeZeroTangentialDistortion>1</Calibrate_AssumeZeroTangentialDistortion>
<!-- If true (non-zero) the principal point is not changed during the global optimization.-->
<Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter>
<!-- The name of the output log file. -->
<Write_outputFileName>"out_camera_data.xml"</Write_outputFileName>
<!-- If true (non-zero) we write to the output file the feature points.-->
<Write_DetectedFeaturePoints>1</Write_DetectedFeaturePoints>
<!-- If true (non-zero) we write to the output file the extrinsic camera parameters.-->
<Write_extrinsicParameters>1</Write_extrinsicParameters>
<!-- If true (non-zero) we show after calibration the undistorted images.-->
<Show_UndistortedImage>1</Show_UndistortedImage>
</Settings>
</opencv_storage>

View File

@@ -1,352 +1,352 @@
%YAML:1.0
calibration_Time: "08/19/11 20:44:38"
nrOfFrames: 8
image_Width: 640
image_Height: 480
board_Width: 9
board_Height: 6
square_Size: 50.
FixAspectRatio: 1.
# flags: +fix_aspectRatio +fix_principal_point +zero_tangent_dist
flagValue: 14
Camera_Matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 6.5746697810243404e+002, 0., 3.1950000000000000e+002, 0.,
6.5746697810243404e+002, 2.3950000000000000e+002, 0., 0., 1. ]
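# Note (not part of the calibration output): Camera_Matrix is the intrinsic matrix
#   [ fx  0  cx ;  0  fy  cy ;  0  0  1 ].
# Here fx = fy because Calibrate_FixAspectRatio was enabled, and (cx, cy) = (319.5, 239.5)
# is the centre of the 640x480 image because the principal point was fixed during optimization.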
Distortion_Coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ -4.1802327018241026e-001, 5.0715243805833121e-001, 0., 0.,
-5.7843596847939704e-001 ]
Avg_Reprojection_Error: 3.8441346462381665e-001
Per_View_Reprojection_Errors: !!opencv-matrix
rows: 8
cols: 1
dt: f
data: [ 5.04357755e-001, 4.85754758e-001, 3.99563968e-001,
4.13829178e-001, 3.53570908e-001, 3.21116358e-001,
2.74473161e-001, 2.39761785e-001 ]
# a set of 6-tuples (rotation vector + translation vector) for each view
Extrinsic_Parameters: !!opencv-matrix
rows: 8
cols: 6
dt: d
data: [ -7.8704123655486097e-002, -1.5922384772614945e-001,
3.1166227207451498e+000, 2.4224388101960471e+002,
1.1795590397660339e+002, 6.2576484126093249e+002,
-1.4117480285164308e-001, -1.7917415443804836e-002,
3.1333182268743949e+000, 2.5943034781849354e+002,
1.4039780562976958e+002, 6.3848706527260981e+002,
7.2230525186138789e-002, -7.5445981266787754e-002,
1.5712860749221762e+000, 1.7426560451795339e+002,
-1.9309240362258871e+002, 7.0891416556762647e+002,
2.0367310600105853e-002, 6.8565520026996951e-002,
-5.4313033031644169e-004, -2.0146314940404827e+002,
-1.3305643514116997e+002, 7.4933554744027231e+002,
-3.4468530027734055e-002, 2.1921265175331925e-002,
-1.5731053528054522e+000, -1.1155718744299284e+002,
2.0307615364261443e+002, 8.4915903914333899e+002,
3.7425562109513817e-002, 7.4883169379022230e-002,
-3.6031632305130512e-002, -2.0094505419395196e+002,
-1.1627359108310560e+002, 9.2021583518760133e+002,
6.8105689976949157e-002, 6.4426739692440949e-002,
-7.0967130057087435e-002, -1.9233852871740035e+002,
-1.0334652096641923e+002, 1.0755293563503658e+003,
-5.8017546499862287e-002, -1.6909812666033443e-003,
-1.5876137659782963e+000, -1.0242234847115104e+002,
2.2583088401423066e+002, 1.1125972190244058e+003 ]
Image_points: !!opencv-matrix
rows: 8
cols: 54
dt: "2f"
data: [ 5.58494690e+002, 3.55650085e+002, 5.13314697e+002,
3.59107666e+002, 4.65728333e+002, 3.62133911e+002,
4.15701111e+002, 3.65026459e+002, 3.64399353e+002,
3.67339203e+002, 3.12101196e+002, 3.69211914e+002,
2.59208405e+002, 3.70413513e+002, 2.07456192e+002,
3.71175995e+002, 1.56619507e+002, 3.72176544e+002,
5.60868713e+002, 3.08104828e+002, 5.15191772e+002,
3.10485626e+002, 4.67032959e+002, 3.12660004e+002,
4.16112152e+002, 3.14887177e+002, 3.64010712e+002,
3.16825775e+002, 3.10712372e+002, 3.18640808e+002,
2.56853943e+002, 3.20017365e+002, 2.04168182e+002,
3.20908417e+002, 1.52469528e+002, 3.22105377e+002,
5.62328369e+002, 2.58646881e+002, 5.16396301e+002,
2.59919281e+002, 4.67907654e+002, 2.61257874e+002,
4.16463440e+002, 2.62675537e+002, 3.63546570e+002,
2.64064117e+002, 3.09528137e+002, 2.65489990e+002,
2.54765533e+002, 2.66862030e+002, 2.01299225e+002,
2.67997345e+002, 1.48913437e+002, 2.69627167e+002,
5.63098022e+002, 2.08423523e+002, 5.16782654e+002,
2.08424667e+002, 4.68059296e+002, 2.08661697e+002,
4.16216431e+002, 2.09268982e+002, 3.62888763e+002,
2.10013397e+002, 3.08458557e+002, 2.11074738e+002,
2.53267990e+002, 2.12496582e+002, 1.99121384e+002,
2.14005814e+002, 1.46551376e+002, 2.15851318e+002,
5.62997437e+002, 1.57966492e+002, 5.16406494e+002,
1.56580688e+002, 4.67334900e+002, 1.55756500e+002,
4.15378235e+002, 1.55492874e+002, 3.62096710e+002,
1.55498734e+002, 3.07522827e+002, 1.56133240e+002,
2.52235214e+002, 1.57516571e+002, 1.97876328e+002,
1.59318787e+002, 1.45078247e+002, 1.61638428e+002,
5.62097168e+002, 1.07469536e+002, 5.15766846e+002,
1.04902527e+002, 4.66562866e+002, 1.03045807e+002,
4.14651459e+002, 1.01924713e+002, 3.61240662e+002,
1.01357826e+002, 3.06746613e+002, 1.01582802e+002,
2.51568024e+002, 1.02920105e+002, 1.97343307e+002,
1.04941299e+002, 1.44756821e+002, 1.07737488e+002,
5.68062500e+002, 3.73591125e+002, 5.25272644e+002,
3.77019318e+002, 4.79870941e+002, 3.80086578e+002,
4.31823730e+002, 3.83036652e+002, 3.81995758e+002,
3.85271759e+002, 3.30728729e+002, 3.86998779e+002,
2.78071167e+002, 3.88151031e+002, 2.26231567e+002,
3.88669586e+002, 1.74855331e+002, 3.89197998e+002,
5.69792542e+002, 3.27097382e+002, 5.26866028e+002,
3.29362366e+002, 4.81278229e+002, 3.31532928e+002,
4.32783203e+002, 3.33552185e+002, 3.82408234e+002,
3.35186554e+002, 3.30427399e+002, 3.36404053e+002,
2.77138519e+002, 3.37450958e+002, 2.24525131e+002,
3.37957092e+002, 1.72285507e+002, 3.38503540e+002,
5.70942749e+002, 2.79243713e+002, 5.27789307e+002,
2.80073486e+002, 4.82146576e+002, 2.81226410e+002,
4.33247375e+002, 2.82237427e+002, 3.82503662e+002,
2.83062286e+002, 3.30138885e+002, 2.83794434e+002,
2.76433228e+002, 2.84549286e+002, 2.23158783e+002,
2.84981049e+002, 1.70520218e+002, 2.85720886e+002,
5.71001953e+002, 2.30928329e+002, 5.27846863e+002,
2.30519928e+002, 4.82114563e+002, 2.30268906e+002,
4.33114563e+002, 2.30243515e+002, 3.82384857e+002,
2.30311340e+002, 3.29870392e+002, 2.30454620e+002,
2.76012634e+002, 2.30882156e+002, 2.22529434e+002,
2.31355362e+002, 1.69742065e+002, 2.32063004e+002,
5.70199036e+002, 1.82609772e+002, 5.27030884e+002,
1.80973267e+002, 4.81193573e+002, 1.79573792e+002,
4.32409821e+002, 1.78475616e+002, 3.81855530e+002,
1.77680283e+002, 3.29641937e+002, 1.77092087e+002,
2.75895782e+002, 1.77155502e+002, 2.22438889e+002,
1.77605667e+002, 1.69884583e+002, 1.78365585e+002,
5.69026245e+002, 1.34654831e+002, 5.26171570e+002,
1.31798691e+002, 4.80653503e+002, 1.29171509e+002,
4.31869904e+002, 1.27280067e+002, 3.81419739e+002,
1.25591202e+002, 3.29466644e+002, 1.24407089e+002,
2.76225342e+002, 1.24174736e+002, 2.23024109e+002,
1.24463333e+002, 1.70838898e+002, 1.25398903e+002,
4.73812897e+002, 6.94673386e+001, 4.74245453e+002,
1.12387466e+002, 4.74243347e+002, 1.56034164e+002,
4.73834778e+002, 2.00523651e+002, 4.72891602e+002,
2.44457306e+002, 4.71412811e+002, 2.87981171e+002,
4.69708252e+002, 3.30783173e+002, 4.67558228e+002,
3.71818420e+002, 4.65495667e+002, 4.11996979e+002,
4.31027649e+002, 6.75546722e+001, 4.31269440e+002,
1.10960022e+002, 4.31185486e+002, 1.55113556e+002,
4.30830139e+002, 2.00061066e+002, 4.30168427e+002,
2.44456863e+002, 4.29107544e+002, 2.88479645e+002,
4.27829071e+002, 3.31813507e+002, 4.26131653e+002,
3.73071228e+002, 4.24718811e+002, 4.13476563e+002,
3.86868805e+002, 6.61982269e+001, 3.86895416e+002,
1.09904411e+002, 3.86690216e+002, 1.54396423e+002,
3.86368591e+002, 1.99800369e+002, 3.85792206e+002,
2.44538574e+002, 3.85117279e+002, 2.88826447e+002,
3.84405273e+002, 3.32408020e+002, 3.83303772e+002,
3.74074097e+002, 3.82477448e+002, 4.14638977e+002,
3.41941437e+002, 6.54462357e+001, 3.41628204e+002,
1.09383698e+002, 3.41402344e+002, 1.54105545e+002,
3.41168854e+002, 1.99655045e+002, 3.40816681e+002,
2.44469910e+002, 3.40516937e+002, 2.88975800e+002,
3.40365662e+002, 3.32670990e+002, 3.39935211e+002,
3.74465759e+002, 3.39847626e+002, 4.14742279e+002,
2.96694000e+002, 6.56859589e+001, 2.96075226e+002,
1.09505333e+002, 2.95704895e+002, 1.54202652e+002,
2.95653107e+002, 1.99734131e+002, 2.95589661e+002,
2.44549530e+002, 2.95629547e+002, 2.88889496e+002,
2.96138733e+002, 3.32610931e+002, 2.96520905e+002,
3.74608551e+002, 2.96987091e+002, 4.14774902e+002,
2.51414978e+002, 6.65755463e+001, 2.50681854e+002,
1.10189331e+002, 2.50183380e+002, 1.54658005e+002,
2.50331161e+002, 2.00073761e+002, 2.50590790e+002,
2.44719513e+002, 2.51083817e+002, 2.88868286e+002,
2.52134262e+002, 3.32266937e+002, 2.53097809e+002,
3.74022491e+002, 2.54404007e+002, 4.14018066e+002,
1.49524078e+002, 1.27699501e+002, 1.89511658e+002,
1.25816605e+002, 2.31050888e+002, 1.24260918e+002,
2.74076721e+002, 1.23023209e+002, 3.17643005e+002,
1.22288109e+002, 3.61785889e+002, 1.22105164e+002,
4.06142670e+002, 1.22401566e+002, 4.49623962e+002,
1.23246025e+002, 4.92677216e+002, 1.24087708e+002,
1.48706085e+002, 1.69077423e+002, 1.88827805e+002,
1.67750443e+002, 2.30439865e+002, 1.66769333e+002,
2.73830933e+002, 1.65871170e+002, 3.17596741e+002,
1.65410919e+002, 3.61983459e+002, 1.65327866e+002,
4.06748322e+002, 1.65463974e+002, 4.50450226e+002,
1.66126526e+002, 4.93614655e+002, 1.66970413e+002,
1.48312607e+002, 2.11499451e+002, 1.88574097e+002,
2.10860214e+002, 2.30130676e+002, 2.10261612e+002,
2.73557709e+002, 2.09837143e+002, 3.17542572e+002,
2.09633057e+002, 3.62091248e+002, 2.09732620e+002,
4.06934570e+002, 2.09926758e+002, 4.50914612e+002,
2.10320221e+002, 4.94044495e+002, 2.10900925e+002,
1.48613831e+002, 2.53997177e+002, 1.88797791e+002,
2.53912842e+002, 2.30240204e+002, 2.53975067e+002,
2.73746704e+002, 2.54010208e+002, 3.17718262e+002,
2.54106003e+002, 3.62188965e+002, 2.54205475e+002,
4.06908783e+002, 2.54317505e+002, 4.50824951e+002,
2.54539490e+002, 4.93825714e+002, 2.54753876e+002,
1.49541687e+002, 2.96404175e+002, 1.89357727e+002,
2.97117523e+002, 2.30807007e+002, 2.97805603e+002,
2.74325470e+002, 2.97966522e+002, 3.18042206e+002,
2.98304535e+002, 3.62105774e+002, 2.98552643e+002,
4.06672272e+002, 2.98572418e+002, 4.50363068e+002,
2.98569550e+002, 4.93109894e+002, 2.98516205e+002,
1.50883698e+002, 3.38493195e+002, 1.90633621e+002,
3.39862610e+002, 2.31920990e+002, 3.40869415e+002,
2.74971252e+002, 3.41453766e+002, 3.18235229e+002,
3.41952637e+002, 3.62063477e+002, 3.42314026e+002,
4.06098938e+002, 3.42221802e+002, 4.49477386e+002,
3.42063812e+002, 4.91864716e+002, 3.41727600e+002,
2.36129852e+002, 3.92798004e+002, 2.34999939e+002,
3.56118683e+002, 2.34376099e+002, 3.18607025e+002,
2.33822159e+002, 2.80400696e+002, 2.33565445e+002,
2.42213104e+002, 2.33583069e+002, 2.03937286e+002,
2.34028824e+002, 1.65756607e+002, 2.34613373e+002,
1.28586639e+002, 2.35190308e+002, 9.18279037e+001,
2.73031616e+002, 3.93267242e+002, 2.72295166e+002,
3.56342743e+002, 2.71799347e+002, 3.18847412e+002,
2.71418854e+002, 2.80287872e+002, 2.71161469e+002,
2.41881134e+002, 2.71248962e+002, 2.03348145e+002,
2.71379303e+002, 1.64895874e+002, 2.71946045e+002,
1.27450935e+002, 2.72322418e+002, 9.06900787e+001,
3.10670715e+002, 3.93568848e+002, 3.10389160e+002,
3.56545959e+002, 3.10084625e+002, 3.18814514e+002,
3.09801544e+002, 2.80242737e+002, 3.09678711e+002,
2.41574814e+002, 3.09779663e+002, 2.02989838e+002,
3.09842712e+002, 1.64338043e+002, 3.10076782e+002,
1.26870911e+002, 3.10243286e+002, 8.98413315e+001,
3.48618134e+002, 3.93563202e+002, 3.48617065e+002,
3.56472382e+002, 3.48608795e+002, 3.18855621e+002,
3.48544556e+002, 2.80011017e+002, 3.48556396e+002,
2.41388168e+002, 3.48585388e+002, 2.02692429e+002,
3.48435089e+002, 1.64099731e+002, 3.48442902e+002,
1.26549957e+002, 3.48338043e+002, 8.98002014e+001,
3.86625610e+002, 3.93188599e+002, 3.87047729e+002,
3.56377594e+002, 3.87306274e+002, 3.18714752e+002,
3.87337799e+002, 2.79868896e+002, 3.87402740e+002,
2.41228760e+002, 3.87295166e+002, 2.02695313e+002,
3.87030273e+002, 1.64203415e+002, 3.86741211e+002,
1.26606262e+002, 3.86337311e+002, 8.99655075e+001,
4.24534088e+002, 3.92702545e+002, 4.25310822e+002,
3.55900452e+002, 4.25869019e+002, 3.18160614e+002,
4.25909790e+002, 2.79615753e+002, 4.25977295e+002,
2.41165100e+002, 4.25826477e+002, 2.02876389e+002,
4.25331665e+002, 1.64527618e+002, 4.24775787e+002,
1.27097328e+002, 4.23985138e+002, 9.08176651e+001,
1.79142670e+002, 1.58573654e+002, 2.12791580e+002,
1.56291031e+002, 2.47140106e+002, 1.54265656e+002,
2.82607300e+002, 1.52373688e+002, 3.18175507e+002,
1.50692184e+002, 3.54185852e+002, 1.49404175e+002,
3.90455200e+002, 1.48229370e+002, 4.26106689e+002,
1.47507843e+002, 4.61576141e+002, 1.46712479e+002,
1.80388336e+002, 1.93027603e+002, 2.14026459e+002,
1.91128204e+002, 2.48376541e+002, 1.89414978e+002,
2.83795807e+002, 1.87720856e+002, 3.19472473e+002,
1.86192383e+002, 3.55483826e+002, 1.84929199e+002,
3.91970764e+002, 1.83747040e+002, 4.27654572e+002,
1.82931534e+002, 4.63295227e+002, 1.81977234e+002,
1.81914261e+002, 2.27955460e+002, 2.15291260e+002,
2.26512482e+002, 2.49628265e+002, 2.25067520e+002,
2.85066406e+002, 2.23593185e+002, 3.20846680e+002,
2.22337708e+002, 3.56862885e+002, 2.21191040e+002,
3.93279907e+002, 2.19905640e+002, 4.29202271e+002,
2.18870361e+002, 4.64728424e+002, 2.17972977e+002,
1.83496948e+002, 2.62963226e+002, 2.16930527e+002,
2.61755219e+002, 2.51115829e+002, 2.60777222e+002,
2.86553406e+002, 2.59500336e+002, 3.22299896e+002,
2.58380737e+002, 3.58307648e+002, 2.57236694e+002,
3.94551819e+002, 2.56009125e+002, 4.30358948e+002,
2.54925797e+002, 4.65684998e+002, 2.54021484e+002,
1.85461685e+002, 2.97687378e+002, 2.18712234e+002,
2.96999207e+002, 2.52770218e+002, 2.96270752e+002,
2.88213776e+002, 2.95168213e+002, 3.23698334e+002,
2.94233032e+002, 3.59477722e+002, 2.93170715e+002,
3.95647766e+002, 2.91897400e+002, 4.31309845e+002,
2.90856995e+002, 4.66494110e+002, 2.89726410e+002,
1.87661331e+002, 3.32186188e+002, 2.20767746e+002,
3.31906250e+002, 2.54839096e+002, 3.31398651e+002,
2.89963745e+002, 3.30524139e+002, 3.25207642e+002,
3.29771820e+002, 3.60686035e+002, 3.28762695e+002,
3.96576447e+002, 3.27542206e+002, 4.31994415e+002,
3.26294189e+002, 4.66894653e+002, 3.24949921e+002,
2.03543015e+002, 1.77473557e+002, 2.32777847e+002,
1.74712509e+002, 2.62628723e+002, 1.72331970e+002,
2.93045898e+002, 1.69686768e+002, 3.23527618e+002,
1.67496246e+002, 3.54206787e+002, 1.65446075e+002,
3.85180176e+002, 1.63360580e+002, 4.15484253e+002,
1.61536423e+002, 4.45720947e+002, 1.59896164e+002,
2.05864395e+002, 2.07228104e+002, 2.35242096e+002,
2.04699326e+002, 2.64853973e+002, 2.02407455e+002,
2.95353882e+002, 1.99972321e+002, 3.25811890e+002,
1.97671921e+002, 3.56471252e+002, 1.95763168e+002,
3.87280548e+002, 1.93597977e+002, 4.17615814e+002,
1.91867371e+002, 4.48018677e+002, 1.90067413e+002,
2.08421249e+002, 2.37166977e+002, 2.37513824e+002,
2.34982773e+002, 2.67261261e+002, 2.32802841e+002,
2.97555817e+002, 2.30466080e+002, 3.28118103e+002,
2.28462463e+002, 3.58699707e+002, 2.26417038e+002,
3.89468842e+002, 2.24356827e+002, 4.19895996e+002,
2.22421921e+002, 4.50077850e+002, 2.20683517e+002,
2.11095444e+002, 2.66940186e+002, 2.40241348e+002,
2.64970093e+002, 2.69563019e+002, 2.63153290e+002,
2.99863464e+002, 2.60983551e+002, 3.30282440e+002,
2.58911560e+002, 3.60724792e+002, 2.56935730e+002,
3.91487915e+002, 2.54799423e+002, 4.21789093e+002,
2.52929688e+002, 4.51818481e+002, 2.51059357e+002,
2.13829117e+002, 2.96591217e+002, 2.42742859e+002,
2.94884583e+002, 2.72209076e+002, 2.93215668e+002,
3.02402985e+002, 2.91230591e+002, 3.32536072e+002,
2.89165192e+002, 3.62860901e+002, 2.87413605e+002,
3.93481842e+002, 2.85199615e+002, 4.23728851e+002,
2.83277496e+002, 4.53453094e+002, 2.81229309e+002,
2.16799316e+002, 3.25975220e+002, 2.45605515e+002,
3.24619904e+002, 2.74777344e+002, 3.22958679e+002,
3.04762817e+002, 3.21008057e+002, 3.34797150e+002,
3.19291443e+002, 3.65005798e+002, 3.17295044e+002,
3.95311981e+002, 3.15296021e+002, 4.25312592e+002,
3.13086945e+002, 4.54931152e+002, 3.11027130e+002,
2.60550232e+002, 3.70739563e+002, 2.59674011e+002,
3.42115936e+002, 2.58910492e+002, 3.13278015e+002,
2.58195618e+002, 2.84013580e+002, 2.57727173e+002,
2.55017166e+002, 2.57326263e+002, 2.25760986e+002,
2.57096619e+002, 1.96577972e+002, 2.57031860e+002,
1.68026199e+002, 2.56873383e+002, 1.39550308e+002,
2.89019318e+002, 3.70481354e+002, 2.88355560e+002,
3.41833252e+002, 2.87601471e+002, 3.12872925e+002,
2.87057190e+002, 2.83485535e+002, 2.86599762e+002,
2.54255096e+002, 2.86174438e+002, 2.25023285e+002,
2.85775940e+002, 1.95715347e+002, 2.85577087e+002,
1.66989502e+002, 2.85395477e+002, 1.38597382e+002,
3.18072754e+002, 3.70118317e+002, 3.17432709e+002,
3.41398743e+002, 3.16917267e+002, 3.12476044e+002,
3.16284363e+002, 2.83001587e+002, 3.15799072e+002,
2.53725845e+002, 3.15411957e+002, 2.24337708e+002,
3.15070374e+002, 1.95034119e+002, 3.14736847e+002,
1.66195313e+002, 3.14439789e+002, 1.37797058e+002,
3.47083588e+002, 3.69678101e+002, 3.46717987e+002,
3.40949524e+002, 3.46185303e+002, 3.12009857e+002,
3.45728088e+002, 2.82454071e+002, 3.45226624e+002,
2.53109863e+002, 3.44883606e+002, 2.23839539e+002,
3.44373535e+002, 1.94399933e+002, 3.43879852e+002,
1.65690643e+002, 3.43438629e+002, 1.37252930e+002,
3.76341522e+002, 3.68972321e+002, 3.76086884e+002,
3.40412842e+002, 3.75708893e+002, 3.11398376e+002,
3.75143494e+002, 2.81901520e+002, 3.74762970e+002,
2.52577988e+002, 3.74223969e+002, 2.23348221e+002,
3.73600891e+002, 1.93979538e+002, 3.72983917e+002,
1.65201294e+002, 3.72517273e+002, 1.36871033e+002,
4.05512115e+002, 3.68243225e+002, 4.05366333e+002,
3.39678650e+002, 4.05090027e+002, 3.10679108e+002,
4.04612366e+002, 2.81203522e+002, 4.04152649e+002,
2.52051605e+002, 4.03539703e+002, 2.22930420e+002,
4.02903351e+002, 1.93625381e+002, 4.02272827e+002,
1.65004440e+002, 4.01353333e+002, 1.36796814e+002 ]
%YAML:1.0
calibration_Time: "08/19/11 20:44:38"
nrOfFrames: 8
image_Width: 640
image_Height: 480
board_Width: 9
board_Height: 6
square_Size: 50.
FixAspectRatio: 1.
# flags: +fix_aspectRatio +fix_principal_point +zero_tangent_dist
flagValue: 14
Camera_Matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 6.5746697810243404e+002, 0., 3.1950000000000000e+002, 0.,
6.5746697810243404e+002, 2.3950000000000000e+002, 0., 0., 1. ]
Distortion_Coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ -4.1802327018241026e-001, 5.0715243805833121e-001, 0., 0.,
-5.7843596847939704e-001 ]
Avg_Reprojection_Error: 3.8441346462381665e-001
Per_View_Reprojection_Errors: !!opencv-matrix
rows: 8
cols: 1
dt: f
data: [ 5.04357755e-001, 4.85754758e-001, 3.99563968e-001,
4.13829178e-001, 3.53570908e-001, 3.21116358e-001,
2.74473161e-001, 2.39761785e-001 ]
# a set of 6-tuples (rotation vector + translation vector) for each view
Extrinsic_Parameters: !!opencv-matrix
rows: 8
cols: 6
dt: d
data: [ -7.8704123655486097e-002, -1.5922384772614945e-001,
3.1166227207451498e+000, 2.4224388101960471e+002,
1.1795590397660339e+002, 6.2576484126093249e+002,
-1.4117480285164308e-001, -1.7917415443804836e-002,
3.1333182268743949e+000, 2.5943034781849354e+002,
1.4039780562976958e+002, 6.3848706527260981e+002,
7.2230525186138789e-002, -7.5445981266787754e-002,
1.5712860749221762e+000, 1.7426560451795339e+002,
-1.9309240362258871e+002, 7.0891416556762647e+002,
2.0367310600105853e-002, 6.8565520026996951e-002,
-5.4313033031644169e-004, -2.0146314940404827e+002,
-1.3305643514116997e+002, 7.4933554744027231e+002,
-3.4468530027734055e-002, 2.1921265175331925e-002,
-1.5731053528054522e+000, -1.1155718744299284e+002,
2.0307615364261443e+002, 8.4915903914333899e+002,
3.7425562109513817e-002, 7.4883169379022230e-002,
-3.6031632305130512e-002, -2.0094505419395196e+002,
-1.1627359108310560e+002, 9.2021583518760133e+002,
6.8105689976949157e-002, 6.4426739692440949e-002,
-7.0967130057087435e-002, -1.9233852871740035e+002,
-1.0334652096641923e+002, 1.0755293563503658e+003,
-5.8017546499862287e-002, -1.6909812666033443e-003,
-1.5876137659782963e+000, -1.0242234847115104e+002,
2.2583088401423066e+002, 1.1125972190244058e+003 ]
Image_points: !!opencv-matrix
rows: 8
cols: 54
dt: "2f"
data: [ 5.58494690e+002, 3.55650085e+002, 5.13314697e+002,
3.59107666e+002, 4.65728333e+002, 3.62133911e+002,
4.15701111e+002, 3.65026459e+002, 3.64399353e+002,
3.67339203e+002, 3.12101196e+002, 3.69211914e+002,
2.59208405e+002, 3.70413513e+002, 2.07456192e+002,
3.71175995e+002, 1.56619507e+002, 3.72176544e+002,
5.60868713e+002, 3.08104828e+002, 5.15191772e+002,
3.10485626e+002, 4.67032959e+002, 3.12660004e+002,
4.16112152e+002, 3.14887177e+002, 3.64010712e+002,
3.16825775e+002, 3.10712372e+002, 3.18640808e+002,
2.56853943e+002, 3.20017365e+002, 2.04168182e+002,
3.20908417e+002, 1.52469528e+002, 3.22105377e+002,
5.62328369e+002, 2.58646881e+002, 5.16396301e+002,
2.59919281e+002, 4.67907654e+002, 2.61257874e+002,
4.16463440e+002, 2.62675537e+002, 3.63546570e+002,
2.64064117e+002, 3.09528137e+002, 2.65489990e+002,
2.54765533e+002, 2.66862030e+002, 2.01299225e+002,
2.67997345e+002, 1.48913437e+002, 2.69627167e+002,
5.63098022e+002, 2.08423523e+002, 5.16782654e+002,
2.08424667e+002, 4.68059296e+002, 2.08661697e+002,
4.16216431e+002, 2.09268982e+002, 3.62888763e+002,
2.10013397e+002, 3.08458557e+002, 2.11074738e+002,
2.53267990e+002, 2.12496582e+002, 1.99121384e+002,
2.14005814e+002, 1.46551376e+002, 2.15851318e+002,
5.62997437e+002, 1.57966492e+002, 5.16406494e+002,
1.56580688e+002, 4.67334900e+002, 1.55756500e+002,
4.15378235e+002, 1.55492874e+002, 3.62096710e+002,
1.55498734e+002, 3.07522827e+002, 1.56133240e+002,
2.52235214e+002, 1.57516571e+002, 1.97876328e+002,
1.59318787e+002, 1.45078247e+002, 1.61638428e+002,
5.62097168e+002, 1.07469536e+002, 5.15766846e+002,
1.04902527e+002, 4.66562866e+002, 1.03045807e+002,
4.14651459e+002, 1.01924713e+002, 3.61240662e+002,
1.01357826e+002, 3.06746613e+002, 1.01582802e+002,
2.51568024e+002, 1.02920105e+002, 1.97343307e+002,
1.04941299e+002, 1.44756821e+002, 1.07737488e+002,
5.68062500e+002, 3.73591125e+002, 5.25272644e+002,
3.77019318e+002, 4.79870941e+002, 3.80086578e+002,
4.31823730e+002, 3.83036652e+002, 3.81995758e+002,
3.85271759e+002, 3.30728729e+002, 3.86998779e+002,
2.78071167e+002, 3.88151031e+002, 2.26231567e+002,
3.88669586e+002, 1.74855331e+002, 3.89197998e+002,
5.69792542e+002, 3.27097382e+002, 5.26866028e+002,
3.29362366e+002, 4.81278229e+002, 3.31532928e+002,
4.32783203e+002, 3.33552185e+002, 3.82408234e+002,
3.35186554e+002, 3.30427399e+002, 3.36404053e+002,
2.77138519e+002, 3.37450958e+002, 2.24525131e+002,
3.37957092e+002, 1.72285507e+002, 3.38503540e+002,
5.70942749e+002, 2.79243713e+002, 5.27789307e+002,
2.80073486e+002, 4.82146576e+002, 2.81226410e+002,
4.33247375e+002, 2.82237427e+002, 3.82503662e+002,
2.83062286e+002, 3.30138885e+002, 2.83794434e+002,
2.76433228e+002, 2.84549286e+002, 2.23158783e+002,
2.84981049e+002, 1.70520218e+002, 2.85720886e+002,
5.71001953e+002, 2.30928329e+002, 5.27846863e+002,
2.30519928e+002, 4.82114563e+002, 2.30268906e+002,
4.33114563e+002, 2.30243515e+002, 3.82384857e+002,
2.30311340e+002, 3.29870392e+002, 2.30454620e+002,
2.76012634e+002, 2.30882156e+002, 2.22529434e+002,
2.31355362e+002, 1.69742065e+002, 2.32063004e+002,
5.70199036e+002, 1.82609772e+002, 5.27030884e+002,
1.80973267e+002, 4.81193573e+002, 1.79573792e+002,
4.32409821e+002, 1.78475616e+002, 3.81855530e+002,
1.77680283e+002, 3.29641937e+002, 1.77092087e+002,
2.75895782e+002, 1.77155502e+002, 2.22438889e+002,
1.77605667e+002, 1.69884583e+002, 1.78365585e+002,
5.69026245e+002, 1.34654831e+002, 5.26171570e+002,
1.31798691e+002, 4.80653503e+002, 1.29171509e+002,
4.31869904e+002, 1.27280067e+002, 3.81419739e+002,
1.25591202e+002, 3.29466644e+002, 1.24407089e+002,
2.76225342e+002, 1.24174736e+002, 2.23024109e+002,
1.24463333e+002, 1.70838898e+002, 1.25398903e+002,
4.73812897e+002, 6.94673386e+001, 4.74245453e+002,
1.12387466e+002, 4.74243347e+002, 1.56034164e+002,
4.73834778e+002, 2.00523651e+002, 4.72891602e+002,
2.44457306e+002, 4.71412811e+002, 2.87981171e+002,
4.69708252e+002, 3.30783173e+002, 4.67558228e+002,
3.71818420e+002, 4.65495667e+002, 4.11996979e+002,
4.31027649e+002, 6.75546722e+001, 4.31269440e+002,
1.10960022e+002, 4.31185486e+002, 1.55113556e+002,
4.30830139e+002, 2.00061066e+002, 4.30168427e+002,
2.44456863e+002, 4.29107544e+002, 2.88479645e+002,
4.27829071e+002, 3.31813507e+002, 4.26131653e+002,
3.73071228e+002, 4.24718811e+002, 4.13476563e+002,
3.86868805e+002, 6.61982269e+001, 3.86895416e+002,
1.09904411e+002, 3.86690216e+002, 1.54396423e+002,
3.86368591e+002, 1.99800369e+002, 3.85792206e+002,
2.44538574e+002, 3.85117279e+002, 2.88826447e+002,
3.84405273e+002, 3.32408020e+002, 3.83303772e+002,
3.74074097e+002, 3.82477448e+002, 4.14638977e+002,
3.41941437e+002, 6.54462357e+001, 3.41628204e+002,
1.09383698e+002, 3.41402344e+002, 1.54105545e+002,
3.41168854e+002, 1.99655045e+002, 3.40816681e+002,
2.44469910e+002, 3.40516937e+002, 2.88975800e+002,
3.40365662e+002, 3.32670990e+002, 3.39935211e+002,
3.74465759e+002, 3.39847626e+002, 4.14742279e+002,
2.96694000e+002, 6.56859589e+001, 2.96075226e+002,
1.09505333e+002, 2.95704895e+002, 1.54202652e+002,
2.95653107e+002, 1.99734131e+002, 2.95589661e+002,
2.44549530e+002, 2.95629547e+002, 2.88889496e+002,
2.96138733e+002, 3.32610931e+002, 2.96520905e+002,
3.74608551e+002, 2.96987091e+002, 4.14774902e+002,
2.51414978e+002, 6.65755463e+001, 2.50681854e+002,
1.10189331e+002, 2.50183380e+002, 1.54658005e+002,
2.50331161e+002, 2.00073761e+002, 2.50590790e+002,
2.44719513e+002, 2.51083817e+002, 2.88868286e+002,
2.52134262e+002, 3.32266937e+002, 2.53097809e+002,
3.74022491e+002, 2.54404007e+002, 4.14018066e+002,
1.49524078e+002, 1.27699501e+002, 1.89511658e+002,
1.25816605e+002, 2.31050888e+002, 1.24260918e+002,
2.74076721e+002, 1.23023209e+002, 3.17643005e+002,
1.22288109e+002, 3.61785889e+002, 1.22105164e+002,
4.06142670e+002, 1.22401566e+002, 4.49623962e+002,
1.23246025e+002, 4.92677216e+002, 1.24087708e+002,
1.48706085e+002, 1.69077423e+002, 1.88827805e+002,
1.67750443e+002, 2.30439865e+002, 1.66769333e+002,
2.73830933e+002, 1.65871170e+002, 3.17596741e+002,
1.65410919e+002, 3.61983459e+002, 1.65327866e+002,
4.06748322e+002, 1.65463974e+002, 4.50450226e+002,
1.66126526e+002, 4.93614655e+002, 1.66970413e+002,
1.48312607e+002, 2.11499451e+002, 1.88574097e+002,
2.10860214e+002, 2.30130676e+002, 2.10261612e+002,
2.73557709e+002, 2.09837143e+002, 3.17542572e+002,
2.09633057e+002, 3.62091248e+002, 2.09732620e+002,
4.06934570e+002, 2.09926758e+002, 4.50914612e+002,
2.10320221e+002, 4.94044495e+002, 2.10900925e+002,
1.48613831e+002, 2.53997177e+002, 1.88797791e+002,
2.53912842e+002, 2.30240204e+002, 2.53975067e+002,
2.73746704e+002, 2.54010208e+002, 3.17718262e+002,
2.54106003e+002, 3.62188965e+002, 2.54205475e+002,
4.06908783e+002, 2.54317505e+002, 4.50824951e+002,
2.54539490e+002, 4.93825714e+002, 2.54753876e+002,
1.49541687e+002, 2.96404175e+002, 1.89357727e+002,
2.97117523e+002, 2.30807007e+002, 2.97805603e+002,
2.74325470e+002, 2.97966522e+002, 3.18042206e+002,
2.98304535e+002, 3.62105774e+002, 2.98552643e+002,
4.06672272e+002, 2.98572418e+002, 4.50363068e+002,
2.98569550e+002, 4.93109894e+002, 2.98516205e+002,
1.50883698e+002, 3.38493195e+002, 1.90633621e+002,
3.39862610e+002, 2.31920990e+002, 3.40869415e+002,
2.74971252e+002, 3.41453766e+002, 3.18235229e+002,
3.41952637e+002, 3.62063477e+002, 3.42314026e+002,
4.06098938e+002, 3.42221802e+002, 4.49477386e+002,
3.42063812e+002, 4.91864716e+002, 3.41727600e+002,
2.36129852e+002, 3.92798004e+002, 2.34999939e+002,
3.56118683e+002, 2.34376099e+002, 3.18607025e+002,
2.33822159e+002, 2.80400696e+002, 2.33565445e+002,
2.42213104e+002, 2.33583069e+002, 2.03937286e+002,
2.34028824e+002, 1.65756607e+002, 2.34613373e+002,
1.28586639e+002, 2.35190308e+002, 9.18279037e+001,
2.73031616e+002, 3.93267242e+002, 2.72295166e+002,
3.56342743e+002, 2.71799347e+002, 3.18847412e+002,
2.71418854e+002, 2.80287872e+002, 2.71161469e+002,
2.41881134e+002, 2.71248962e+002, 2.03348145e+002,
2.71379303e+002, 1.64895874e+002, 2.71946045e+002,
1.27450935e+002, 2.72322418e+002, 9.06900787e+001,
3.10670715e+002, 3.93568848e+002, 3.10389160e+002,
3.56545959e+002, 3.10084625e+002, 3.18814514e+002,
3.09801544e+002, 2.80242737e+002, 3.09678711e+002,
2.41574814e+002, 3.09779663e+002, 2.02989838e+002,
3.09842712e+002, 1.64338043e+002, 3.10076782e+002,
1.26870911e+002, 3.10243286e+002, 8.98413315e+001,
3.48618134e+002, 3.93563202e+002, 3.48617065e+002,
3.56472382e+002, 3.48608795e+002, 3.18855621e+002,
3.48544556e+002, 2.80011017e+002, 3.48556396e+002,
2.41388168e+002, 3.48585388e+002, 2.02692429e+002,
3.48435089e+002, 1.64099731e+002, 3.48442902e+002,
1.26549957e+002, 3.48338043e+002, 8.98002014e+001,
3.86625610e+002, 3.93188599e+002, 3.87047729e+002,
3.56377594e+002, 3.87306274e+002, 3.18714752e+002,
3.87337799e+002, 2.79868896e+002, 3.87402740e+002,
2.41228760e+002, 3.87295166e+002, 2.02695313e+002,
3.87030273e+002, 1.64203415e+002, 3.86741211e+002,
1.26606262e+002, 3.86337311e+002, 8.99655075e+001,
4.24534088e+002, 3.92702545e+002, 4.25310822e+002,
3.55900452e+002, 4.25869019e+002, 3.18160614e+002,
4.25909790e+002, 2.79615753e+002, 4.25977295e+002,
2.41165100e+002, 4.25826477e+002, 2.02876389e+002,
4.25331665e+002, 1.64527618e+002, 4.24775787e+002,
1.27097328e+002, 4.23985138e+002, 9.08176651e+001,
1.79142670e+002, 1.58573654e+002, 2.12791580e+002,
1.56291031e+002, 2.47140106e+002, 1.54265656e+002,
2.82607300e+002, 1.52373688e+002, 3.18175507e+002,
1.50692184e+002, 3.54185852e+002, 1.49404175e+002,
3.90455200e+002, 1.48229370e+002, 4.26106689e+002,
1.47507843e+002, 4.61576141e+002, 1.46712479e+002,
1.80388336e+002, 1.93027603e+002, 2.14026459e+002,
1.91128204e+002, 2.48376541e+002, 1.89414978e+002,
2.83795807e+002, 1.87720856e+002, 3.19472473e+002,
1.86192383e+002, 3.55483826e+002, 1.84929199e+002,
3.91970764e+002, 1.83747040e+002, 4.27654572e+002,
1.82931534e+002, 4.63295227e+002, 1.81977234e+002,
1.81914261e+002, 2.27955460e+002, 2.15291260e+002,
2.26512482e+002, 2.49628265e+002, 2.25067520e+002,
2.85066406e+002, 2.23593185e+002, 3.20846680e+002,
2.22337708e+002, 3.56862885e+002, 2.21191040e+002,
3.93279907e+002, 2.19905640e+002, 4.29202271e+002,
2.18870361e+002, 4.64728424e+002, 2.17972977e+002,
1.83496948e+002, 2.62963226e+002, 2.16930527e+002,
2.61755219e+002, 2.51115829e+002, 2.60777222e+002,
2.86553406e+002, 2.59500336e+002, 3.22299896e+002,
2.58380737e+002, 3.58307648e+002, 2.57236694e+002,
3.94551819e+002, 2.56009125e+002, 4.30358948e+002,
2.54925797e+002, 4.65684998e+002, 2.54021484e+002,
1.85461685e+002, 2.97687378e+002, 2.18712234e+002,
2.96999207e+002, 2.52770218e+002, 2.96270752e+002,
2.88213776e+002, 2.95168213e+002, 3.23698334e+002,
2.94233032e+002, 3.59477722e+002, 2.93170715e+002,
3.95647766e+002, 2.91897400e+002, 4.31309845e+002,
2.90856995e+002, 4.66494110e+002, 2.89726410e+002,
1.87661331e+002, 3.32186188e+002, 2.20767746e+002,
3.31906250e+002, 2.54839096e+002, 3.31398651e+002,
2.89963745e+002, 3.30524139e+002, 3.25207642e+002,
3.29771820e+002, 3.60686035e+002, 3.28762695e+002,
3.96576447e+002, 3.27542206e+002, 4.31994415e+002,
3.26294189e+002, 4.66894653e+002, 3.24949921e+002,
2.03543015e+002, 1.77473557e+002, 2.32777847e+002,
1.74712509e+002, 2.62628723e+002, 1.72331970e+002,
2.93045898e+002, 1.69686768e+002, 3.23527618e+002,
1.67496246e+002, 3.54206787e+002, 1.65446075e+002,
3.85180176e+002, 1.63360580e+002, 4.15484253e+002,
1.61536423e+002, 4.45720947e+002, 1.59896164e+002,
2.05864395e+002, 2.07228104e+002, 2.35242096e+002,
2.04699326e+002, 2.64853973e+002, 2.02407455e+002,
2.95353882e+002, 1.99972321e+002, 3.25811890e+002,
1.97671921e+002, 3.56471252e+002, 1.95763168e+002,
3.87280548e+002, 1.93597977e+002, 4.17615814e+002,
1.91867371e+002, 4.48018677e+002, 1.90067413e+002,
2.08421249e+002, 2.37166977e+002, 2.37513824e+002,
2.34982773e+002, 2.67261261e+002, 2.32802841e+002,
2.97555817e+002, 2.30466080e+002, 3.28118103e+002,
2.28462463e+002, 3.58699707e+002, 2.26417038e+002,
3.89468842e+002, 2.24356827e+002, 4.19895996e+002,
2.22421921e+002, 4.50077850e+002, 2.20683517e+002,
2.11095444e+002, 2.66940186e+002, 2.40241348e+002,
2.64970093e+002, 2.69563019e+002, 2.63153290e+002,
2.99863464e+002, 2.60983551e+002, 3.30282440e+002,
2.58911560e+002, 3.60724792e+002, 2.56935730e+002,
3.91487915e+002, 2.54799423e+002, 4.21789093e+002,
2.52929688e+002, 4.51818481e+002, 2.51059357e+002,
2.13829117e+002, 2.96591217e+002, 2.42742859e+002,
2.94884583e+002, 2.72209076e+002, 2.93215668e+002,
3.02402985e+002, 2.91230591e+002, 3.32536072e+002,
2.89165192e+002, 3.62860901e+002, 2.87413605e+002,
3.93481842e+002, 2.85199615e+002, 4.23728851e+002,
2.83277496e+002, 4.53453094e+002, 2.81229309e+002,
2.16799316e+002, 3.25975220e+002, 2.45605515e+002,
3.24619904e+002, 2.74777344e+002, 3.22958679e+002,
3.04762817e+002, 3.21008057e+002, 3.34797150e+002,
3.19291443e+002, 3.65005798e+002, 3.17295044e+002,
3.95311981e+002, 3.15296021e+002, 4.25312592e+002,
3.13086945e+002, 4.54931152e+002, 3.11027130e+002,
2.60550232e+002, 3.70739563e+002, 2.59674011e+002,
3.42115936e+002, 2.58910492e+002, 3.13278015e+002,
2.58195618e+002, 2.84013580e+002, 2.57727173e+002,
2.55017166e+002, 2.57326263e+002, 2.25760986e+002,
2.57096619e+002, 1.96577972e+002, 2.57031860e+002,
1.68026199e+002, 2.56873383e+002, 1.39550308e+002,
2.89019318e+002, 3.70481354e+002, 2.88355560e+002,
3.41833252e+002, 2.87601471e+002, 3.12872925e+002,
2.87057190e+002, 2.83485535e+002, 2.86599762e+002,
2.54255096e+002, 2.86174438e+002, 2.25023285e+002,
2.85775940e+002, 1.95715347e+002, 2.85577087e+002,
1.66989502e+002, 2.85395477e+002, 1.38597382e+002,
3.18072754e+002, 3.70118317e+002, 3.17432709e+002,
3.41398743e+002, 3.16917267e+002, 3.12476044e+002,
3.16284363e+002, 2.83001587e+002, 3.15799072e+002,
2.53725845e+002, 3.15411957e+002, 2.24337708e+002,
3.15070374e+002, 1.95034119e+002, 3.14736847e+002,
1.66195313e+002, 3.14439789e+002, 1.37797058e+002,
3.47083588e+002, 3.69678101e+002, 3.46717987e+002,
3.40949524e+002, 3.46185303e+002, 3.12009857e+002,
3.45728088e+002, 2.82454071e+002, 3.45226624e+002,
2.53109863e+002, 3.44883606e+002, 2.23839539e+002,
3.44373535e+002, 1.94399933e+002, 3.43879852e+002,
1.65690643e+002, 3.43438629e+002, 1.37252930e+002,
3.76341522e+002, 3.68972321e+002, 3.76086884e+002,
3.40412842e+002, 3.75708893e+002, 3.11398376e+002,
3.75143494e+002, 2.81901520e+002, 3.74762970e+002,
2.52577988e+002, 3.74223969e+002, 2.23348221e+002,
3.73600891e+002, 1.93979538e+002, 3.72983917e+002,
1.65201294e+002, 3.72517273e+002, 1.36871033e+002,
4.05512115e+002, 3.68243225e+002, 4.05366333e+002,
3.39678650e+002, 4.05090027e+002, 3.10679108e+002,
4.04612366e+002, 2.81203522e+002, 4.04152649e+002,
2.52051605e+002, 4.03539703e+002, 2.22930420e+002,
4.02903351e+002, 1.93625381e+002, 4.02272827e+002,
1.65004440e+002, 4.01353333e+002, 1.36796814e+002 ]
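
The blocks above are the serialized output of OpenCV's camera calibration sample: intrinsics (Camera_Matrix), distortion coefficients, per-view reprojection errors, extrinsic 6-tuples (rotation vector + translation vector) and the detected chessboard corners (Image_points). A minimal sketch of reading such a file back with cv::FileStorage follows; the file name is a placeholder and the node names match the keys above.

// Minimal sketch: reading the calibration result above with cv::FileStorage.
// "out_camera_data.yml" is a placeholder name; the node names match the keys in the data.
#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    cv::FileStorage fs("out_camera_data.yml", cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        std::cerr << "Could not open the calibration file." << std::endl;
        return -1;
    }

    cv::Mat cameraMatrix, distCoeffs;
    fs["Camera_Matrix"] >> cameraMatrix;            // 3x3 intrinsic matrix
    fs["Distortion_Coefficients"] >> distCoeffs;    // k1, k2, p1, p2, k3

    double avgError = (double)fs["Avg_Reprojection_Error"];

    std::cout << "Camera matrix:" << std::endl << cameraMatrix << std::endl;
    std::cout << "Distortion coefficients:" << std::endl << distCoeffs << std::endl;
    std::cout << "Average reprojection error: " << avgError << std::endl;

    fs.release();
    return 0;
}
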

View File

@@ -31,7 +31,7 @@ int main( int argc, char** argv )
//-- And create the image in which we will save our disparities
Mat imgDisparity16S = Mat( imgLeft.rows, imgLeft.cols, CV_16S );
Mat imgDisparity8U = Mat( imgLeft.rows, imgLeft.cols, CV_8UC1 );
if( !imgLeft.data || !imgRight.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@@ -40,7 +40,7 @@ int main( int argc, char** argv )
int SADWindowSize = 21; /**< Size of the block window. Must be odd */
StereoBM sbm( StereoBM::BASIC_PRESET,
ndisparities,
ndisparities,
SADWindowSize );
//-- 3. Calculate the disparity image
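
The two hunks above only show fragments of the block-matching sample; a minimal end-to-end sketch of the same flow follows (OpenCV 2.x StereoBM API, placeholder image names), so the role of ndisparities, SADWindowSize and the 16S-to-8U conversion is visible in one place. Details may differ from the actual sample.

// Sketch of the full block-matching flow (OpenCV 2.x; "left.png"/"right.png" are placeholders).
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

int main()
{
    cv::Mat imgLeft  = cv::imread("left.png",  CV_LOAD_IMAGE_GRAYSCALE);
    cv::Mat imgRight = cv::imread("right.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!imgLeft.data || !imgRight.data)
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    cv::Mat imgDisparity16S(imgLeft.rows, imgLeft.cols, CV_16S);
    cv::Mat imgDisparity8U (imgLeft.rows, imgLeft.cols, CV_8UC1);

    int ndisparities  = 16 * 5;    // disparity search range; must be a multiple of 16
    int SADWindowSize = 21;        // block window size; must be odd

    cv::StereoBM sbm(cv::StereoBM::BASIC_PRESET, ndisparities, SADWindowSize);
    sbm(imgLeft, imgRight, imgDisparity16S, CV_16S);        // compute the disparity map

    double minVal, maxVal;
    cv::minMaxLoc(imgDisparity16S, &minVal, &maxVal);       // range for display scaling
    imgDisparity16S.convertTo(imgDisparity8U, CV_8UC1, 255 / (maxVal - minVal));

    cv::imshow("Disparity", imgDisparity8U);
    cv::waitKey(0);
    return 0;
}
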

View File

@@ -26,14 +26,14 @@ int main( int argc, char **argv ){
char atom_window[] = "Drawing 1: Atom";
char rook_window[] = "Drawing 2: Rook";
/// Create black empty images
/// Create black empty images
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
/// 1. Draw a simple atom:
/// -----------------------
/// 1.a. Creating ellipses
/// 1.a. Creating ellipses
MyEllipse( atom_image, 90 );
MyEllipse( atom_image, 0 );
MyEllipse( atom_image, 45 );
@@ -50,13 +50,13 @@ int main( int argc, char **argv ){
/// 2.b. Creating rectangles
rectangle( rook_image,
Point( 0, 7*w/8.0 ),
Point( w, w),
Scalar( 0, 255, 255 ),
-1,
8 );
Point( 0, 7*w/8.0 ),
Point( w, w),
Scalar( 0, 255, 255 ),
-1,
8 );
/// 2.c. Create a few lines
/// 2.c. Create a few lines
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
@@ -84,14 +84,14 @@ void MyEllipse( Mat img, double angle )
int lineType = 8;
ellipse( img,
Point( w/2.0, w/2.0 ),
Size( w/4.0, w/16.0 ),
angle,
0,
360,
Scalar( 255, 0, 0 ),
thickness,
lineType );
Point( w/2.0, w/2.0 ),
Size( w/4.0, w/16.0 ),
angle,
0,
360,
Scalar( 255, 0, 0 ),
thickness,
lineType );
}
/**
@@ -103,12 +103,12 @@ void MyFilledCircle( Mat img, Point center )
int thickness = -1;
int lineType = 8;
circle( img,
center,
w/32.0,
Scalar( 0, 0, 255 ),
thickness,
lineType );
circle( img,
center,
w/32.0,
Scalar( 0, 0, 255 ),
thickness,
lineType );
}
/**
@@ -146,11 +146,11 @@ void MyPolygon( Mat img )
int npt[] = { 20 };
fillPoly( img,
ppt,
npt,
ppt,
npt,
1,
Scalar( 255, 255, 255 ),
lineType );
Scalar( 255, 255, 255 ),
lineType );
}
/**
@@ -161,12 +161,12 @@ void MyLine( Mat img, Point start, Point end )
{
int thickness = 2;
int lineType = 8;
line( img,
start,
end,
Scalar( 0, 0, 0 ),
thickness,
lineType );
line( img,
start,
end,
Scalar( 0, 0, 0 ),
thickness,
lineType );
}
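
The MyPolygon hunk above passes ppt and npt to fillPoly without showing how they are assembled; a self-contained sketch of the same call pattern follows, using a simple triangle instead of the sample's 20-point rook outline (window size and names are illustrative).

// Illustrative fillPoly call pattern: an array of vertex arrays plus a matching vertex-count array.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    const int w = 400;
    cv::Mat img = cv::Mat::zeros( w, w, CV_8UC3 );

    cv::Point triangle[1][3];
    triangle[0][0] = cv::Point(   w/2,   w/4 );
    triangle[0][1] = cv::Point(   w/4, 3*w/4 );
    triangle[0][2] = cv::Point( 3*w/4, 3*w/4 );

    const cv::Point* ppt[1] = { triangle[0] };   // one contour
    int npt[] = { 3 };                           // three vertices in that contour

    cv::fillPoly( img, ppt, npt, 1, cv::Scalar( 255, 255, 255 ), 8 );

    cv::imshow( "fillPoly sketch", img );
    cv::waitKey(0);
    return 0;
}
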

View File

@@ -11,7 +11,7 @@
using namespace cv;
/// Global Variables
const int NUMBER = 100;
const int NUMBER = 100;
const int DELAY = 5;
const int window_width = 900;
@@ -44,14 +44,14 @@ int main( int argc, char** argv )
char window_name[] = "Drawing_2 Tutorial";
/// Also create a random object (RNG)
RNG rng( 0xFFFFFFFF );
RNG rng( 0xFFFFFFFF );
/// Initialize a matrix filled with zeros
Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
/// Show it in a window during DELAY ms
imshow( window_name, image );
imshow( window_name, image );
waitKey( DELAY );
/// Now, let's draw some lines
c = Drawing_Random_Lines(image, window_name, rng);
if( c != 0 ) return 0;
@@ -145,7 +145,7 @@ int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng )
imshow( window_name, image );
if( waitKey( DELAY ) >= 0 )
{ return -1; }
{ return -1; }
}
return 0;
@@ -163,7 +163,7 @@ int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
Point center;
center.x = rng.uniform(x_1, x_2);
center.y = rng.uniform(y_1, y_2);
Size axes;
axes.width = rng.uniform(0, 200);
axes.height = rng.uniform(0, 200);
@@ -172,10 +172,10 @@ int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
ellipse( image, center, axes, angle, angle - 100, angle + 200,
randomColor(rng), rng.uniform(-1,9), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
if( waitKey(DELAY) >= 0 )
{ return -1; }
}
@@ -194,28 +194,28 @@ int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng )
Point pt[2][3];
pt[0][0].x = rng.uniform(x_1, x_2);
pt[0][0].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][2].x = rng.uniform(x_1, x_2);
pt[0][2].y = rng.uniform(y_1, y_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].y = rng.uniform(y_1, y_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].y = rng.uniform(y_1, y_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].y = rng.uniform(y_1, y_2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
polylines(image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType);
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
}
return 0;
}
}
/**
* @function Drawing_Random_Filled_Polygons
@@ -229,22 +229,22 @@ int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng )
Point pt[2][3];
pt[0][0].x = rng.uniform(x_1, x_2);
pt[0][0].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][2].x = rng.uniform(x_1, x_2);
pt[0][2].y = rng.uniform(y_1, y_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].y = rng.uniform(y_1, y_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].y = rng.uniform(y_1, y_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].y = rng.uniform(y_1, y_2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
fillPoly( image, ppt, npt, 2, randomColor(rng), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -264,10 +264,10 @@ int Drawing_Random_Circles( Mat image, char* window_name, RNG rng )
Point center;
center.x = rng.uniform(x_1, x_2);
center.y = rng.uniform(y_1, y_2);
circle( image, center, rng.uniform(0, 300), randomColor(rng),
rng.uniform(-1, 9), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -291,7 +291,7 @@ int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
putText( image, "Testing text rendering", org, rng.uniform(0,8),
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -308,7 +308,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG rng )
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
int lineType = 8;
Mat image2;
for( int i = 0; i < 255; i += 2 )
@@ -316,7 +316,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG rng )
image2 = image - Scalar::all(i);
putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType );
imshow( window_name, image2 );
if( waitKey(DELAY) >= 0 )
{ return -1; }
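
The hunks above repeatedly call a randomColor(rng) helper that is not part of the excerpt; a sketch of what such a helper typically looks like follows (the sample's actual implementation may differ).

#include <opencv2/core/core.hpp>

// Sketch of the randomColor() helper used by the random-drawing hunks above; illustrative only.
static cv::Scalar randomColor( cv::RNG& rng )
{
    int icolor = (unsigned) rng;                 // one random 32-bit draw
    return cv::Scalar(  icolor        & 255,     // blue
                       (icolor >> 8)  & 255,     // green
                       (icolor >> 16) & 255 );   // red
}
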

View File

@@ -1,78 +1,78 @@
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void help(char* progName)
{
cout << endl
<< "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl
<< "The dft of an image is taken and it's power spectrum is displayed." << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] " << endl << endl;
}
int main(int argc, char ** argv)
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -1;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize( I.rows );
int n = getOptimalDFTSize( I.cols ); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
// crop the spectrum, if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
// rearrange the quadrants of Fourier image so that the origin is at the image center
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float values between 0 and 1).
imshow("Input Image" , I ); // Show the result
imshow("spectrum magnitude", magI);
waitKey();
return 0;
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void help(char* progName)
{
cout << endl
<< "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl
<< "The dft of an image is taken and it's power spectrum is displayed." << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] " << endl << endl;
}
int main(int argc, char ** argv)
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -1;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize( I.rows );
int n = getOptimalDFTSize( I.cols ); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
// crop the spectrum, if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
// rearrange the quadrants of Fourier image so that the origin is at the image center
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float values between 0 and 1).
imshow("Input Image" , I ); // Show the result
imshow("spectrum magnitude", magI);
waitKey();
return 0;
}
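
The program above stops at displaying the spectrum; a short continuation sketch follows, assuming the complexI matrix from the sample, in case you want to check that the transform round-trips back to the input image.

// Continuation sketch (assumes complexI and the using-directives from the program above):
Mat inverseTransform;
dft(complexI, inverseTransform, DFT_INVERSE | DFT_REAL_OUTPUT);      // back to the spatial domain
normalize(inverseTransform, inverseTransform, 0, 1, CV_MINMAX);      // scale for display
imshow("Reconstructed", inverseTransform);
waitKey();
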

View File

@@ -7,7 +7,7 @@ using namespace std;
void help(char** av)
{
cout << endl
cout << endl
<< av[0] << " shows the usage of the OpenCV serialization functionality." << endl
<< "usage: " << endl
<< av[0] << " outputfile.yml.gz" << endl
@@ -54,8 +54,8 @@ void read(const FileNode& node, MyData& x, const MyData& default_value = MyData(
}
// This function will print our custom class to the console
ostream& operator<<(ostream& out, const MyData& m)
{
ostream& operator<<(ostream& out, const MyData& m)
{
out << "{ id = " << m.id << ", ";
out << "X = " << m.X << ", ";
out << "A = " << m.A << "}";
@@ -82,10 +82,10 @@ int main(int ac, char** av)
fs << "strings" << "["; // text - string sequence
fs << "image1.jpg" << "Awesomeness" << "baboon.jpg";
fs << "]"; // close sequence
fs << "Mapping"; // text - mapping
fs << "{" << "One" << 1;
fs << "Two" << 2 << "}";
fs << "Two" << 2 << "}";
fs << "R" << R; // cv::Mat
fs << "T" << T;
@@ -98,10 +98,10 @@ int main(int ac, char** av)
{//read
cout << endl << "Reading: " << endl;
FileStorage fs;
FileStorage fs;
fs.open(filename, FileStorage::READ);
int itNr;
int itNr;
//fs["iterationNr"] >> itNr;
itNr = (int) fs["iterationNr"];
cout << itNr;
@@ -122,12 +122,12 @@ int main(int ac, char** av)
FileNodeIterator it = n.begin(), it_end = n.end(); // Go through the node
for (; it != it_end; ++it)
cout << (string)*it << endl;
n = fs["Mapping"]; // Read mappings from a sequence
cout << "Two " << (int)(n["Two"]) << "; ";
cout << "One " << (int)(n["One"]) << endl << endl;
cout << "Two " << (int)(n["Two"]) << "; ";
cout << "One " << (int)(n["One"]) << endl << endl;
MyData m;
Mat R, T;
@@ -136,18 +136,18 @@ int main(int ac, char** av)
fs["T"] >> T;
fs["MyData"] >> m; // Read your own structure_
cout << endl
cout << endl
<< "R = " << R << endl;
cout << "T = " << T << endl << endl;
cout << "MyData = " << endl << m << endl << endl;
//Show default behavior for non existing nodes
cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
fs["NonExisting"] >> m;
cout << endl << "NonExisting = " << endl << m << endl;
}
cout << endl
cout << endl
<< "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;
return 0;
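
The hunks above show the free read() hook and the operator<< for a custom MyData class, but not the class itself or its write() side; a sketch of what that serialization pair typically looks like for cv::FileStorage follows (member names are taken from the operator<< above; details of the real sample may differ).

#include <opencv2/core/core.hpp>
#include <string>

// Sketch of the MyData class serialized by the hunks above; illustrative only.
class MyData
{
public:
    MyData() : A(0), X(0), id() {}

    void write(cv::FileStorage& fs) const            // invoked via fs << "MyData" << m
    {
        fs << "{" << "A" << A << "X" << X << "id" << id << "}";
    }
    void read(const cv::FileNode& node)              // invoked via fs["MyData"] >> m
    {
        A  = (int)node["A"];
        X  = (double)node["X"];
        id = (std::string)node["id"];
    }

    int A;
    double X;
    std::string id;
};

// A free wrapper like the read() shown in the hunk above forwards to the members:
static void write(cv::FileStorage& fs, const std::string&, const MyData& x)
{
    x.write(fs);
}
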

View File

@@ -1,216 +1,216 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
using namespace std;
using namespace cv;
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to scan image objects in OpenCV (cv::Mat). As use case"
<< " we take an input image and divide the native color palette (255) with the " << endl
<< "input. Shows C operator[] method, iterators and at function for on-the-fly item address calculation."<< endl
<< "Usage:" << endl
<< "./howToScanImages imageNameToUse divideWith [G]" << endl
<< "if you add a G parameter the image is processed in gray scale" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* table);
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* table);
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar * table);
int main( int argc, char* argv[])
{
help();
if (argc < 3)
{
cout << "Not enough parameters" << endl;
return -1;
}
Mat I, J;
if( argc == 4 && !strcmp(argv[3],"G") )
I = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
else
I = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (!I.data)
{
cout << "The image" << argv[1] << " could not be loaded." << endl;
return -1;
}
int divideWith; // convert our input string to number - C++ style
stringstream s;
s << argv[2];
s >> divideWith;
if (!s)
{
cout << "Invalid number entered for dividing. " << endl;
return -1;
}
uchar table[256];
for (int i = 0; i < 256; ++i)
table[i] = divideWith* (i/divideWith);
const int times = 100;
double t;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceC(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the C operator [] (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceIterator(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the iterator (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
ScanImageAndReduceRandomAccess(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the on-the-fly address generation - at function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
Mat lookUpTable(1, 256, CV_8U);
uchar* p = lookUpTable.data;
for( int i = 0; i < 256; ++i)
p[i] = table[i];
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
LUT(I, lookUpTable, J);
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the LUT function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
return 0;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
int channels = I.channels();
int nRows = I.rows;
int nCols = I.cols * channels;
if (I.isContinuous())
{
nCols *= nRows;
nRows = 1;
}
int i,j;
uchar* p;
for( i = 0; i < nRows; ++i)
{
p = I.ptr<uchar>(i);
for ( j = 0; j < nCols; ++j)
{
p[j] = table[p[j]];
}
}
return I;
}
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
MatIterator_<uchar> it, end;
for( it = I.begin<uchar>(), end = I.end<uchar>(); it != end; ++it)
*it = table[*it];
break;
}
case 3:
{
MatIterator_<Vec3b> it, end;
for( it = I.begin<Vec3b>(), end = I.end<Vec3b>(); it != end; ++it)
{
(*it)[0] = table[(*it)[0]];
(*it)[1] = table[(*it)[1]];
(*it)[2] = table[(*it)[2]];
}
}
}
return I;
}
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
I.at<uchar>(i,j) = table[I.at<uchar>(i,j)];
break;
}
case 3:
{
Mat_<Vec3b> _I = I;
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
{
_I(i,j)[0] = table[_I(i,j)[0]];
_I(i,j)[1] = table[_I(i,j)[1]];
_I(i,j)[2] = table[_I(i,j)[2]];
}
I = _I;
break;
}
}
return I;
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
using namespace std;
using namespace cv;
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to scan image objects in OpenCV (cv::Mat). As use case"
<< " we take an input image and divide the native color palette (255) with the " << endl
<< "input. Shows C operator[] method, iterators and at function for on-the-fly item address calculation."<< endl
<< "Usage:" << endl
<< "./howToScanImages imageNameToUse divideWith [G]" << endl
<< "if you add a G parameter the image is processed in gray scale" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* table);
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* table);
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar * table);
int main( int argc, char* argv[])
{
help();
if (argc < 3)
{
cout << "Not enough parameters" << endl;
return -1;
}
Mat I, J;
if( argc == 4 && !strcmp(argv[3],"G") )
I = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
else
I = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (!I.data)
{
cout << "The image" << argv[1] << " could not be loaded." << endl;
return -1;
}
int divideWith; // convert our input string to number - C++ style
stringstream s;
s << argv[2];
s >> divideWith;
if (!s)
{
cout << "Invalid number entered for dividing. " << endl;
return -1;
}
uchar table[256];
for (int i = 0; i < 256; ++i)
table[i] = divideWith* (i/divideWith);
const int times = 100;
double t;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceC(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the C operator [] (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceIterator(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the iterator (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
ScanImageAndReduceRandomAccess(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the on-the-fly address generation - at function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
Mat lookUpTable(1, 256, CV_8U);
uchar* p = lookUpTable.data;
for( int i = 0; i < 256; ++i)
p[i] = table[i];
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
LUT(I, lookUpTable, J);
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the LUT function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
return 0;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
int channels = I.channels();
int nRows = I.rows;
int nCols = I.cols * channels;
if (I.isContinuous())
{
nCols *= nRows;
nRows = 1;
}
int i,j;
uchar* p;
for( i = 0; i < nRows; ++i)
{
p = I.ptr<uchar>(i);
for ( j = 0; j < nCols; ++j)
{
p[j] = table[p[j]];
}
}
return I;
}
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
MatIterator_<uchar> it, end;
for( it = I.begin<uchar>(), end = I.end<uchar>(); it != end; ++it)
*it = table[*it];
break;
}
case 3:
{
MatIterator_<Vec3b> it, end;
for( it = I.begin<Vec3b>(), end = I.end<Vec3b>(); it != end; ++it)
{
(*it)[0] = table[(*it)[0]];
(*it)[1] = table[(*it)[1]];
(*it)[2] = table[(*it)[2]];
}
}
}
return I;
}
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
I.at<uchar>(i,j) = table[I.at<uchar>(i,j)];
break;
}
case 3:
{
Mat_<Vec3b> _I = I;
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
{
_I(i,j)[0] = table[_I(i,j)[0]];
_I(i,j)[1] = table[_I(i,j)[1]];
_I(i,j)[2] = table[_I(i,j)[2]];
}
I = _I;
break;
}
}
return I;
}
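
As a quick numeric check of the reduction table built above: with divideWith = 10, table[i] = 10*(i/10) maps 0..9 to 0, 10..19 to 10, and so on, so the 256 input levels collapse to 26 output levels. A tiny standalone check:

// Worked example of the lookup-table reduction (divideWith = 10 is illustrative).
#include <iostream>

int main()
{
    const int divideWith = 10;
    unsigned char table[256];
    for (int i = 0; i < 256; ++i)
        table[i] = (unsigned char)(divideWith * (i / divideWith));

    // e.g. 7 -> 0, 42 -> 40, 255 -> 250
    std::cout << (int)table[7] << " " << (int)table[42] << " "
              << (int)table[255] << std::endl;
    return 0;
}
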

View File

@@ -1,134 +1,134 @@
#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv; // The new C++ interface API is inside this namespace. Import it.
using namespace std;
void help( char* progName)
{
cout << endl << progName
<< " shows how to use cv::Mat and IplImages together (converting back and forth)." << endl
<< "Also contains example for image read, spliting the planes, merging back and " << endl
<< " color conversion, plus iterating through pixels. " << endl
<< "Usage:" << endl
<< progName << " [image-name Default: lena.jpg]" << endl << endl;
}
// comment out the define to use only the latest C++ API
#define DEMO_MIXED_API_USE
int main( int argc, char** argv )
{
help(argv[0]);
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#ifdef DEMO_MIXED_API_USE
Ptr<IplImage> IplI = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
if(IplI.empty())
{
cerr << "Can not load image " << imagename << endl;
return -1;
}
Mat I(IplI); // Convert to the new style container. Only header created. Image not copied.
#else
Mat I = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
if( I.empty() ) // same as if( !I.data )
{
cerr << "Can not load image " << imagename << endl;
return -1;
}
#endif
// convert image to YUV color space. The output image will be created automatically.
Mat I_YUV;
cvtColor(I, I_YUV, CV_BGR2YCrCb);
vector<Mat> planes; // Use the STL's vector structure to store multiple Mat objects
split(I_YUV, planes); // split the image into separate color planes (Y U V)
#if 1 // change it to 0 if you want to see a blurred and noisy version of this processing
// Mat scanning
// Method 1. process Y plane using an iterator
MatIterator_<uchar> it = planes[0].begin<uchar>(), it_end = planes[0].end<uchar>();
for(; it != it_end; ++it)
{
double v = *it * 1.7 + rand()%21 - 10;
*it = saturate_cast<uchar>(v*v/255);
}
for( int y = 0; y < I_YUV.rows; y++ )
{
// Method 2. process the first chroma plane using pre-stored row pointer.
uchar* Uptr = planes[1].ptr<uchar>(y);
for( int x = 0; x < I_YUV.cols; x++ )
{
Uptr[x] = saturate_cast<uchar>((Uptr[x]-128)/2 + 128);
// Method 3. process the second chroma plane using individual element access
uchar& Vxy = planes[2].at<uchar>(y, x);
Vxy = saturate_cast<uchar>((Vxy-128)/2 + 128);
}
}
#else
Mat noisyI(I.size(), CV_8U); // Create a matrix of the specified size and type
// Fills the matrix with normally distributed random values (here mean 128, standard deviation 20).
// There is also randu() for uniformly distributed random number generation
randn(noisyI, Scalar::all(128), Scalar::all(20));
// blur the noisyI a bit, kernel size is 3x3 and both sigma's are set to 0.5
GaussianBlur(noisyI, noisyI, Size(3, 3), 0.5, 0.5);
const double brightness_gain = 0;
const double contrast_gain = 1.7;
#ifdef DEMO_MIXED_API_USE
// To pass the new matrices to the functions that only work with IplImage or CvMat do:
// step 1) Convert the headers (tip: data will not be copied).
// step 2) call the function (tip: to pass a pointer do not forget unary "&" to form pointers)
IplImage cv_planes_0 = planes[0], cv_noise = noisyI;
cvAddWeighted(&cv_planes_0, contrast_gain, &cv_noise, 1, -128 + brightness_gain, &cv_planes_0);
#else
addWeighted(planes[0], contrast_gain, noisyI, 1, -128 + brightness_gain, planes[0]);
#endif
const double color_scale = 0.5;
// Mat::convertTo() replaces cvConvertScale.
// One must explicitly specify the output matrix type (we keep it intact - planes[1].type())
planes[1].convertTo(planes[1], planes[1].type(), color_scale, 128*(1-color_scale));
// alternative form of cv::convertScale if we know the datatype at compile time ("uchar" here).
// This expression will not create any temporary arrays (so it should be almost as fast as the version above)
planes[2] = Mat_<uchar>(planes[2]*color_scale + 128*(1-color_scale));
// Mat::mul replaces cvMul(). Again, no temporary arrays are created in case of simple expressions.
planes[0] = planes[0].mul(planes[0], 1./255);
#endif
merge(planes, I_YUV); // now merge the results back
cvtColor(I_YUV, I, CV_YCrCb2BGR); // and produce the output RGB image
namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images
#ifdef DEMO_MIXED_API_USE
// this is to demonstrate that I and IplI really share the data - the result of the above
// processing is stored in I and thus in IplI too.
cvShowImage("image with grain", IplI);
#else
imshow("image with grain", I); // the new MATLAB style function show
#endif
waitKey();
// Tip: No memory freeing is required!
// All the memory will be automatically released by the Vector<>, Mat and Ptr<> destructor.
return 0;
}

View File

@@ -1,86 +1,86 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
void help(char* progName)
{
cout << endl
<< "This program shows how to filter images with mask: the write it yourself and the"
<< "filter2d way. " << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] [G -- grayscale] " << endl << endl;
}
void Sharpen(const Mat& myImage,Mat& Result);
int main( int argc, char* argv[])
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I, J, K;
if (argc >= 3 && !strcmp("G", argv[2]))
I = imread( filename, CV_LOAD_IMAGE_GRAYSCALE);
else
I = imread( filename, CV_LOAD_IMAGE_COLOR);
namedWindow("Input", CV_WINDOW_AUTOSIZE);
namedWindow("Output", CV_WINDOW_AUTOSIZE);
imshow("Input", I);
double t = (double)getTickCount();
Sharpen(I, J);
t = ((double)getTickCount() - t)/getTickFrequency();
cout << "Hand written function times passed in seconds: " << t << endl;
imshow("Output", J);
cvWaitKey(0);
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
0, -1, 0);
t = (double)getTickCount();
filter2D(I, K, I.depth(), kern );
t = ((double)getTickCount() - t)/getTickFrequency();
cout << "Built-in filter2D time passed in seconds: " << t << endl;
imshow("Output", K);
cvWaitKey(0);
return 0;
}
void Sharpen(const Mat& myImage,Mat& Result)
{
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
const int nChannels = myImage.channels();
Result.create(myImage.size(),myImage.type());
for(int j = 1 ; j < myImage.rows-1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
const uchar* next = myImage.ptr<uchar>(j + 1);
uchar* output = Result.ptr<uchar>(j);
for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
{
*output++ = saturate_cast<uchar>(5*current[i]
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
}
}
Result.row(0).setTo(Scalar(0));
Result.row(Result.rows-1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols-1).setTo(Scalar(0));
}
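A hedged sanity check for the two paths above: away from the one-pixel border that Sharpen() explicitly zeroes, the hand-written result J and the filter2D result K should agree. A hypothetical helper, not part of the original sample:
#include <iostream>
#include <opencv2/core/core.hpp>
// Compare the hand-written and filter2D results (J and K in the sample above),
// ignoring the one-pixel border that the hand-written version leaves at zero.
void compareInterior(const cv::Mat& a, const cv::Mat& b)
{
    cv::Mat diff;
    cv::absdiff(a, b, diff);
    cv::Rect inner(1, 1, a.cols - 2, a.rows - 2);
    std::cout << "max interior difference: "
              << cv::norm(diff(inner), cv::NORM_INF) << std::endl;
}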

View File

@@ -16,38 +16,38 @@ void help()
<< "Shows how output can be formated to OpenCV, python, numpy, csv and C styles." << endl
<< "Usage:" << endl
<< "./cvout_sample" << endl
<< "--------------------------------------------------------------------------" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int,char**)
{
help();
// create by using the constructor
Mat M(2,2, CV_8UC3, Scalar(0,0,255));
cout << "M = " << endl << " " << M << endl << endl;
// create by using the create function()
M.create(4,4, CV_8UC(2));
cout << "M = "<< endl << " " << M << endl << endl;
// create multidimensional matrices
int sz[3] = {2,2,2};
Mat L(3,sz, CV_8UC(1), Scalar::all(0));
// Cannot print via operator <<
// Create using MATLAB style eye, ones or zero matrix
Mat E = Mat::eye(4, 4, CV_64F);
cout << "E = " << endl << " " << E << endl << endl;
Mat O = Mat::ones(2, 2, CV_32F);
cout << "O = " << endl << " " << O << endl << endl;
Mat Z = Mat::zeros(3,3, CV_8UC1);
cout << "Z = " << endl << " " << Z << endl << endl;
// create a 3x3 double-precision identity matrix
Mat C = (Mat_<double>(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
cout << "C = " << endl << " " << C << endl << endl;
Mat RowClone = C.row(1).clone();
@@ -73,9 +73,9 @@ int main(int,char**)
vector<float> v;
v.push_back( (float)CV_PI); v.push_back(2); v.push_back(3.01f);
cout << "Vector of floats via Mat = " << Mat(v) << endl << endl;
vector<Point2f> vPoints(20);
for (size_t E = 0; E < vPoints.size(); ++E)
vPoints[E] = Point2f((float)(E * 5), (float)(E % 7));
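The help banner above also promises Python, NumPy, CSV and C output styles; assuming the 2.4-era cv::format(const Mat&, const char*) overload, they can be printed as below (hypothetical illustration, not part of the sample):
#include <iostream>
#include <opencv2/core/core.hpp>
int main()
{
    cv::Mat R = cv::Mat::eye(2, 3, CV_8UC1);
    std::cout << "default:" << std::endl << R                       << std::endl;
    std::cout << "python :" << std::endl << cv::format(R, "python") << std::endl;
    std::cout << "numpy  :" << std::endl << cv::format(R, "numpy")  << std::endl;
    std::cout << "csv    :" << std::endl << cv::format(R, "csv")    << std::endl;
    return 0;
}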

View File

@@ -26,7 +26,7 @@ int main( int argc, char** argv )
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@@ -64,7 +64,7 @@ int main( int argc, char** argv )
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
@@ -72,13 +72,13 @@ int main( int argc, char** argv )
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance < 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );

View File

@@ -27,7 +27,7 @@ int main( int argc, char** argv )
Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@@ -65,22 +65,22 @@ int main( int argc, char** argv )
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
@@ -88,7 +88,7 @@ int main( int argc, char** argv )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
@@ -101,7 +101,7 @@ int main( int argc, char** argv )
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

View File

@@ -26,7 +26,7 @@ int main( int argc, char** argv )
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
@@ -55,7 +55,7 @@ int main( int argc, char** argv )
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );

View File

@@ -26,7 +26,7 @@ int main( int argc, char** argv )
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@@ -43,8 +43,8 @@ int main( int argc, char** argv )
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );

View File

@@ -1,429 +1,429 @@
#include <iostream> // Console I/O
#include <sstream> // String to number conversion
#include <opencv2/core/core.hpp> // Basic OpenCV structures
#include <opencv2/imgproc/imgproc.hpp>// Image processing methods for the CPU
#include <opencv2/highgui/highgui.hpp>// Read images
#include <opencv2/gpu/gpu.hpp> // GPU structures and methods
using namespace std;
using namespace cv;
double getPSNR(const Mat& I1, const Mat& I2); // CPU versions
Scalar getMSSIM( const Mat& I1, const Mat& I2);
double getPSNR_GPU(const Mat& I1, const Mat& I2); // Basic GPU versions
Scalar getMSSIM_GPU( const Mat& I1, const Mat& I2);
struct BufferPSNR // Optimized GPU versions
{ // Data allocations are very expensive on the GPU. Use a buffer to avoid this: allocate once, reuse later.
gpu::GpuMat gI1, gI2, gs, t1,t2;
gpu::GpuMat buf;
};
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);
struct BufferMSSIM // Optimized GPU versions
{ // Data allocations are very expensive on the GPU. Use a buffer to avoid this: allocate once, reuse later.
gpu::GpuMat gI1, gI2, gs, t1,t2;
gpu::GpuMat I1_2, I2_2, I1_I2;
vector<gpu::GpuMat> vI1, vI2;
gpu::GpuMat mu1, mu2;
gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
gpu::GpuMat sigma1_2, sigma2_2, sigma12;
gpu::GpuMat t3;
gpu::GpuMat ssim_map;
gpu::GpuMat buf;
};
Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b);
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to port your CPU code to GPU or write that from scratch." << endl
<< "You can see the performance improvement for the similarity check methods (PSNR and SSIM)." << endl
<< "Usage:" << endl
<< "./gpu-basics-similarity referenceImage comparedImage numberOfTimesToRunTest(like 10)." << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[])
{
help();
Mat I1 = imread(argv[1]); // Read the two images
Mat I2 = imread(argv[2]);
if (!I1.data || !I2.data) // Check for success
{
cout << "Couldn't read the image";
return 0;
}
BufferPSNR bufferPSNR;
BufferMSSIM bufferMSSIM;
int TIMES;
stringstream sstr(argv[3]);
sstr >> TIMES;
double time, result;
//------------------------------- PSNR CPU ----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
result = getPSNR(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of PSNR CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
//------------------------------- PSNR GPU ----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
result = getPSNR_GPU(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of PSNR GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
//------------------------------- PSNR GPU Optimized--------------------------------------------
time = (double)getTickCount(); // Initial call
result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
cout << "Initial call GPU optimized: " << time <<" milliseconds."
<< " With result of: " << result << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of PSNR GPU OPTIMIZED ( / " << TIMES << " runs): " << time
<< " milliseconds." << " With result of: " << result << endl << endl;
//------------------------------- SSIM CPU -----------------------------------------------------
Scalar x;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
x = getMSSIM(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of MSSIM CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
//------------------------------- SSIM GPU -----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
x = getMSSIM_GPU(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of MSSIM GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
//------------------------------- SSIM GPU Optimized--------------------------------------------
time = (double)getTickCount();
x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
cout << "Time of MSSIM GPU Initial Call " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of MSSIM GPU OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl << endl;
return 0;
}
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse =sse /(double)(I1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
{
b.gI1.upload(I1);
b.gI2.upload(I2);
b.gI1.convertTo(b.t1, CV_32F);
b.gI2.convertTo(b.t2, CV_32F);
gpu::absdiff(b.t1.reshape(1), b.t2.reshape(1), b.gs);
gpu::multiply(b.gs, b.gs, b.gs);
double sse = gpu::sum(b.gs, b.buf)[0];
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse = sse /(double)(I1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
double getPSNR_GPU(const Mat& I1, const Mat& I2)
{
gpu::GpuMat gI1, gI2, gs, t1,t2;
gI1.upload(I1);
gI2.upload(I2);
gI1.convertTo(t1, CV_32F);
gI2.convertTo(t2, CV_32F);
gpu::absdiff(t1.reshape(1), t2.reshape(1), gs);
gpu::multiply(gs, gs, gs);
Scalar s = gpu::sum(gs);
double sse = s.val[0] + s.val[1] + s.val[2];
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse =sse /(double)(gI1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
Scalar getMSSIM( const Mat& i1, const Mat& i2)
{
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
Mat mu1, mu2; // PRELIMINARY COMPUTING
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
Scalar getMSSIM_GPU( const Mat& i1, const Mat& i2)
{
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
gpu::GpuMat gI1, gI2, gs1, t1,t2;
gI1.upload(i1);
gI2.upload(i2);
gI1.convertTo(t1, CV_MAKE_TYPE(CV_32F, gI1.channels()));
gI2.convertTo(t2, CV_MAKE_TYPE(CV_32F, gI2.channels()));
vector<gpu::GpuMat> vI1, vI2;
gpu::split(t1, vI1);
gpu::split(t2, vI2);
Scalar mssim;
for( int i = 0; i < gI1.channels(); ++i )
{
gpu::GpuMat I2_2, I1_2, I1_I2;
gpu::multiply(vI2[i], vI2[i], I2_2); // I2^2
gpu::multiply(vI1[i], vI1[i], I1_2); // I1^2
gpu::multiply(vI1[i], vI2[i], I1_I2); // I1 * I2
/*************************** END INITS **********************************/
gpu::GpuMat mu1, mu2; // PRELIMINARY COMPUTING
gpu::GaussianBlur(vI1[i], mu1, Size(11, 11), 1.5);
gpu::GaussianBlur(vI2[i], mu2, Size(11, 11), 1.5);
gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
gpu::multiply(mu1, mu1, mu1_2);
gpu::multiply(mu2, mu2, mu2_2);
gpu::multiply(mu1, mu2, mu1_mu2);
gpu::GpuMat sigma1_2, sigma2_2, sigma12;
gpu::GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
gpu::subtract(sigma1_2, mu1_2, sigma1_2); // sigma1_2 -= mu1_2;
gpu::GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
gpu::subtract(sigma2_2, mu2_2, sigma2_2); // sigma2_2 -= mu2_2;
gpu::GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
gpu::subtract(sigma12, mu1_mu2, sigma12); // sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
gpu::GpuMat t1, t2, t3;
mu1_mu2.convertTo(t1, -1, 2, C1); // t1 = 2 * mu1_mu2 + C1;
sigma12.convertTo(t2, -1, 2, C2); // t2 = 2 * sigma12 + C2;
gpu::multiply(t1, t2, t3); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
gpu::addWeighted(mu1_2, 1.0, mu2_2, 1.0, C1, t1); // t1 = mu1_2 + mu2_2 + C1;
gpu::addWeighted(sigma1_2, 1.0, sigma2_2, 1.0, C2, t2); // t2 = sigma1_2 + sigma2_2 + C2;
gpu::multiply(t1, t2, t1); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
gpu::GpuMat ssim_map;
gpu::divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar s = gpu::sum(ssim_map);
mssim.val[i] = s.val[0] / (ssim_map.rows * ssim_map.cols);
}
return mssim;
}
Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
{
int cn = i1.channels();
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
b.gI1.upload(i1);
b.gI2.upload(i2);
gpu::Stream stream;
stream.enqueueConvert(b.gI1, b.t1, CV_32F);
stream.enqueueConvert(b.gI2, b.t2, CV_32F);
gpu::split(b.t1, b.vI1, stream);
gpu::split(b.t2, b.vI2, stream);
Scalar mssim;
gpu::GpuMat buf;
for( int i = 0; i < b.gI1.channels(); ++i )
{
gpu::multiply(b.vI2[i], b.vI2[i], b.I2_2, stream); // I2^2
gpu::multiply(b.vI1[i], b.vI1[i], b.I1_2, stream); // I1^2
gpu::multiply(b.vI1[i], b.vI2[i], b.I1_I2, stream); // I1 * I2
gpu::GaussianBlur(b.vI1[i], b.mu1, Size(11, 11), buf, 1.5, 0, BORDER_DEFAULT, -1, stream);
gpu::GaussianBlur(b.vI2[i], b.mu2, Size(11, 11), buf, 1.5, 0, BORDER_DEFAULT, -1, stream);
gpu::multiply(b.mu1, b.mu1, b.mu1_2, stream);
gpu::multiply(b.mu2, b.mu2, b.mu2_2, stream);
gpu::multiply(b.mu1, b.mu2, b.mu1_mu2, stream);
gpu::GaussianBlur(b.I1_2, b.sigma1_2, Size(11, 11), buf, 1.5, 0, BORDER_DEFAULT, -1, stream);
gpu::subtract(b.sigma1_2, b.mu1_2, b.sigma1_2, gpu::GpuMat(), -1, stream);
//b.sigma1_2 -= b.mu1_2; - This would result in an extra data transfer operation
gpu::GaussianBlur(b.I2_2, b.sigma2_2, Size(11, 11), buf, 1.5, 0, BORDER_DEFAULT, -1, stream);
gpu::subtract(b.sigma2_2, b.mu2_2, b.sigma2_2, gpu::GpuMat(), -1, stream);
//b.sigma2_2 -= b.mu2_2;
gpu::GaussianBlur(b.I1_I2, b.sigma12, Size(11, 11), buf, 1.5, 0, BORDER_DEFAULT, -1, stream);
gpu::subtract(b.sigma12, b.mu1_mu2, b.sigma12, gpu::GpuMat(), -1, stream);
//b.sigma12 -= b.mu1_mu2;
//here too it would be an extra data transfer due to call of operator*(Scalar, Mat)
gpu::multiply(b.mu1_mu2, 2, b.t1, 1, -1, stream); //b.t1 = 2 * b.mu1_mu2 + C1;
gpu::add(b.t1, C1, b.t1, gpu::GpuMat(), -1, stream);
gpu::multiply(b.sigma12, 2, b.t2, 1, -1, stream); //b.t2 = 2 * b.sigma12 + C2;
gpu::add(b.t2, C2, b.t2, gpu::GpuMat(), -1, stream);
gpu::multiply(b.t1, b.t2, b.t3, 1, -1, stream); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
gpu::add(b.mu1_2, b.mu2_2, b.t1, gpu::GpuMat(), -1, stream);
gpu::add(b.t1, C1, b.t1, gpu::GpuMat(), -1, stream);
gpu::add(b.sigma1_2, b.sigma2_2, b.t2, gpu::GpuMat(), -1, stream);
gpu::add(b.t2, C2, b.t2, gpu::GpuMat(), -1, stream);
gpu::multiply(b.t1, b.t2, b.t1, 1, -1, stream); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
gpu::divide(b.t3, b.t1, b.ssim_map, 1, -1, stream); // ssim_map = t3./t1;
stream.waitForCompletion();
Scalar s = gpu::sum(b.ssim_map, b.buf);
mssim.val[i] = s.val[0] / (b.ssim_map.rows * b.ssim_map.cols);
}
return mssim;
}
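One hedged addition worth considering before running the GPU paths above: guard against machines without a CUDA device, so the program can fall back to the CPU versions instead of failing inside upload(). A minimal sketch (the function name is ours):
#include <iostream>
#include <opencv2/gpu/gpu.hpp>
// Return true and select a device when CUDA is available; otherwise report it so
// the caller can run only getPSNR()/getMSSIM() on the CPU.
bool haveCudaDevice()
{
    if (cv::gpu::getCudaEnabledDeviceCount() == 0)
    {
        std::cout << "No CUDA device found -- running the CPU versions only." << std::endl;
        return false;
    }
    cv::gpu::setDevice(0);   // select the first device explicitly
    return true;
}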

View File

@@ -1,30 +1,30 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
if( argc != 2)
{
cout <<" Usage: display_image ImageToLoadAndDisplay" << endl;
return -1;
}
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_COLOR); // Read the file
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
namedWindow( "Display window", CV_WINDOW_AUTOSIZE );// Create a window for display.
imshow( "Display window", image ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
return 0;
}

View File

@@ -1,206 +1,206 @@
// Video Image PSNR and SSIM
#include <iostream> // for standard I/O
#include <string> // for strings
#include <iomanip> // for controlling float print precision
#include <sstream> // string to number conversion
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
using namespace std;
using namespace cv;
double getPSNR ( const Mat& I1, const Mat& I2);
Scalar getMSSIM( const Mat& I1, const Mat& I2);
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to read a video file with OpenCV. In addition, it tests the"
<< " similarity of two input videos first with PSNR, and for the frames below a PSNR " << endl
<< "trigger value, also with MSSIM."<< endl
<< "Usage:" << endl
<< "./video-source referenceVideo useCaseTestVideo PSNR_Trigger_Value Wait_Between_Frames " << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char *argv[])
{
help();
if (argc != 5)
{
cout << "Not enough parameters" << endl;
return -1;
}
stringstream conv;
const string sourceReference = argv[1],sourceCompareWith = argv[2];
int psnrTriggerValue, delay;
conv << argv[3] << " " << argv[4]; // put in the strings (separated so both numbers parse)
conv >> psnrTriggerValue >> delay; // take out the numbers
char c;
int frameNum = -1; // Frame counter
VideoCapture captRefrnc(sourceReference),
captUndTst(sourceCompareWith);
if ( !captRefrnc.isOpened())
{
cout << "Could not open reference " << sourceReference << endl;
return -1;
}
if( !captUndTst.isOpened())
{
cout << "Could not open case test " << sourceCompareWith << endl;
return -1;
}
Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
(int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
cout << "Inputs have different size!!! Closing." << endl;
return -1;
}
const char* WIN_UT = "Under Test";
const char* WIN_RF = "Reference";
// Windows
namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " <<
setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl;
Mat frameReference, frameUnderTest;
double psnrV;
Scalar mssimV;
while( true) //Show the image captured in the window and repeat
{
captRefrnc >> frameReference;
captUndTst >> frameUnderTest;
if( frameReference.empty() || frameUnderTest.empty())
{
cout << " < < < Game over! > > > ";
break;
}
++frameNum;
cout <<"Frame:" << frameNum;
///////////////////////////////// PSNR ////////////////////////////////////////////////////
psnrV = getPSNR(frameReference,frameUnderTest); //get PSNR
cout << setiosflags(ios::fixed) << setprecision(3) << psnrV << "dB";
//////////////////////////////////// MSSIM /////////////////////////////////////////////////
if (psnrV < psnrTriggerValue)
{
mssimV = getMSSIM(frameReference,frameUnderTest);
cout << " MSSIM: "
<< "R" << setiosflags(ios::fixed) << setprecision(3) << mssimV.val[2] * 100
<< "G" << setiosflags(ios::fixed) << setprecision(3) << mssimV.val[1] * 100
<< "B" << setiosflags(ios::fixed) << setprecision(3) << mssimV.val[0] * 100;
}
cout << endl;
////////////////////////////////// Show Image /////////////////////////////////////////////
imshow( WIN_RF, frameReference);
imshow( WIN_UT, frameUnderTest);
c = cvWaitKey(delay);
if (c == 27) break;
}
return 0;
}
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse =sse /(double)(I1.channels() * I1.total());
double psnr = 10.0*log10((255*255)/mse);
return psnr;
}
}
Scalar getMSSIM( const Mat& i1, const Mat& i2)
{
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
Mat mu1, mu2; // PRELIMINARY COMPUTING
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
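For reference, the quantities getPSNR() and getMSSIM() above compute are the standard definitions (c = channel count, N = pixel count; this note is ours, not text from the sample):
MSE = \frac{1}{c\,N} \sum_{\text{channels}} \sum_{\text{pixels}} (I_1 - I_2)^2
PSNR = 10 \, \log_{10} \frac{255^2}{\mathrm{MSE}}
SSIM(x, y) = \frac{(2\mu_x\mu_y + C_1)\,(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)\,(\sigma_x^2 + \sigma_y^2 + C_2)}
with C_1 = (0.01 \cdot 255)^2 = 6.5025 and C_2 = (0.03 \cdot 255)^2 = 58.5225; getMSSIM() returns the mean of the per-pixel SSIM map for each channel.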

View File

@@ -30,7 +30,7 @@ int main()
//--------------------- 1. Set up training data randomly ---------------------------------------
Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32FC1);
Mat labels (2*NTRAINING_SAMPLES, 1, CV_32FC1);
RNG rng(100); // Random value generation class
// Set up the linearly separable part of the training data
@@ -48,7 +48,7 @@ int main()
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
@@ -60,11 +60,11 @@ int main()
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2
@@ -81,9 +81,9 @@ int main()
CvSVM svm;
svm.train(trainData, labels, Mat(), Mat(), params);
cout << "Finished training process" << endl;
//------------------------ 4. Show the decision regions ----------------------------------------
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
{
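The CvSVM::train() call above uses a params object defined outside this hunk; a plausible, purely illustrative setup (the real values may differ) would be:
#include <opencv2/ml/ml.hpp>
// Hypothetical parameter setup for the CvSVM::train() call above: a linear C-SVC
// with a small soft-margin constant and a tight termination criterion.
CvSVMParams makeParams()
{
    CvSVMParams params;
    params.svm_type    = CvSVM::C_SVC;
    params.C           = 0.1;
    params.kernel_type = CvSVM::LINEAR;
    params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, (int)1e7, 1e-6);
    return params;
}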

View File

@@ -36,7 +36,7 @@ int main( int argc, const char** argv )
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
@@ -44,15 +44,15 @@ int main( int argc, const char** argv )
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
@@ -85,11 +85,11 @@ void detectAndDisplay( Mat frame )
for( int j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
}
}
}
//-- Show what you got
imshow( window_name, frame );
}
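The eye/face loop above iterates over detections produced outside these hunks; a hedged sketch of that detection step (the helper name and parameter values are ours):
#include <vector>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Equalize the grayscale frame, then run the face cascade; the resulting rectangles
// feed the drawing loop shown above.
void detectFaces(cv::CascadeClassifier& face_cascade, const cv::Mat& frame,
                 std::vector<cv::Rect>& faces)
{
    cv::Mat frame_gray;
    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);
    cv::equalizeHist(frame_gray, frame_gray);
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2,
                                  0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
}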

View File

@@ -44,15 +44,15 @@ int main( int argc, const char** argv )
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
@@ -88,13 +88,13 @@ void detectAndDisplay( Mat frame )
for( int j = 0; j < eyes.size(); j++ )
{ //-- Draw the eyes
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 255 ), 3, 8, 0 );
}
}
}
}
//-- Show what you got
imshow( window_name, frame );
}

View File

@@ -37,7 +37,7 @@ namespace
<< " Resize the screen to be large enough for your camera to see, and it should find an read it.\n\n"
<< endl;
}
int process(VideoCapture& capture)
{
int n = 0;
@@ -76,12 +76,12 @@ namespace
}
return 0;
}
}
int main(int ac, char** av)
{
if (ac != 2)
{
help(av);
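process(VideoCapture&) above is only declared in this excerpt; its grab/show loop presumably follows the usual pattern, sketched here (the function name is ours):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
// Read frames until the stream ends or the user quits.
int runLoop(cv::VideoCapture& capture)
{
    cv::Mat frame;
    for (;;)
    {
        capture >> frame;
        if (frame.empty())
            break;
        cv::imshow("output", frame);
        if ((char)cv::waitKey(30) == 'q')
            break;
    }
    return 0;
}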

View File

@@ -524,7 +524,7 @@ int main(int argc, const char **argv)
{
inpainters->setRadius(argi("radius"));
stabilizer->setInpainter(inpainters_);
}
}
if (arg("output") != "no")
outputPath = arg("output");