Merge remote-tracking branch 'upstream/master'

Conflicts:
	modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
@@ -4,6 +4,7 @@
 #include "opencv2/calib3d/calib3d.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs/imgcodecs.hpp"
 #include "opencv2/highgui/highgui.hpp"

 #include <stdio.h>
@@ -3,9 +3,10 @@
 #
 # ----------------------------------------------------------------------------

-SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
-    opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree opencv_softcascade
-    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab opencv_shape)
+SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
+    opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
+    opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
+    opencv_stitching opencv_videostab opencv_shape)

 ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
samples/cpp/H1to3p.xml  (new executable file, 11 lines)
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<opencv_storage>
+<H13 type_id="opencv-matrix">
+<rows>3</rows>
+<cols>3</cols>
+<dt>d</dt>
+<data>
+7.6285898e-01 -2.9922929e-01 2.2567123e+02
+3.3443473e-01 1.0143901e+00 -7.6999973e+01
+3.4663091e-04 -1.4364524e-05 1.0000000e+00 </data></H13>
+</opencv_storage>
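This ground-truth homography can be loaded back with cv::FileStorage; a minimal sketch (the filename and the "H13" node name come from the XML above, the rest is illustrative):

```cpp
#include "opencv2/core.hpp"
#include <iostream>

int main()
{
    // Read the 3x3 double-precision homography stored under the "H13" node.
    cv::FileStorage fs("H1to3p.xml", cv::FileStorage::READ);
    cv::Mat H13;
    fs["H13"] >> H13;
    std::cout << "H13 =\n" << H13 << std::endl;
    return 0;
}
```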
@@ -1,12 +0,0 @@
-#http://www.cmake.org/cmake/help/cmake2.6docs.html
-cmake_minimum_required (VERSION 2.6)
-project (OpenGL_Qt_Binding)
-
-
-FIND_PACKAGE( OpenCV REQUIRED )
-find_package (OpenGL REQUIRED)
-
-
-ADD_EXECUTABLE(OpenGL_Qt_Binding qt_opengl.cpp)
-TARGET_LINK_LIBRARIES(OpenGL_Qt_Binding ${OpenCV_LIBS} ${OPENGL_LIBRARIES} )
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cube4.avi ${CMAKE_CURRENT_BINARY_DIR}/cube4.avi COPYONLY)
@@ -1,269 +0,0 @@
-// Yannick Verdie 2010
-// --- Please read help() below: ---
-
-#include <iostream>
-#include <vector>
-
-#include <opencv2/calib3d/calib3d.hpp>
-#include <opencv2/calib3d/calib3d_c.h>
-#include <opencv2/core/core.hpp>
-#include <opencv2/highgui/highgui.hpp>
-#include <opencv2/imgproc/imgproc.hpp>
-#include <opencv2/legacy/compat.hpp>
-
-#ifdef __APPLE__
-#include <OpenGL/gl.h>
-#else
-#include <GL/gl.h>
-#endif
-
-using namespace std;
-using namespace cv;
-
-static void help()
-{
-    cout << "This demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
-            "and dang if it doesn't throw in the use of the POSIT 3D tracking algorithm too\n"
-            "It works off of the video: cube4.avi\n"
-            "Using OpenCV version " << CV_VERSION << "\n\n"
-
-            " 1) This demo is mainly based on work from Javier Barandiaran Martirena\n"
-            "    See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
-            " 2) This is a demo to illustrate how to use **OpenGL Callback**.\n"
-            " 3) You need Qt binding to compile this sample with OpenGL support enabled.\n"
-            " 4) The features' detection is very basic and could be highly improved\n"
-            "    (basic thresholding tuned for the specific video), but see 2).\n"
-            " 5) Thanks to Google Summer of Code 2010 for supporting this work!\n" << endl;
-}
-
-#define FOCAL_LENGTH 600
-#define CUBE_SIZE 0.5
-
-static void renderCube(float size)
-{
-    glBegin(GL_QUADS);
-    // Front Face
-    glNormal3f( 0.0f, 0.0f, 1.0f);
-    glVertex3f( 0.0f, 0.0f, 0.0f);
-    glVertex3f( size, 0.0f, 0.0f);
-    glVertex3f( size, size, 0.0f);
-    glVertex3f( 0.0f, size, 0.0f);
-    // Back Face
-    glNormal3f( 0.0f, 0.0f,-1.0f);
-    glVertex3f( 0.0f, 0.0f, size);
-    glVertex3f( 0.0f, size, size);
-    glVertex3f( size, size, size);
-    glVertex3f( size, 0.0f, size);
-    // Top Face
-    glNormal3f( 0.0f, 1.0f, 0.0f);
-    glVertex3f( 0.0f, size, 0.0f);
-    glVertex3f( size, size, 0.0f);
-    glVertex3f( size, size, size);
-    glVertex3f( 0.0f, size, size);
-    // Bottom Face
-    glNormal3f( 0.0f,-1.0f, 0.0f);
-    glVertex3f( 0.0f, 0.0f, 0.0f);
-    glVertex3f( 0.0f, 0.0f, size);
-    glVertex3f( size, 0.0f, size);
-    glVertex3f( size, 0.0f, 0.0f);
-    // Right Face
-    glNormal3f( 1.0f, 0.0f, 0.0f);
-    glVertex3f( size, 0.0f, 0.0f);
-    glVertex3f( size, 0.0f, size);
-    glVertex3f( size, size, size);
-    glVertex3f( size, size, 0.0f);
-    // Left Face
-    glNormal3f(-1.0f, 0.0f, 0.0f);
-    glVertex3f( 0.0f, 0.0f, 0.0f);
-    glVertex3f( 0.0f, size, 0.0f);
-    glVertex3f( 0.0f, size, size);
-    glVertex3f( 0.0f, 0.0f, size);
-    glEnd();
-}
-
-static void on_opengl(void* param)
-{
-    // Draw the object with the estimated pose
-    glLoadIdentity();
-    glScalef( 1.0f, 1.0f, -1.0f);
-    glMultMatrixf( (float*)param );
-    glEnable( GL_LIGHTING );
-    glEnable( GL_LIGHT0 );
-    glEnable( GL_BLEND );
-    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
-    renderCube( CUBE_SIZE );
-    glDisable(GL_BLEND);
-    glDisable( GL_LIGHTING );
-}
-
-static void initPOSIT(std::vector<CvPoint3D32f> * modelPoints)
-{
-    // Create the model points
-    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); // The first must be (0, 0, 0)
-    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
-    modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
-    modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
-}
-
-static void foundCorners(vector<CvPoint2D32f> * srcImagePoints, const Mat & source, Mat & grayImage)
-{
-    cvtColor(source, grayImage, COLOR_RGB2GRAY);
-    GaussianBlur(grayImage, grayImage, Size(11, 11), 0, 0);
-    normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
-    threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
-
-    Mat MgrayImage = grayImage;
-    vector<vector<Point> > contours;
-    vector<Vec4i> hierarchy;
-    findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
-
-    Point p;
-    vector<CvPoint2D32f> srcImagePoints_temp(4, cvPoint2D32f(0, 0));
-
-    if (contours.size() == srcImagePoints_temp.size())
-    {
-        for (size_t i = 0; i < contours.size(); i++ )
-        {
-            p.x = p.y = 0;
-
-            for (size_t j = 0 ; j < contours[i].size(); j++)
-                p += contours[i][j];
-
-            srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x) / contours[i].size(), float(p.y) / contours[i].size());
-        }
-
-        // Need to keep the same order
-        // > y = 0
-        // > x = 1
-        // < x = 2
-        // < y = 3
-
-        // get point 0;
-        size_t index = 0;
-        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
-            if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
-                index = i;
-        srcImagePoints->at(0) = srcImagePoints_temp.at(index);
-
-        // get point 1;
-        index = 0;
-        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
-            if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
-                index = i;
-        srcImagePoints->at(1) = srcImagePoints_temp.at(index);
-
-        // get point 2;
-        index = 0;
-        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
-            if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
-                index = i;
-        srcImagePoints->at(2) = srcImagePoints_temp.at(index);
-
-        // get point 3;
-        index = 0;
-        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
-            if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
-                index = i;
-        srcImagePoints->at(3) = srcImagePoints_temp.at(index);
-
-        Mat Msource = source;
-        stringstream ss;
-        for (size_t i = 0; i<srcImagePoints_temp.size(); i++ )
-        {
-            ss << i;
-            circle(Msource, srcImagePoints->at(i), 5, Scalar(0, 0, 255));
-            putText(Msource, ss.str(), srcImagePoints->at(i), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255));
-            ss.str("");
-
-            // new coordinate system in the middle of the frame and reversed (camera coordinate system)
-            srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x - source.cols / 2,
-                                                 source.rows / 2 - srcImagePoints_temp.at(i).y);
-        }
-    }
-}
-
-static void createOpenGLMatrixFrom(float * posePOSIT, const CvMatr32f & rotationMatrix,
-                                   const CvVect32f & translationVector)
-{
-    // coordinate system returned is relative to the first 3D input point
-    for (int f = 0; f < 3; f++)
-        for (int c = 0; c < 3; c++)
-            posePOSIT[c * 4 + f] = rotationMatrix[f * 3 + c]; // transposed
-
-    posePOSIT[3] = translationVector[0];
-    posePOSIT[7] = translationVector[1];
-    posePOSIT[11] = translationVector[2];
-    posePOSIT[12] = 0.0f;
-    posePOSIT[13] = 0.0f;
-    posePOSIT[14] = 0.0f;
-    posePOSIT[15] = 1.0f;
-}
-
-int main(void)
-{
-    help();
-
-    string fileName = "cube4.avi";
-    VideoCapture video(fileName);
-    if (!video.isOpened())
-    {
-        cerr << "Video file " << fileName << " could not be opened" << endl;
-        return EXIT_FAILURE;
-    }
-
-    Mat source, grayImage;
-    video >> source;
-
-    namedWindow("Original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
-    namedWindow("POSIT", WINDOW_OPENGL | WINDOW_FREERATIO);
-    resizeWindow("POSIT", source.cols, source.rows);
-
-    displayOverlay("POSIT", "We lose the 4 corners' detection quite often (the red circles disappear).\n"
-                   "This demo is only to illustrate how to use OpenGL callback.\n"
-                   " -- Press ESC to exit.", 10000);
-
-    float OpenGLMatrix[] = { 0, 0, 0, 0,
-                             0, 0, 0, 0,
-                             0, 0, 0, 0,
-                             0, 0, 0, 0 };
-    setOpenGlContext("POSIT");
-    setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);
-
-    vector<CvPoint3D32f> modelPoints;
-    initPOSIT(&modelPoints);
-
-    // Create the POSIT object with the model points
-    CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size());
-
-    CvMatr32f rotation_matrix = new float[9];
-    CvVect32f translation_vector = new float[3];
-    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1e-4f);
-    vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0, 0));
-
-    while (waitKey(33) != 27)
-    {
-        video >> source;
-        if (source.empty())
-            break;

-        imshow("Original", source);
-
-        foundCorners(&srcImagePoints, source, grayImage);
-        cvPOSIT(positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector);
-        createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);
-
-        updateWindow("POSIT");
-
-        if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
-            video.set(CAP_PROP_POS_AVI_RATIO, 0);
-    }
-
-    setOpenGlDrawCallback("POSIT", NULL, NULL);
-    destroyAllWindows();
-    cvReleasePOSITObject(&positObject);
-
-    delete[] rotation_matrix;
-    delete[] translation_vector;
-
-    return EXIT_SUCCESS;
-}
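The sample above is dropped rather than ported because the legacy C POSIT API (cvCreatePOSITObject/cvPOSIT) goes away along with the legacy module. A rough modern equivalent of the pose step is cv::solvePnP; the sketch below is an illustration under assumed inputs (the same four model points and the sample's nominal FOCAL_LENGTH), not the replacement code from the repo:

```cpp
#include "opencv2/calib3d.hpp"
#include <vector>

// Hypothetical helper: estimate the cube pose with solvePnP instead of cvPOSIT.
// objectPoints/imagePoints correspond to modelPoints/srcImagePoints above.
void estimatePose(const std::vector<cv::Point3f>& objectPoints,
                  const std::vector<cv::Point2f>& imagePoints,
                  const cv::Size& frameSize,
                  cv::Mat& rvec, cv::Mat& tvec)
{
    double f = 600; // FOCAL_LENGTH from the removed sample
    cv::Mat K = (cv::Mat_<double>(3, 3) << f, 0, frameSize.width  / 2.0,
                                           0, f, frameSize.height / 2.0,
                                           0, 0, 1);
    // No lens distortion assumed; rvec is a Rodrigues rotation vector.
    cv::solvePnP(objectPoints, imagePoints, K, cv::noArray(), rvec, tvec);
}
```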
BIN  samples/cpp/airplane.jpg (new file; 83 KiB)
BIN  samples/cpp/baboon200.jpg (new file; 21 KiB)
BIN  samples/cpp/baboon200_rotated.jpg (new file; 11 KiB)
@@ -1,13 +1,10 @@
 #include "opencv2/opencv_modules.hpp"
+#include "opencv2/imgcodecs.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/nonfree/nonfree.hpp"
 #include "opencv2/ml/ml.hpp"
-#ifdef HAVE_OPENCV_OCL
-#define _OCL_SVM_ 1 //select whether using ocl::svm method or not, default is using
-#include "opencv2/ocl/ocl.hpp"
-#endif

 #include <fstream>
 #include <iostream>
@@ -26,6 +23,7 @@
 #define DEBUG_DESC_PROGRESS

 using namespace cv;
+using namespace cv::ml;
 using namespace std;

 const string paramsFile = "params.xml";
@@ -680,7 +678,7 @@ void VocData::writeClassifierResultsFile( const string& out_dir, const string& o
         result_file.close();
     } else {
         string err_msg = "could not open classifier results file '" + output_file + "' for writing. Before running for the first time, a 'results' subdirectory should be created within the VOC dataset base directory. e.g. if the VOC data is stored in /VOC/VOC2010 then the path /VOC/results must be created.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
 }
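This hunk, and the long run of similar ones below, is mechanical: OpenCV 3.0 moved the error codes from the C-style CV_Sts* macros into the cv::Error enum, while CV_Error itself is unchanged. A minimal before/after sketch (hypothetical message, same call shape as above):

```cpp
#include "opencv2/core.hpp"

void fail(const std::string& msg)
{
    // OpenCV 2.4 style: integer macro from the C API headers.
    // CV_Error(CV_StsError, msg);

    // OpenCV 3.x style: scoped enumerator from cv::Error.
    CV_Error(cv::Error::StsError, msg);
}
```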
@@ -704,9 +702,9 @@ void VocData::writeClassifierResultsFile( const string& out_dir, const string& o
 string VocData::getResultsFilename(const string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition, const int number)
 {
     if ((competition < 1) && (competition != -1))
-        CV_Error(CV_StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default");
+        CV_Error(Error::StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default");
     if ((number < 1) && (number != -1))
-        CV_Error(CV_StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default");
+        CV_Error(Error::StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default");

     string dset, task_type;

@@ -818,7 +816,7 @@ void VocData::calcClassifierPrecRecall(const string& input_file, vector<float>&
         scoregt_file.close();
     } else {
         string err_msg = "could not open scoregt file '" + scoregt_file_str + "' for writing.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
 }
@@ -977,7 +975,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
     if (target_idx_it == output_headers.end())
     {
         string err_msg = "could not find the target object class '" + obj_class + "' in list of valid classes.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
     /* convert iterator to index */
     target_idx = (int)std::distance(output_headers.begin(),target_idx_it);
@@ -1040,7 +1038,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
     if (class_idx_it == output_headers.end())
     {
         string err_msg = "could not find object class '" + img_objects[obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
     /* convert iterator to index */
     int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
@@ -1192,7 +1190,7 @@ void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetTy
     if (class_idx_it == output_headers.end())
     {
         string err_msg = "could not find object class '" + img_objects[max_gt_obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
     /* convert iterator to index */
     int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
@@ -1285,7 +1283,7 @@ void VocData::savePrecRecallToGnuplot(const string& output_file, const vector<fl
         plot_file.close();
     } else {
         string err_msg = "could not open plot file '" + output_file_std + "' for writing.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
 }
@@ -1449,7 +1447,7 @@ void VocData::readClassifierGroundTruth(const string& filename, vector<string>&
     if (!gtfile.is_open())
     {
         string err_msg = "could not open VOC ground truth textfile '" + filename + "'.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }

     string line;
@@ -1465,7 +1463,7 @@ void VocData::readClassifierGroundTruth(const string& filename, vector<string>&
             image_codes.push_back(image);
             object_present.push_back(obj_present == 1);
         } else {
-            if (!gtfile.eof()) CV_Error(CV_StsParseError,"error parsing VOC ground truth textfile.");
+            if (!gtfile.eof()) CV_Error(Error::StsParseError,"error parsing VOC ground truth textfile.");
         }
     }
     gtfile.close();
@@ -1491,13 +1489,13 @@ void VocData::readClassifierResultsFile(const string& input_file, vector<string>
                 image_codes.push_back(image);
                 scores.push_back(score);
             } else {
-                if(!result_file.eof()) CV_Error(CV_StsParseError,"error parsing VOC classifier results file.");
+                if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC classifier results file.");
             }
         }
         result_file.close();
     } else {
         string err_msg = "could not open classifier results file '" + input_file + "' for reading.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
 }
@@ -1548,13 +1546,13 @@ void VocData::readDetectorResultsFile(const string& input_file, vector<string>&
                 bounding_boxes[image_idx].push_back(bounding_box);
             }
         } else {
-            if(!result_file.eof()) CV_Error(CV_StsParseError,"error parsing VOC detector results file.");
+            if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC detector results file.");
         }
     }
     result_file.close();
     } else {
         string err_msg = "could not open detector results file '" + input_file + "' for reading.";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }
 }
@@ -1598,23 +1596,23 @@ void VocData::extractVocObjects(const string filename, vector<ObdObject>& object

     //object class -------------

-    if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <name> tag in object definition of '" + filename + "'");
+    if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <name> tag in object definition of '" + filename + "'");
     object.object_class.swap(tag_contents);

     //object bounding box -------------

     int xmax, xmin, ymax, ymin;

-    if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <xmax> tag in object definition of '" + filename + "'");
+    if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmax> tag in object definition of '" + filename + "'");
     xmax = stringToInteger(tag_contents);

-    if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <xmin> tag in object definition of '" + filename + "'");
+    if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmin> tag in object definition of '" + filename + "'");
     xmin = stringToInteger(tag_contents);

-    if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <ymax> tag in object definition of '" + filename + "'");
+    if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymax> tag in object definition of '" + filename + "'");
     ymax = stringToInteger(tag_contents);

-    if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <ymin> tag in object definition of '" + filename + "'");
+    if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymin> tag in object definition of '" + filename + "'");
     ymin = stringToInteger(tag_contents);

     object.boundingBox.x = xmin-1; //convert to 0-based indexing
@@ -1717,11 +1715,11 @@ void VocData::extractDataFromResultsFilename(const string& input_file, string& c
     size_t fnameend = input_file_std.rfind(".txt");

     if ((fnamestart == input_file_std.npos) || (fnameend == input_file_std.npos))
-        CV_Error(CV_StsError,"Could not extract filename of results file.");
+        CV_Error(Error::StsError,"Could not extract filename of results file.");

     ++fnamestart;
     if (fnamestart >= fnameend)
-        CV_Error(CV_StsError,"Could not extract filename of results file.");
+        CV_Error(Error::StsError,"Could not extract filename of results file.");

     //extract dataset and class names, triggering exception if the filename format is not correct
     string filename = input_file_std.substr(fnamestart, fnameend-fnamestart);
@@ -1732,11 +1730,11 @@ void VocData::extractDataFromResultsFilename(const string& input_file, string& c
     size_t classend = filename.find("_",classstart+1);
     if (classend == filename.npos) classend = filename.size();
     if ((datasetstart == filename.npos) || (classstart == filename.npos))
-        CV_Error(CV_StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
+        CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
     ++datasetstart;
     ++classstart;
     if (((datasetstart-classstart) < 1) || ((classend-datasetstart) < 1))
-        CV_Error(CV_StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
+        CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");

     dataset_name = filename.substr(datasetstart,classstart-datasetstart-1);
     class_name = filename.substr(classstart,classend-classstart);
@@ -1784,7 +1782,7 @@ bool VocData::getClassifierGroundTruthImage(const string& obj_class, const strin
         return m_classifier_gt_all_present[std::distance(m_classifier_gt_all_ids.begin(),it)] != 0;
     } else {
         string err_msg = "could not find classifier ground truth for image '" + id + "' and class '" + obj_class + "'";
-        CV_Error(CV_StsError,err_msg.c_str());
+        CV_Error(Error::StsError,err_msg.c_str());
     }

     return true;
@@ -1817,7 +1815,7 @@ void VocData::getSortOrder(const vector<float>& values, vector<size_t>& order, b
 void VocData::readFileToString(const string filename, string& file_contents)
 {
     std::ifstream ifs(filename.c_str());
-    if (!ifs.is_open()) CV_Error(CV_StsError,"could not open text file");
+    if (!ifs.is_open()) CV_Error(Error::StsError,"could not open text file");

     stringstream oss;
     oss << ifs.rdbuf();
@@ -1832,7 +1830,7 @@ int VocData::stringToInteger(const string input_str)
     stringstream ss(input_str);
     if ((ss >> result).fail())
     {
-        CV_Error(CV_StsBadArg,"could not perform string to integer conversion");
+        CV_Error(Error::StsBadArg,"could not perform string to integer conversion");
     }
     return result;
 }
@@ -1844,7 +1842,7 @@ string VocData::integerToString(const int input_int)
     stringstream ss;
     if ((ss << input_int).fail())
     {
-        CV_Error(CV_StsBadArg,"could not perform integer to string conversion");
+        CV_Error(Error::StsBadArg,"could not perform integer to string conversion");
     }
     result = ss.str();
     return result;
@@ -2328,14 +2326,14 @@ static void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<M
     CV_Assert( bowImageDescriptors.size() == objectPresent.size() );
 }

-static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
+static void setSVMParams( SVM::Params& svmParams, Mat& class_wts_cv, const Mat& responses, bool balanceClasses )
 {
     int pos_ex = countNonZero(responses == 1);
     int neg_ex = countNonZero(responses == -1);
     cout << pos_ex << " positive training samples; " << neg_ex << " negative training samples" << endl;

-    svmParams.svm_type = CvSVM::C_SVC;
-    svmParams.kernel_type = CvSVM::RBF;
+    svmParams.svmType = SVM::C_SVC;
+    svmParams.kernelType = SVM::RBF;
     if( balanceClasses )
     {
         Mat class_wts( 2, 1, CV_32FC1 );
@@ -2353,49 +2351,44 @@ static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat
         class_wts.at<float>(1) = static_cast<float>(pos_ex)/static_cast<float>(pos_ex+neg_ex);
         }
         class_wts_cv = class_wts;
-        svmParams.class_weights = &class_wts_cv;
+        svmParams.classWeights = class_wts_cv;
     }
 }

-static void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
-                                   CvParamGrid& p_grid, CvParamGrid& nu_grid,
-                                   CvParamGrid& coef_grid, CvParamGrid& degree_grid )
+static void setSVMTrainAutoParams( ParamGrid& c_grid, ParamGrid& gamma_grid,
+                                   ParamGrid& p_grid, ParamGrid& nu_grid,
+                                   ParamGrid& coef_grid, ParamGrid& degree_grid )
 {
-    c_grid = CvSVM::get_default_grid(CvSVM::C);
+    c_grid = SVM::getDefaultGrid(SVM::C);

-    gamma_grid = CvSVM::get_default_grid(CvSVM::GAMMA);
+    gamma_grid = SVM::getDefaultGrid(SVM::GAMMA);

-    p_grid = CvSVM::get_default_grid(CvSVM::P);
-    p_grid.step = 0;
+    p_grid = SVM::getDefaultGrid(SVM::P);
+    p_grid.logStep = 0;

-    nu_grid = CvSVM::get_default_grid(CvSVM::NU);
-    nu_grid.step = 0;
+    nu_grid = SVM::getDefaultGrid(SVM::NU);
+    nu_grid.logStep = 0;

-    coef_grid = CvSVM::get_default_grid(CvSVM::COEF);
-    coef_grid.step = 0;
+    coef_grid = SVM::getDefaultGrid(SVM::COEF);
+    coef_grid.logStep = 0;

-    degree_grid = CvSVM::get_default_grid(CvSVM::DEGREE);
-    degree_grid.step = 0;
+    degree_grid = SVM::getDefaultGrid(SVM::DEGREE);
+    degree_grid.logStep = 0;
 }

-#if defined HAVE_OPENCV_OCL && _OCL_SVM_
-static void trainSVMClassifier( cv::ocl::CvSVM_OCL& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
-                                Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
-                                const string& resPath )
-#else
-static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
+static Ptr<SVM> trainSVMClassifier( const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
                                 Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
                                 const string& resPath )
-#endif
 {
     /* first check if a previously trained svm for the current class has been saved to file */
     string svmFilename = resPath + svmsDir + "/" + objClassName + ".xml.gz";
+    Ptr<SVM> svm;

     FileStorage fs( svmFilename, FileStorage::READ);
     if( fs.isOpened() )
     {
         cout << "*** LOADING SVM CLASSIFIER FOR CLASS " << objClassName << " ***" << endl;
-        svm.load( svmFilename.c_str() );
+        svm = StatModel::load<SVM>( svmFilename );
     }
     else
     {
@@ -2446,28 +2439,26 @@ static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsEx
         }

         cout << "TRAINING SVM FOR CLASS ..." << objClassName << "..." << endl;
-        CvSVMParams svmParams;
-        CvMat class_wts_cv;
+        SVM::Params svmParams;
+        Mat class_wts_cv;
         setSVMParams( svmParams, class_wts_cv, responses, svmParamsExt.balanceClasses );
-        CvParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid;
+        svm = SVM::create(svmParams);
+        ParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid;
         setSVMTrainAutoParams( c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid );
-        svm.train_auto( trainData, responses, Mat(), Mat(), svmParams, 10, c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid );
+
+        svm->trainAuto(TrainData::create(trainData, ROW_SAMPLE, responses), 10,
+                       c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid);
         cout << "SVM TRAINING FOR CLASS " << objClassName << " COMPLETED" << endl;

-        svm.save( svmFilename.c_str() );
+        svm->save( svmFilename );
         cout << "SAVED CLASSIFIER TO FILE" << endl;
     }
+    return svm;
 }

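For reference, the 2.4-to-3.x ml migration pattern this hunk applies, sketched with a hypothetical feature matrix. Note SVM::Params itself was dropped again before the API settled; the setter style below is the one that survived, so treat this as an illustration rather than the exact code in the tree:

```cpp
#include "opencv2/ml.hpp"
using namespace cv;
using namespace cv::ml;

// samples: one row per training vector (CV_32F); labels: +1/-1 (CV_32S).
Ptr<SVM> trainRbfSvm(const Mat& samples, const Mat& labels)
{
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::RBF);
    // trainAuto cross-validates over the default parameter grids (kFold = 10).
    svm->trainAuto(TrainData::create(samples, ROW_SAMPLE, labels));
    return svm;
}
```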
-#if defined HAVE_OPENCV_OCL && _OCL_SVM_
-static void computeConfidences( cv::ocl::CvSVM_OCL& svm, const string& objClassName, VocData& vocData,
-                                Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
-                                const string& resPath )
-#else
-static void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
+static void computeConfidences( const Ptr<SVM>& svm, const string& objClassName, VocData& vocData,
                                 Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
                                 const string& resPath )
-#endif
 {
     cout << "*** CALCULATING CONFIDENCES FOR CLASS " << objClassName << " ***" << endl;
     cout << "CALCULATING BOW VECTORS FOR TEST SET OF " << objClassName << "..." << endl;
@@ -2491,12 +2482,12 @@ static void computeConfidences( CvSVM& svm, const string& objClassName, VocData&
         if( imageIdx == 0 )
         {
             // In the first iteration, determine the sign of the positive class
-            float classVal = confidences[imageIdx] = svm.predict( bowImageDescriptors[imageIdx], false );
-            float scoreVal = confidences[imageIdx] = svm.predict( bowImageDescriptors[imageIdx], true );
+            float classVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), 0 );
+            float scoreVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
             signMul = (classVal < 0) == (scoreVal < 0) ? 1.f : -1.f;
         }
         // svm output of decision function
-        confidences[imageIdx] = signMul * svm.predict( bowImageDescriptors[imageIdx], true );
+        confidences[imageIdx] = signMul * svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
     }

     cout << "WRITING QUERY RESULTS TO VOC RESULTS FILE FOR CLASS " << objClassName << "..." << endl;
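The two-argument 2.4 call predict(sample, returnDFVal) becomes a flags parameter in 3.x: flags = 0 yields the class label, StatModel::RAW_OUTPUT the uncalibrated decision-function value. A minimal sketch (svm and sample are hypothetical stand-ins for the variables above):

```cpp
// label: +1/-1 class decision; score: signed distance to the separating hyperplane.
float label = svm->predict(sample);
float score = svm->predict(sample, cv::noArray(), cv::ml::StatModel::RAW_OUTPUT);
```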
@@ -2606,13 +2597,8 @@ int main(int argc, char** argv)
     for( size_t classIdx = 0; classIdx < objClasses.size(); ++classIdx )
     {
         // Train a classifier on train dataset
-#if defined HAVE_OPENCV_OCL && _OCL_SVM_
-        cv::ocl::CvSVM_OCL svm;
-#else
-        CvSVM svm;
-#endif
-        trainSVMClassifier( svm, svmTrainParamsExt, objClasses[classIdx], vocData,
-                            bowExtractor, featureDetector, resPath );
+        Ptr<SVM> svm = trainSVMClassifier( svmTrainParamsExt, objClasses[classIdx], vocData,
+                                           bowExtractor, featureDetector, resPath );

         // Now use the classifier over all images on the test dataset and rank according to score order
         // also calculating precision-recall etc.
@@ -6,6 +6,7 @@
 */

 #include "opencv2/video.hpp"
+#include "opencv2/videoio.hpp"
 #include "opencv2/highgui.hpp"
 #include <opencv2/core/utility.hpp>
 #include <iostream>

@@ -2,6 +2,7 @@
 #include <opencv2/core/utility.hpp>
 #include "opencv2/imgproc.hpp"
 #include "opencv2/video/background_segm.hpp"
+#include "opencv2/videoio.hpp"
 #include "opencv2/highgui.hpp"
 #include <stdio.h>

BIN  samples/cpp/box.png (new file; 50 KiB)
BIN  samples/cpp/box_in_scene.png (new file; 120 KiB)
@@ -1,131 +0,0 @@
-/*
- * matching_test.cpp
- *
- *  Created on: Oct 17, 2010
- *      Author: ethan
- */
-#include "opencv2/core.hpp"
-#include <opencv2/core/utility.hpp>
-#include "opencv2/calib3d.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/highgui.hpp"
-#include <vector>
-#include <iostream>
-
-using namespace cv;
-using namespace std;
-
-//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
-static void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
-                           const vector<KeyPoint>& kpts_query, vector<Point2f>& pts_train, vector<Point2f>& pts_query)
-{
-    pts_train.clear();
-    pts_query.clear();
-    pts_train.reserve(matches.size());
-    pts_query.reserve(matches.size());
-    for (size_t i = 0; i < matches.size(); i++)
-    {
-        const DMatch& match = matches[i];
-        pts_query.push_back(kpts_query[match.queryIdx].pt);
-        pts_train.push_back(kpts_train[match.trainIdx].pt);
-    }
-}
-
-static double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
-                    const Mat& train, const Mat& query, vector<DMatch>& matches)
-{
-    double t = (double)getTickCount();
-    matcher.match(query, train, matches); //Using features2d
-    return ((double)getTickCount() - t) / getTickFrequency();
-}
-
-static void help()
-{
-    cout << "This program shows how to use BRIEF descriptor to match points in features2d" << endl <<
-            "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results" << endl <<
-            "Usage: " << endl <<
-            "image1 image2 " << endl <<
-            "Example: " << endl <<
-            "box.png box_in_scene.png " << endl;
-}
-
-const char* keys =
-{
-    "{@first_image  | box.png          | the first image}"
-    "{@second_image | box_in_scene.png | the second image}"
-};
-
-int main(int argc, const char ** argv)
-{
-    help();
-    CommandLineParser parser(argc, argv, keys);
-    string im1_name = parser.get<string>(0);
-    string im2_name = parser.get<string>(1);
-
-    Mat im1 = imread(im1_name, IMREAD_GRAYSCALE);
-    Mat im2 = imread(im2_name, IMREAD_GRAYSCALE);
-
-    if (im1.empty() || im2.empty())
-    {
-        cout << "could not open one of the images..." << endl;
-        cout << "the command-line parameters currently have these values: " << endl;
-        parser.printMessage();
-        return 1;
-    }
-
-    double t = (double)getTickCount();
-
-    FastFeatureDetector detector(50);
-    BriefDescriptorExtractor extractor(32); //this is really 32 x 8 matches since they are binary matches packed into bytes
-
-    vector<KeyPoint> kpts_1, kpts_2;
-    detector.detect(im1, kpts_1);
-    detector.detect(im2, kpts_2);
-
-    t = ((double)getTickCount() - t) / getTickFrequency();
-
-    cout << "found " << kpts_1.size() << " keypoints in " << im1_name << endl << "found " << kpts_2.size()
-         << " keypoints in " << im2_name << endl << "took " << t << " seconds." << endl;
-
-    Mat desc_1, desc_2;
-
-    cout << "computing descriptors..." << endl;
-
-    t = (double)getTickCount();
-
-    extractor.compute(im1, kpts_1, desc_1);
-    extractor.compute(im2, kpts_2, desc_2);
-
-    t = ((double)getTickCount() - t) / getTickFrequency();
-
-    cout << "done computing descriptors... took " << t << " seconds" << endl;
-
-    //Do matching using features2d
-    cout << "matching with BruteForceMatcher<Hamming>" << endl;
-    BFMatcher matcher_popcount(extractor.defaultNorm());
-    vector<DMatch> matches_popcount;
-    double pop_time = match(kpts_1, kpts_2, matcher_popcount, desc_1, desc_2, matches_popcount);
-    cout << "done BruteForceMatcher<Hamming> matching. took " << pop_time << " seconds" << endl;
-
-    vector<Point2f> mpts_1, mpts_2;
-    matches2points(matches_popcount, kpts_1, kpts_2, mpts_1, mpts_2); //Extract a list of the (x,y) location of the matches
-    vector<char> outlier_mask;
-    Mat H = findHomography(mpts_2, mpts_1, RANSAC, 1, outlier_mask);
-
-    Mat outimg;
-    drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1), outlier_mask);
-    imshow("matches - popcount - outliers removed", outimg);
-
-    Mat warped;
-    Mat diff;
-    warpPerspective(im2, warped, H, im1.size());
-    imshow("warped", warped);
-    absdiff(im1,warped,diff);
-    imshow("diff", diff);
-    waitKey();
-    return 0;
-}
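This sample goes away because BRIEF (together with the other experimental 2D features) moved out of the main tree around 3.0. A rough in-tree equivalent uses ORB, which pairs FAST keypoints with rotated BRIEF descriptors; the sketch below is illustrative, not code from this commit:

```cpp
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>

int main()
{
    cv::Mat im1 = cv::imread("box.png", cv::IMREAD_GRAYSCALE);          // object
    cv::Mat im2 = cv::imread("box_in_scene.png", cv::IMREAD_GRAYSCALE); // scene

    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> k1, k2;
    cv::Mat d1, d2;
    orb->detectAndCompute(im1, cv::noArray(), k1, d1);
    orb->detectAndCompute(im2, cv::noArray(), k2, d2);

    // Binary descriptors are compared by Hamming distance (popcount).
    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<cv::DMatch> matches;
    matcher.match(d2, d1, matches); // query = scene, train = object

    std::vector<cv::Point2f> p1, p2;
    for (size_t i = 0; i < matches.size(); i++)
    {
        p1.push_back(k1[matches[i].trainIdx].pt);
        p2.push_back(k2[matches[i].queryIdx].pt);
    }
    cv::Mat H = cv::findHomography(p2, p1, cv::RANSAC, 1);
    return 0;
}
```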
@@ -1,778 +0,0 @@
|
||||
#include "opencv2/core.hpp"
|
||||
#include <opencv2/core/utility.hpp>
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
#include "opencv2/features2d.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <map>
|
||||
|
||||
#include <ctype.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
static void help()
|
||||
{
|
||||
printf("\nSigh: This program is not complete/will be replaced. \n"
|
||||
"So: Use this just to see hints of how to use things like Rodrigues\n"
|
||||
" conversions, finding the fundamental matrix, using descriptor\n"
|
||||
" finding and matching in features2d and using camera parameters\n"
|
||||
"Usage: build3dmodel -i <intrinsics_filename>\n"
|
||||
"\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
static bool readCameraMatrix(const string& filename,
|
||||
Mat& cameraMatrix, Mat& distCoeffs,
|
||||
Size& calibratedImageSize )
|
||||
{
|
||||
FileStorage fs(filename, FileStorage::READ);
|
||||
fs["image_width"] >> calibratedImageSize.width;
|
||||
fs["image_height"] >> calibratedImageSize.height;
|
||||
fs["distortion_coefficients"] >> distCoeffs;
|
||||
fs["camera_matrix"] >> cameraMatrix;
|
||||
|
||||
if( distCoeffs.type() != CV_64F )
|
||||
distCoeffs = Mat_<double>(distCoeffs);
|
||||
if( cameraMatrix.type() != CV_64F )
|
||||
cameraMatrix = Mat_<double>(cameraMatrix);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool readModelViews( const string& filename, vector<Point3f>& box,
|
||||
vector<string>& imagelist,
|
||||
vector<Rect>& roiList, vector<Vec6f>& poseList )
|
||||
{
|
||||
imagelist.resize(0);
|
||||
roiList.resize(0);
|
||||
poseList.resize(0);
|
||||
box.resize(0);
|
||||
|
||||
FileStorage fs(filename, FileStorage::READ);
|
||||
if( !fs.isOpened() )
|
||||
return false;
|
||||
fs["box"] >> box;
|
||||
|
||||
FileNode all = fs["views"];
|
||||
if( all.type() != FileNode::SEQ )
|
||||
return false;
|
||||
FileNodeIterator it = all.begin(), it_end = all.end();
|
||||
|
||||
for(; it != it_end; ++it)
|
||||
{
|
||||
FileNode n = *it;
|
||||
imagelist.push_back((string)n["image"]);
|
||||
FileNode nr = n["roi"];
|
||||
roiList.push_back(Rect((int)nr[0], (int)nr[1], (int)nr[2], (int)nr[3]));
|
||||
FileNode np = n["pose"];
|
||||
poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
|
||||
(float)np[3], (float)np[4], (float)np[5]));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
struct PointModel
|
||||
{
|
||||
vector<Point3f> points;
|
||||
vector<vector<int> > didx;
|
||||
Mat descriptors;
|
||||
string name;
|
||||
};
|
||||
|
||||
|
||||
static void writeModel(const string& modelFileName, const string& modelname,
|
||||
const PointModel& model)
|
||||
{
|
||||
FileStorage fs(modelFileName, FileStorage::WRITE);
|
||||
|
||||
fs << modelname << "{" <<
|
||||
"points" << "[:" << model.points << "]" <<
|
||||
"idx" << "[:";
|
||||
|
||||
for( size_t i = 0; i < model.didx.size(); i++ )
|
||||
fs << "[:" << model.didx[i] << "]";
|
||||
fs << "]" << "descriptors" << model.descriptors;
|
||||
}
|
||||
|
||||
|
||||
static void unpackPose(const Vec6f& pose, Mat& R, Mat& t)
|
||||
{
|
||||
Mat rvec = (Mat_<double>(3,1) << pose[0], pose[1], pose[2]);
|
||||
t = (Mat_<double>(3,1) << pose[3], pose[4], pose[5]);
|
||||
Rodrigues(rvec, R);
|
||||
}
|
||||
|
||||
|
||||
static Mat getFundamentalMat( const Mat& R1, const Mat& t1,
|
||||
const Mat& R2, const Mat& t2,
|
||||
const Mat& cameraMatrix )
|
||||
{
|
||||
Mat_<double> R = R2*R1.t(), t = t2 - R*t1;
|
||||
double tx = t.at<double>(0,0), ty = t.at<double>(1,0), tz = t.at<double>(2,0);
|
||||
Mat E = (Mat_<double>(3,3) << 0, -tz, ty, tz, 0, -tx, -ty, tx, 0)*R;
|
||||
Mat iK = cameraMatrix.inv();
|
||||
Mat F = iK.t()*E*iK;
|
||||
|
||||
#if 0
|
||||
static bool checked = false;
|
||||
if(!checked)
|
||||
{
|
||||
vector<Point3f> objpoints(100);
|
||||
Mat O(objpoints);
|
||||
randu(O, Scalar::all(-10), Scalar::all(10));
|
||||
vector<Point2f> imgpoints1, imgpoints2;
|
||||
projectPoints(Mat(objpoints), R1, t1, cameraMatrix, Mat(), imgpoints1);
|
||||
projectPoints(Mat(objpoints), R2, t2, cameraMatrix, Mat(), imgpoints2);
|
||||
double* f = (double*)F.data;
|
||||
for( size_t i = 0; i < objpoints.size(); i++ )
|
||||
{
|
||||
Point2f p1 = imgpoints1[i], p2 = imgpoints2[i];
|
||||
double diff = p2.x*(f[0]*p1.x + f[1]*p1.y + f[2]) +
|
||||
p2.y*(f[3]*p1.x + f[4]*p1.y + f[5]) +
|
||||
f[6]*p1.x + f[7]*p1.y + f[8];
|
||||
CV_Assert(fabs(diff) < 1e-3);
|
||||
}
|
||||
checked = true;
|
||||
}
|
||||
#endif
|
||||
return F;
|
||||
}
|
||||
|
||||
|
||||
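What getFundamentalMat computes, written out (standard two-view geometry, matching the code above): with relative motion R = R2 R1^T and t = t2 − R t1, the essential and fundamental matrices are

```latex
E = [t]_\times R, \qquad
[t]_\times = \begin{pmatrix} 0 & -t_z & t_y \\ t_z & 0 & -t_x \\ -t_y & t_x & 0 \end{pmatrix}, \qquad
F = K^{-\top} E \, K^{-1}
```

and the disabled self-test simply verifies the epipolar constraint x2^T F x1 ≈ 0 on randomly projected points.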
-static void findConstrainedCorrespondences(const Mat& _F,
-                                           const vector<KeyPoint>& keypoints1,
-                                           const vector<KeyPoint>& keypoints2,
-                                           const Mat& descriptors1,
-                                           const Mat& descriptors2,
-                                           vector<Vec2i>& matches,
-                                           double eps, double ratio)
-{
-    float F[9]={0};
-    int dsize = descriptors1.cols;
-
-    Mat Fhdr = Mat(3, 3, CV_32F, F);
-    _F.convertTo(Fhdr, CV_32F);
-    matches.clear();
-
-    for( int i = 0; i < (int)keypoints1.size(); i++ )
-    {
-        Point2f p1 = keypoints1[i].pt;
-        double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
-        int bestIdx1 = -1;//, bestIdx2 = -1;
-        const float* d1 = descriptors1.ptr<float>(i);
-
-        for( int j = 0; j < (int)keypoints2.size(); j++ )
-        {
-            Point2f p2 = keypoints2[j].pt;
-            double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
-                       p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
-                       F[6]*p1.x + F[7]*p1.y + F[8];
-            if( fabs(e) > eps )
-                continue;
-            const float* d2 = descriptors2.ptr<float>(j);
-            double dist = 0;
-            int k = 0;
-
-            for( ; k <= dsize - 8; k += 8 )
-            {
-                float t0 = d1[k] - d2[k], t1 = d1[k+1] - d2[k+1];
-                float t2 = d1[k+2] - d2[k+2], t3 = d1[k+3] - d2[k+3];
-                float t4 = d1[k+4] - d2[k+4], t5 = d1[k+5] - d2[k+5];
-                float t6 = d1[k+6] - d2[k+6], t7 = d1[k+7] - d2[k+7];
-                dist += t0*t0 + t1*t1 + t2*t2 + t3*t3 +
-                        t4*t4 + t5*t5 + t6*t6 + t7*t7;
-
-                if( dist >= bestDist2 )
-                    break;
-            }
-
-            if( dist < bestDist2 )
-            {
-                for( ; k < dsize; k++ )
-                {
-                    float t = d1[k] - d2[k];
-                    dist += t*t;
-                }
-
-                if( dist < bestDist1 )
-                {
-                    bestDist2 = bestDist1;
-                    //bestIdx2 = bestIdx1;
-                    bestDist1 = dist;
-                    bestIdx1 = (int)j;
-                }
-                else if( dist < bestDist2 )
-                {
-                    bestDist2 = dist;
-                    //bestIdx2 = (int)j;
-                }
-            }
-        }
-
-        if( bestIdx1 >= 0 && bestDist1 < bestDist2*ratio )
-        {
-            Point2f p2 = keypoints2[bestIdx1].pt;
-            double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
-                       p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
-                       F[6]*p1.x + F[7]*p1.y + F[8];
-            if( e > eps*0.25 )
-                continue;
-            double threshold = bestDist1/ratio;
-            const float* d22 = descriptors2.ptr<float>(bestIdx1);
-            int i1 = 0;
-            for( ; i1 < (int)keypoints1.size(); i1++ )
-            {
-                if( i1 == i )
-                    continue;
-                Point2f pt1 = keypoints1[i1].pt;
-                const float* d11 = descriptors1.ptr<float>(i1);
-                double dist = 0;
-
-                e = p2.x*(F[0]*pt1.x + F[1]*pt1.y + F[2]) +
-                    p2.y*(F[3]*pt1.x + F[4]*pt1.y + F[5]) +
-                    F[6]*pt1.x + F[7]*pt1.y + F[8];
-                if( fabs(e) > eps )
-                    continue;
-
-                for( int k = 0; k < dsize; k++ )
-                {
-                    float t = d11[k] - d22[k];
-                    dist += t*t;
-                    if( dist >= threshold )
-                        break;
-                }
-
-                if( dist < threshold )
-                    break;
-            }
-            if( i1 == (int)keypoints1.size() )
-                matches.push_back(Vec2i(i,bestIdx1));
-        }
-    }
-}
-
-
-static Point3f findRayIntersection(Point3f k1, Point3f b1, Point3f k2, Point3f b2)
-{
-    float a[4], b[2], x[2];
-    a[0] = k1.dot(k1);
-    a[1] = a[2] = -k1.dot(k2);
-    a[3] = k2.dot(k2);
-    b[0] = k1.dot(b2 - b1);
-    b[1] = k2.dot(b1 - b2);
-    Mat_<float> A(2, 2, a), B(2, 1, b), X(2, 1, x);
-    solve(A, B, X);
-
-    float s1 = X.at<float>(0, 0);
-    float s2 = X.at<float>(1, 0);
-    return (k1*s1 + b1 + k2*s2 + b2)*0.5f;
-}

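findRayIntersection is the closed-form closest point between the rays k1 s1 + b1 and k2 s2 + b2: minimizing ‖(k1 s1 + b1) − (k2 s2 + b2)‖² over (s1, s2) gives the 2×2 normal equations the code assembles,

```latex
\begin{pmatrix} k_1 \cdot k_1 & -k_1 \cdot k_2 \\ -k_1 \cdot k_2 & k_2 \cdot k_2 \end{pmatrix}
\begin{pmatrix} s_1 \\ s_2 \end{pmatrix}
=
\begin{pmatrix} k_1 \cdot (b_2 - b_1) \\ k_2 \cdot (b_1 - b_2) \end{pmatrix}
```

and the returned value is the midpoint of the two closest points on the rays.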
-static Point3f triangulatePoint(const vector<Point2f>& ps,
-                                const vector<Mat>& Rs,
-                                const vector<Mat>& ts,
-                                const Mat& cameraMatrix)
-{
-    Mat_<double> K(cameraMatrix);
-
-    /*if( ps.size() > 2 )
-    {
-        Mat_<double> L(ps.size()*3, 4), U, evalues;
-        Mat_<double> P(3,4), Rt(3,4), Rt_part1=Rt.colRange(0,3), Rt_part2=Rt.colRange(3,4);
-
-        for( size_t i = 0; i < ps.size(); i++ )
-        {
-            double x = ps[i].x, y = ps[i].y;
-            Rs[i].convertTo(Rt_part1, Rt_part1.type());
-            ts[i].convertTo(Rt_part2, Rt_part2.type());
-            P = K*Rt;
-
-            for( int k = 0; k < 4; k++ )
-            {
-                L(i*3, k) = x*P(2,k) - P(0,k);
-                L(i*3+1, k) = y*P(2,k) - P(1,k);
-                L(i*3+2, k) = x*P(1,k) - y*P(0,k);
-            }
-        }
-
-        eigen(L.t()*L, evalues, U);
-        CV_Assert(evalues(0,0) >= evalues(3,0));
-
-        double W = fabs(U(3,3)) > FLT_EPSILON ? 1./U(3,3) : 0;
-        return Point3f((float)(U(3,0)*W), (float)(U(3,1)*W), (float)(U(3,2)*W));
-    }
-    else*/
-    {
-        Mat_<float> iK = K.inv();
-        Mat_<float> R1t = Mat_<float>(Rs[0]).t();
-        Mat_<float> R2t = Mat_<float>(Rs[1]).t();
-        Mat_<float> m1 = (Mat_<float>(3,1) << ps[0].x, ps[0].y, 1);
-        Mat_<float> m2 = (Mat_<float>(3,1) << ps[1].x, ps[1].y, 1);
-        Mat_<float> K1 = R1t*(iK*m1), K2 = R2t*(iK*m2);
-        Mat_<float> B1 = -R1t*Mat_<float>(ts[0]);
-        Mat_<float> B2 = -R2t*Mat_<float>(ts[1]);
-        return findRayIntersection(*K1.ptr<Point3f>(), *B1.ptr<Point3f>(),
-                                   *K2.ptr<Point3f>(), *B2.ptr<Point3f>());
-    }
-}
-
-
-static void triangulatePoint_test(void)
-{
-    int i, n = 100;
-    vector<Point3f> objpt(n), delta1(n), delta2(n);
-    Mat rvec1(3,1,CV_32F), tvec1(3,1,CV_64F);
-    Mat rvec2(3,1,CV_32F), tvec2(3,1,CV_64F);
-    Mat objptmat(objpt), deltamat1(delta1), deltamat2(delta2);
-    randu(rvec1, Scalar::all(-10), Scalar::all(10));
-    randu(tvec1, Scalar::all(-10), Scalar::all(10));
-    randu(rvec2, Scalar::all(-10), Scalar::all(10));
-    randu(tvec2, Scalar::all(-10), Scalar::all(10));
-
-    randu(objptmat, Scalar::all(-10), Scalar::all(10));
-    double eps = 1e-2;
-    randu(deltamat1, Scalar::all(-eps), Scalar::all(eps));
-    randu(deltamat2, Scalar::all(-eps), Scalar::all(eps));
-    vector<Point2f> imgpt1, imgpt2;
-    Mat_<float> cameraMatrix(3,3);
-    double fx = 1000., fy = 1010., cx = 400.5, cy = 300.5;
-    cameraMatrix << fx, 0, cx, 0, fy, cy, 0, 0, 1;
-
-    projectPoints(Mat(objpt)+Mat(delta1), rvec1, tvec1, cameraMatrix, Mat(), imgpt1);
-    projectPoints(Mat(objpt)+Mat(delta2), rvec2, tvec2, cameraMatrix, Mat(), imgpt2);
-
-    vector<Point3f> objptt(n);
-    vector<Point2f> pts(2);
-    vector<Mat> Rv(2), tv(2);
-    Rodrigues(rvec1, Rv[0]);
-    Rodrigues(rvec2, Rv[1]);
-    tv[0] = tvec1; tv[1] = tvec2;
-    for( i = 0; i < n; i++ )
-    {
-        pts[0] = imgpt1[i]; pts[1] = imgpt2[i];
-        objptt[i] = triangulatePoint(pts, Rv, tv, cameraMatrix);
-    }
-    double err = norm(Mat(objpt), Mat(objptt), NORM_INF);
-    CV_Assert(err < 1e-1);
-}
-
-typedef pair<int, int> Pair2i;
-typedef map<Pair2i, int> Set2i;
-
-struct EqKeypoints
-{
-    EqKeypoints(const vector<int>* _dstart, const Set2i* _pairs)
-        : dstart(_dstart), pairs(_pairs) {}
-
-    bool operator()(const Pair2i& a, const Pair2i& b) const
-    {
-        return pairs->find(Pair2i(dstart->at(a.first) + a.second,
-                                  dstart->at(b.first) + b.second)) != pairs->end();
-    }
-
-    const vector<int>* dstart;
-    const Set2i* pairs;
-};
-
-template<typename _Tp, class _EqPredicate> static
-int partition( const std::vector<_Tp>& _vec, std::vector<int>& labels,
-               _EqPredicate predicate=_EqPredicate())
-{
-    int i, j, N = (int)_vec.size();
-    const _Tp* vec = &_vec[0];
-
-    const int PARENT=0;
-    const int RANK=1;
-
-    std::vector<int> _nodes(N*2);
-    int (*nodes)[2] = (int(*)[2])&_nodes[0];
-
-    // The first O(N) pass: create N single-vertex trees
-    for(i = 0; i < N; i++)
-    {
-        nodes[i][PARENT]=-1;
-        nodes[i][RANK] = 0;
-    }
-
-    // The main O(N^2) pass: merge connected components
-    for( i = 0; i < N; i++ )
-    {
-        int root = i;
-
-        // find root
-        while( nodes[root][PARENT] >= 0 )
-            root = nodes[root][PARENT];
-
-        for( j = 0; j < N; j++ )
-        {
-            if( i == j || !predicate(vec[i], vec[j]))
-                continue;
-            int root2 = j;
-
-            while( nodes[root2][PARENT] >= 0 )
-                root2 = nodes[root2][PARENT];
-
-            if( root2 != root )
-            {
-                // unite both trees
-                int rank = nodes[root][RANK], rank2 = nodes[root2][RANK];
-                if( rank > rank2 )
-                    nodes[root2][PARENT] = root;
-                else
-                {
-                    nodes[root][PARENT] = root2;
-                    nodes[root2][RANK] += rank == rank2;
-                    root = root2;
-                }
-                CV_Assert( nodes[root][PARENT] < 0 );
-
-                int k = j, parent;
-
-                // compress the path from node2 to root
-                while( (parent = nodes[k][PARENT]) >= 0 )
-                {
-                    nodes[k][PARENT] = root;
-                    k = parent;
-                }
-
-                // compress the path from node to root
-                k = i;
-                while( (parent = nodes[k][PARENT]) >= 0 )
-                {
-                    nodes[k][PARENT] = root;
-                    k = parent;
-                }
-            }
-        }
-    }
-
-    // Final O(N) pass: enumerate classes
-    labels.resize(N);
-    int nclasses = 0;
-
-    for( i = 0; i < N; i++ )
-    {
-        int root = i;
-        while( nodes[root][PARENT] >= 0 )
-            root = nodes[root][PARENT];
-        // re-use the rank as the class label
-        if( nodes[root][RANK] >= 0 )
-            nodes[root][RANK] = ~nclasses++;
-        labels[i] = ~nodes[root][RANK];
-    }
-
-    return nclasses;
-}

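This template is a local copy of the union-find clustering that core already ships as cv::partition, so the same grouping could be expressed through the library call; a fragment sketched with the names from the code above (keypointsIdx, dstart, pairs):

```cpp
// Two keypoint references land in the same class whenever the EqKeypoints
// predicate reports their global descriptor indices as a matched pair.
std::vector<int> labels;
int nclasses = cv::partition(keypointsIdx, labels, EqKeypoints(&dstart, &pairs));
```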
static void build3dmodel( const Ptr<FeatureDetector>& detector,
|
||||
const Ptr<DescriptorExtractor>& descriptorExtractor,
|
||||
const vector<Point3f>& /*modelBox*/,
|
||||
const vector<string>& imageList,
|
||||
const vector<Rect>& roiList,
|
||||
const vector<Vec6f>& poseList,
|
||||
const Mat& cameraMatrix,
|
||||
PointModel& model )
|
||||
{
|
||||
int progressBarSize = 10;
|
||||
|
||||
const double Feps = 5;
|
||||
const double DescriptorRatio = 0.7;
|
||||
|
||||
vector<vector<KeyPoint> > allkeypoints;
|
||||
vector<int> dstart;
|
||||
vector<float> alldescriptorsVec;
|
||||
vector<Vec2i> pairwiseMatches;
|
||||
vector<Mat> Rs, ts;
|
||||
int descriptorSize = 0;
|
||||
Mat descriptorbuf;
|
||||
Set2i pairs, keypointsIdxMap;
|
||||
|
||||
model.points.clear();
|
||||
model.didx.clear();
|
||||
|
||||
dstart.push_back(0);
|
||||
|
||||
size_t nimages = imageList.size();
|
||||
size_t nimagePairs = (nimages - 1)*nimages/2 - nimages;
|
||||
|
||||
printf("\nComputing descriptors ");
|
||||
|
||||
// 1. find all the keypoints and all the descriptors
|
||||
for( size_t i = 0; i < nimages; i++ )
|
||||
{
|
||||
Mat img = imread(imageList[i], 1), gray;
|
||||
cvtColor(img, gray, COLOR_BGR2GRAY);
|
||||
|
||||
vector<KeyPoint> keypoints;
|
||||
detector->detect(gray, keypoints);
|
||||
descriptorExtractor->compute(gray, keypoints, descriptorbuf);
|
||||
Point2f roiofs = roiList[i].tl();
|
||||
for( size_t k = 0; k < keypoints.size(); k++ )
|
||||
keypoints[k].pt += roiofs;
|
||||
allkeypoints.push_back(keypoints);
|
||||
|
||||
Mat buf = descriptorbuf;
|
||||
if( !buf.isContinuous() || buf.type() != CV_32F )
|
||||
{
|
||||
buf.release();
|
||||
descriptorbuf.convertTo(buf, CV_32F);
|
||||
}
|
||||
descriptorSize = buf.cols;
|
||||
|
||||
size_t prev = alldescriptorsVec.size();
|
||||
size_t delta = buf.rows*buf.cols;
|
||||
alldescriptorsVec.resize(prev + delta);
|
||||
std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
|
||||
alldescriptorsVec.begin() + prev);
|
||||
dstart.push_back(dstart.back() + (int)keypoints.size());
|
||||
|
||||
Mat R, t;
|
||||
unpackPose(poseList[i], R, t);
|
||||
Rs.push_back(R);
|
||||
ts.push_back(t);
|
||||
|
||||
if( (i+1)*progressBarSize/nimages > i*progressBarSize/nimages )
|
||||
{
|
||||
putchar('.');
|
||||
fflush(stdout);
|
||||
}
|
||||
}
|
||||
|
||||
Mat alldescriptors((int)alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
|
||||
&alldescriptorsVec[0]);
|
||||
|
||||
printf("\nOk. total images = %d. total keypoints = %d\n",
|
||||
(int)nimages, alldescriptors.rows);
|
||||
|
||||
printf("\nFinding correspondences ");
|
||||
|
||||
int pairsFound = 0;
|
||||
|
||||
vector<Point2f> pts_k(2);
|
||||
vector<Mat> Rs_k(2), ts_k(2);
|
||||
//namedWindow("img1", 1);
|
||||
//namedWindow("img2", 1);
|
||||
|
||||
// 2. find pairwise correspondences
|
||||
for( size_t i = 0; i < nimages; i++ )
|
||||
for( size_t j = i+1; j < nimages; j++ )
|
||||
{
|
||||
const vector<KeyPoint>& keypoints1 = allkeypoints[i];
|
||||
const vector<KeyPoint>& keypoints2 = allkeypoints[j];
|
||||
Mat descriptors1 = alldescriptors.rowRange(dstart[i], dstart[i+1]);
|
||||
Mat descriptors2 = alldescriptors.rowRange(dstart[j], dstart[j+1]);
|
||||
|
||||
Mat F = getFundamentalMat(Rs[i], ts[i], Rs[j], ts[j], cameraMatrix);
|
||||
|
||||
findConstrainedCorrespondences( F, keypoints1, keypoints2,
|
||||
descriptors1, descriptors2,
|
||||
pairwiseMatches, Feps, DescriptorRatio );
|
||||
|
||||
//pairsFound += (int)pairwiseMatches.size();
|
||||
|
||||
//Mat img1 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
|
||||
//Mat img2 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)j), 1);
|
||||
|
||||
//double avg_err = 0;
|
||||
for( size_t k = 0; k < pairwiseMatches.size(); k++ )
|
||||
{
|
||||
int i1 = pairwiseMatches[k][0], i2 = pairwiseMatches[k][1];
|
||||
|
||||
pts_k[0] = keypoints1[i1].pt;
|
||||
pts_k[1] = keypoints2[i2].pt;
|
||||
Rs_k[0] = Rs[i]; Rs_k[1] = Rs[j];
|
||||
ts_k[0] = ts[i]; ts_k[1] = ts[j];
|
||||
                Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);

                vector<Point3f> objpts;
                objpts.push_back(objpt);
                vector<Point2f> imgpts1, imgpts2;
                projectPoints(Mat(objpts), Rs_k[0], ts_k[0], cameraMatrix, Mat(), imgpts1);
                projectPoints(Mat(objpts), Rs_k[1], ts_k[1], cameraMatrix, Mat(), imgpts2);

                double e1 = norm(imgpts1[0] - keypoints1[i1].pt);
                double e2 = norm(imgpts2[0] - keypoints2[i2].pt);
                if( e1 + e2 > 5 )
                    continue;

                pairsFound++;

                //model.points.push_back(objpt);
                pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
                pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
                keypointsIdxMap[Pair2i((int)i,i1)] = 1;
                keypointsIdxMap[Pair2i((int)j,i2)] = 1;
                //CV_Assert(e1 < 5 && e2 < 5);
                //Scalar color(rand()%256,rand()%256, rand()%256);
                //circle(img1, keypoints1[i1].pt, 2, color, -1, CV_AA);
                //circle(img2, keypoints2[i2].pt, 2, color, -1, CV_AA);
            }
            //printf("avg err = %g\n", pairwiseMatches.size() ? avg_err/(2*pairwiseMatches.size()) : 0.);
            //imshow("img1", img1);
            //imshow("img2", img2);
            //waitKey();

            if( (i+1)*progressBarSize/nimagePairs > i*progressBarSize/nimagePairs )
            {
                putchar('.');
                fflush(stdout);
            }
        }

    printf("\nOk. Total pairs = %d\n", pairsFound );

    // 3. build the keypoint clusters
    vector<Pair2i> keypointsIdx;
    Set2i::iterator kpidx_it = keypointsIdxMap.begin(), kpidx_end = keypointsIdxMap.end();

    for( ; kpidx_it != kpidx_end; ++kpidx_it )
        keypointsIdx.push_back(kpidx_it->first);

    printf("\nClustering correspondences ");

    vector<int> labels;
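    // cv::partition() runs a union-find over keypointsIdx; the EqKeypoints predicate presumably
    // declares two (image, keypoint) observations equivalent when they were matched as a pair,
    // so each resulting class should correspond to a single physical 3D point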
    int nclasses = partition( keypointsIdx, labels, EqKeypoints(&dstart, &pairs) );

    printf("\nOk. Total classes (i.e. 3d points) = %d\n", nclasses );

    model.descriptors.create((int)keypointsIdx.size(), descriptorSize, CV_32F);
    model.didx.resize(nclasses);
    model.points.resize(nclasses);

    vector<vector<Pair2i> > clusters(nclasses);
    for( size_t i = 0; i < keypointsIdx.size(); i++ )
        clusters[labels[i]].push_back(keypointsIdx[i]);

    // 4. now compute 3D points corresponding to each cluster and fill in the model data
    printf("\nComputing 3D coordinates ");

    int globalDIdx = 0;
    for( int k = 0; k < nclasses; k++ )
    {
        int i, n = (int)clusters[k].size();
        pts_k.resize(n);
        Rs_k.resize(n);
        ts_k.resize(n);
        model.didx[k].resize(n);
        for( i = 0; i < n; i++ )
        {
            int imgidx = clusters[k][i].first, ptidx = clusters[k][i].second;
            Mat dstrow = model.descriptors.row(globalDIdx);
            alldescriptors.row(dstart[imgidx] + ptidx).copyTo(dstrow);

            model.didx[k][i] = globalDIdx++;
            pts_k[i] = allkeypoints[imgidx][ptidx].pt;
            Rs_k[i] = Rs[imgidx];
            ts_k[i] = ts[imgidx];
        }
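        // each cluster yields one model point, triangulated from all of its 2D observations at once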
        Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);
        model.points[k] = objpt;

        if( (k+1)*progressBarSize/nclasses > k*progressBarSize/nclasses )
        {
            putchar('.');
            fflush(stdout);
        }
    }

    Mat img(768, 1024, CV_8UC3);
    vector<Point2f> imagePoints;
    namedWindow("Test", 1);

    // visualize the cloud
    for( size_t i = 0; i < nimages; i++ )
    {
        img = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
        projectPoints(Mat(model.points), Rs[i], ts[i], cameraMatrix, Mat(), imagePoints);

        for( int k = 0; k < (int)imagePoints.size(); k++ )
            circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, LINE_AA, 0);

        imshow("Test", img);
        int c = waitKey();
        if( c == 'q' || c == 'Q' )
            break;
    }
}


int main(int argc, char** argv)
{
    const char* intrinsicsFilename = 0;
    const char* modelName = 0;
    const char* detectorName = "SURF";
    const char* descriptorExtractorName = "SURF";

    vector<Point3f> modelBox;
    vector<string> imageList;
    vector<Rect> roiList;
    vector<Vec6f> poseList;

    if(argc < 3)
    {
        help();
        return -1;
    }

    for( int i = 1; i < argc; i++ )
    {
        if( strcmp(argv[i], "-i") == 0 )
            intrinsicsFilename = argv[++i];
        else if( strcmp(argv[i], "-m") == 0 )
            modelName = argv[++i];
        else if( strcmp(argv[i], "-d") == 0 )
            detectorName = argv[++i];
        else if( strcmp(argv[i], "-de") == 0 )
            descriptorExtractorName = argv[++i];
        else
        {
            help();
            printf("Incorrect option\n");
            return -1;
        }
    }

    if( !intrinsicsFilename || !modelName )
    {
        printf("Some of the required parameters are missing\n");
        help();
        return -1;
    }

    triangulatePoint_test();

    Mat cameraMatrix, distCoeffs;
    Size calibratedImageSize;
    readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize);

    Ptr<FeatureDetector> detector = FeatureDetector::create(detectorName);
    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create(descriptorExtractorName);

    string modelIndexFilename = format("%s_segm/frame_index.yml", modelName);
    if(!readModelViews( modelIndexFilename, modelBox, imageList, roiList, poseList))
    {
        printf("Cannot read the model. Check the parameters and the working directory\n");
        help();
        return -1;
    }

    PointModel model;
    model.name = modelName;
    build3dmodel( detector, descriptorExtractor, modelBox,
                  imageList, roiList, poseList, cameraMatrix, model );
    string outputModelName = format("%s_model.yml.gz", modelName);


    printf("\nDone! Now saving the model ...\n");
    writeModel(outputModelName, modelName, model);

    return 0;
}
@@ -2,6 +2,8 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <cctype>

@@ -1,334 +0,0 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <stdio.h>

using namespace cv;
using namespace std;

static void help()
{
    printf( "\nThis code generates an artificial camera and artificial chessboard images,\n"
            "and then calibrates. It is basically test code for calibration that shows\n"
            "how to package calibration points and then calibrate the camera.\n"
            "Usage:\n"
            "./calibration_artificial\n\n");
}
namespace cv
{

/* copy of the class defined in tests/cv/chessboardgenerator.h */
class ChessBoardGenerator
{
public:
    double sensorWidth;
    double sensorHeight;
    size_t squareEdgePointsNum;
    double min_cos;
    mutable double cov;
    Size patternSize;
    int rendererResolutionMultiplier;

    ChessBoardGenerator(const Size& patternSize = Size(8, 6));
    Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
    Size cornersSize() const;
private:
    void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
    Mat generateChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
                           const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
                           float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
    void generateBasis(Point3f& pb1, Point3f& pb2) const;
    Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
    Mat rvec, tvec;
};
}


const Size imgSize(800, 600);
const Size brdSize(8, 7);
const size_t brds_num = 20;

template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
{
    for(int j = 0; j < mat.rows; ++j)
        for(int i = 0; i < mat.cols; ++i)
            out << mat(j, i) << " ";
    return out;
}


int main()
{
    help();
    cout << "Initializing background...";
    Mat background(imgSize, CV_8UC3);
    randu(background, Scalar::all(32), Scalar::all(255));
    GaussianBlur(background, background, Size(5, 5), 2);
    cout << "Done" << endl;

    cout << "Initializing chess board generator...";
    ChessBoardGenerator cbg(brdSize);
    cbg.rendererResolutionMultiplier = 4;
    cout << "Done" << endl;

    /* camera params */
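    // synthetic pinhole intrinsics: fx = fy = 300, zero skew, principal point at the image center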
    Mat_<double> camMat(3, 3);
    camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;

    Mat_<double> distCoeffs(1, 5);
    distCoeffs << 1.2, 0.2, 0., 0., 0.;

    cout << "Generating chessboards...";
    vector<Mat> boards(brds_num);
    vector<Point2f> tmp;
    for(size_t i = 0; i < brds_num; ++i)
        cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
    cout << "Done" << endl;

    vector<Point3f> chessboard3D;
    for(int j = 0; j < cbg.cornersSize().height; ++j)
        for(int i = 0; i < cbg.cornersSize().width; ++i)
            chessboard3D.push_back(Point3i(i, j, 0));

    /* init points */
    vector< vector<Point3f> > objectPoints;
    vector< vector<Point2f> > imagePoints;

    cout << endl << "Finding chessboards' corners...";
    for(size_t i = 0; i < brds_num; ++i)
    {
        cout << i;
        namedWindow("Current chessboard"); imshow("Current chessboard", boards[i]); waitKey(100);
        bool found = findChessboardCorners(boards[i], cbg.cornersSize(), tmp);
        if (found)
        {
            imagePoints.push_back(tmp);
            objectPoints.push_back(chessboard3D);
            cout<< "-found ";
        }
        else
            cout<< "-not-found ";

        drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
        imshow("Current chessboard", boards[i]); waitKey(1000);
    }
    cout << "Done" << endl;
    destroyAllWindows();

    Mat camMat_est;
    Mat distCoeffs_est;
    vector<Mat> rvecs, tvecs;

    cout << "Calibrating...";
    double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
    cout << "Done" << endl;

    cout << endl << "Average Reprojection error: " << rep_err/brds_num/cbg.cornersSize().area() << endl;
    cout << "==================================" << endl;
    cout << "Original camera matrix:\n" << camMat << endl;
    cout << "Original distCoeffs:\n" << distCoeffs << endl;
    cout << "==================================" << endl;
    cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
    cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;

    return 0;
}


/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////

// Copy of tests/cv/src/chessboardgenerator code. Just do not want to add dependency.


ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
    squareEdgePointsNum(200), min_cos(std::sqrt(2.f)*0.5f), cov(0.5),
    patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
{
    Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
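    // (the identity rotation maps to the zero Rodrigues vector, so rvec starts out as zeros)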
}

void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
{
    Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
    for(size_t n = 0; n < squareEdgePointsNum; ++n)
        out.push_back( p1 + step * (float)n);
}

Size cv::ChessBoardGenerator::cornersSize() const
{
    return Size(patternSize.width-1, patternSize.height-1);
}

struct Mult
{
    float m;
    Mult(int mult) : m((float)mult) {}
    Point2f operator()(const Point2f& p)const { return p * m; }
};

void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
{
    RNG& rng = theRNG();

    Vec3f n;
    for(;;)
    {
        n[0] = rng.uniform(-1.f, 1.f);
        n[1] = rng.uniform(-1.f, 1.f);
        n[2] = rng.uniform(-1.f, 1.f);
        float len = (float)norm(n);
        n[0]/=len;
        n[1]/=len;
        n[2]/=len;

        if (fabs(n[2]) > min_cos)
            break;
    }

    Vec3f n_temp = n; n_temp[0] += 100;
    Vec3f b1 = n.cross(n_temp);
    Vec3f b2 = n.cross(b1);
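    // b1 and b2 are both orthogonal to n and to each other, so once normalized
    // they span the board plane whose normal is n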
    float len_b1 = (float)norm(b1);
    float len_b2 = (float)norm(b2);

    pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
    pb2 = Point3f(b2[0]/len_b2, b2[1]/len_b2, b2[2]/len_b2);
}

Mat cv::ChessBoardGenerator::generateChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
                                                const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
                                                float sqWidth, float sqHeight, const vector<Point3f>& whole,
                                                vector<Point2f>& corners) const
{
    vector< vector<Point> > squares_black;
    for(int i = 0; i < patternSize.width; ++i)
        for(int j = 0; j < patternSize.height; ++j)
            if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
            {
                vector<Point3f> pts_square3d;
                vector<Point2f> pts_square2d;

                Point3f p1 = zero + (i + 0) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p2 = zero + (i + 1) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p3 = zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                Point3f p4 = zero + (i + 0) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                generateEdge(p1, p2, pts_square3d);
                generateEdge(p2, p3, pts_square3d);
                generateEdge(p3, p4, pts_square3d);
                generateEdge(p4, p1, pts_square3d);

                projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);
                squares_black.resize(squares_black.size() + 1);
                vector<Point2f> temp;
                approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
                transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
            }

    /* calculate corners */
    vector<Point3f> corners3d;
    for(int j = 0; j < patternSize.height - 1; ++j)
        for(int i = 0; i < patternSize.width - 1; ++i)
            corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
    corners.clear();
    projectPoints( Mat(corners3d), rvec, tvec, camMat, distCoeffs, corners);

    vector<Point3f> whole3d;
    vector<Point2f> whole2d;
    generateEdge(whole[0], whole[1], whole3d);
    generateEdge(whole[1], whole[2], whole3d);
    generateEdge(whole[2], whole[3], whole3d);
    generateEdge(whole[3], whole[0], whole3d);
    projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
    vector<Point2f> temp_whole2d;
    approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);

    vector< vector<Point > > whole_contour(1);
    transform(temp_whole2d.begin(), temp_whole2d.end(),
              back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));

    Mat result;
    if (rendererResolutionMultiplier == 1)
    {
        result = bg.clone();
        drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
        drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
    }
    else
    {
        Mat tmp;
        resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
        drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
        drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
        resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
    }
    return result;
}

Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
{
    cov = std::min(cov, 0.8);
    double fovx, fovy, focalLen;
    Point2d principalPoint;
    double aspect;
    calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
                             fovx, fovy, focalLen, principalPoint, aspect);

    RNG& rng = theRNG();

    float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
    float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
    float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);

    Point3f p;
    p.z = cos(ah) * d1;
    p.x = sin(ah) * d1;
    p.y = p.z * tan(av);

    Point3f pb1, pb2;
    generateBasis(pb1, pb2);

    float cbHalfWidth = static_cast<float>(norm(p) * sin( std::min(fovx, fovy) * 0.5 * CV_PI / 180));
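    // the board half-width starts as the chord of the narrower half field of view at distance |p|;
    // the loop below shrinks it by 20% at a time until all four corners project inside the image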
    float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;

    vector<Point3f> pts3d(4);
    vector<Point2f> pts2d(4);
    for(;;)
    {
        pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
        pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;

        /* can remake with better perf */
        projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);

        bool inrect1 = pts2d[0].x < bg.cols && pts2d[0].y < bg.rows && pts2d[0].x > 0 && pts2d[0].y > 0;
        bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
        bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
        bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;

        if ( inrect1 && inrect2 && inrect3 && inrect4)
            break;

        cbHalfWidth*=0.8f;
        cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
    }

    cbHalfWidth *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
    cbHalfHeight *= static_cast<float>(patternSize.height)/(patternSize.height + 1);

    Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
    float sqWidth = 2 * cbHalfWidth/patternSize.width;
    float sqHeight = 2 * cbHalfHeight/patternSize.height;

    return generateChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}
@@ -1,6 +1,7 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>

@@ -1,75 +0,0 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/contrib.hpp"

#include <iostream>

using namespace cv;
using namespace std;

static void help()
{

    cout << "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
            "edge template and a query edge image.\n"
            "Usage: \n"
            "./chamfer <image edge map> <template edge map>,"
            " By default the inputs are logo_in_clutter.png logo.png\n";
}

const char* keys =
{
    "{@logo1 |logo_in_clutter.png |image edge map }"
    "{@logo2 |logo.png |template edge map}"
};

int main( int argc, const char** argv )
{

    help();
    CommandLineParser parser(argc, argv, keys);

    string image = parser.get<string>(0);
    string templ = parser.get<string>(1);
    Mat img = imread(image.c_str(), 0);
    Mat tpl = imread(templ.c_str(), 0);

    if (img.empty() || tpl.empty())
    {
        cout << "Could not read image file " << image << " or " << templ << "." << endl;
        return -1;
    }
    Mat cimg;
    cvtColor(img, cimg, COLOR_GRAY2BGR);

    // if the image and the template are not edge maps but normal grayscale images,
    // you might want to uncomment the lines below to produce the maps. You can also
    // run Sobel instead of Canny.

    // Canny(img, img, 5, 50, 3);
    // Canny(tpl, tpl, 5, 50, 3);

    vector<vector<Point> > results;
    vector<float> costs;
    int best = chamerMatching( img, tpl, results, costs );
    if( best < 0 )
    {
        cout << "matching not found" << endl;
        return -1;
    }

    size_t i, n = results[best].size();
    for( i = 0; i < n; i++ )
    {
        Point pt = results[best][i];
        if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
            cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
    }

    imshow("result", cimg);

    waitKey();

    return 0;
}
@@ -23,6 +23,7 @@

#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

@@ -33,6 +33,7 @@
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

@@ -17,8 +17,8 @@ static void help()
    << "\n------------------------------------------------------------------\n"
    << " This program shows the serial out capabilities of cv::Mat\n"
    << "That is, cv::Mat M(...); cout << M;  Now works.\n"
    << "Output can be formatted to OpenCV, python, numpy, csv and C styles"
    << "Usage:\n"
    << "Output can be formatted to OpenCV, matlab, python, numpy, csv and \n"
    << "C styles Usage:\n"
    << "./cvout_sample\n"
    << "------------------------------------------------------------------\n\n"
    << endl;
@@ -36,6 +36,7 @@ int main(int,char**)
    randu(r, Scalar::all(0), Scalar::all(255));

    cout << "r (default) = \n" << r << ";" << endl << endl;
    cout << "r (matlab) = \n" << format(r, Formatter::FMT_MATLAB) << ";" << endl << endl;
    cout << "r (python) = \n" << format(r, Formatter::FMT_PYTHON) << ";" << endl << endl;
    cout << "r (numpy) = \n" << format(r, Formatter::FMT_NUMPY) << ";" << endl << endl;
    cout << "r (csv) = \n" << format(r, Formatter::FMT_CSV) << ";" << endl << endl;

@@ -10,6 +10,7 @@
*/

#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

@@ -2,9 +2,10 @@

#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <opencv2/objdetect/objdetect.hpp>

#include <stdio.h>
#include <string>

@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>

@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

@@ -1,193 +0,0 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)

#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect.hpp>
#include "opencv2/contrib/detection_based_tracker.hpp"

#include <vector>
#include <iostream>
#include <stdio.h>

#define DEBUGLOGS 1


#ifdef ANDROID
#include <android/log.h>
#define LOG_TAG "DETECTIONBASEDTRACKER__TEST_APPLICAT"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI0(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#define LOGW0(...) ((void)__android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
#define LOGE0(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
#else

#include <stdio.h>

#define LOGD0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGI0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGW0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGE0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#endif

#if DEBUGLOGS
#define LOGD(_str, ...) LOGD0(_str , ## __VA_ARGS__)
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
#define LOGE(...) do{} while(0)
#endif

using namespace cv;
using namespace std;

#define ORIGINAL 0
#define SHOULD_USE_EXTERNAL_BUFFERS 1

static void usage()
{
    LOGE0("usage: filepattern outfilepattern cascadefile");
    LOGE0("\t where ");
    LOGE0("\t filepattern --- pattern for the paths to the source images");
    LOGE0("\t (e.g.\"./Videos/FACESJPG2/Faces2_%%08d.jpg\" ");
    LOGE0("\t outfilepattern --- pattern for the paths for images which will be generated");
    LOGE0("\t (e.g.\"./resFaces2_%%08d.jpg\" ");
    LOGE0("\t cascadefile --- path to the cascade file");
    LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
}

class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
    CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
        Detector(detector)
    {
        CV_Assert(detector);
    }

    void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
    {
        Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
    }
    virtual ~CascadeDetectorAdapter()
    {}

private:
    CascadeDetectorAdapter();
    cv::Ptr<cv::CascadeClassifier> Detector;
};

static int test_FaceDetector(int argc, char *argv[])
{
    if (argc < 4)
    {
        usage();
        return -1;
    }

    const char* filepattern=argv[1];
    const char* outfilepattern=argv[2];
    const char* cascadefile=argv[3];
    LOGD0("filepattern='%s'", filepattern);
    LOGD0("outfilepattern='%s'", outfilepattern);
    LOGD0("cascadefile='%s'", cascadefile);

    vector<Mat> images;
    {
        char filename[256];
        for(int n=1; ; n++)
        {
            snprintf(filename, sizeof(filename), filepattern, n);
            LOGD("filename='%s'", filename);
            Mat m0;
            m0=imread(filename);
            if (m0.empty())
            {
                LOGI0("Cannot read the file --- break");
                break;
            }
            images.push_back(m0);
        }
        LOGD("read %d images", (int)images.size());
    }

    std::string cascadeFrontalfilename=cascadefile;
    cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
    cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);

    cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
    cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);

    DetectionBasedTracker::Parameters params;
    DetectionBasedTracker fd(MainDetector, TrackingDetector, params);

    fd.run();

    Mat gray;
    Mat m;

    int64 tprev=getTickCount();
    double freq=getTickFrequency();

    int num_images=(int)images.size();
    for(int n=1; n <= num_images; n++)
    {
        int64 tcur=getTickCount();
        int64 dt=tcur-tprev;
        tprev=tcur;
        double t_ms=((double)dt)/freq * 1000.0;
        LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms);
        m=images[n-1];
        CV_Assert(! m.empty());
        cvtColor(m, gray, COLOR_BGR2GRAY);

        fd.process(gray);

        vector<Rect> result;
        fd.getObjects(result);

        for(size_t i=0; i < result.size(); i++)
        {
            Rect r=result[i];
            CV_Assert(r.area() > 0);
            Point tl=r.tl();
            Point br=r.br();
            Scalar color=Scalar(0, 250, 0);
            rectangle(m, tl, br, color, 3);
        }
    }

    char outfilename[256];
    for(int n=1; n <= num_images; n++)
    {
        snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
        LOGD("outfilename='%s'", outfilename);
        m=images[n-1];
        imwrite(outfilename, m);
    }

    fd.stop();

    return 0;
}

int main(int argc, char *argv[])
{
    return test_FaceDetector(argc, argv);
}

#else // #if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)

#include <stdio.h>
int main()
{
    printf("This sample works for UNIX or ANDROID only\n");
    return 0;
}

#endif
@@ -1,983 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/legacy.hpp"

#include <limits>
#include <cstdio>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;


string data_path;
/****************************************************************************************\
*           Functions to evaluate affine covariant detectors and descriptors.           *
\****************************************************************************************/

static inline Point2f applyHomography( const Mat_<double>& H, const Point2f& pt )
{
    double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2);
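    // z is the homogeneous scale from the third row of H; points mapped to infinity
    // (z == 0) fall through to the FLT_MAX sentinel returned below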
    if( z )
    {
        double w = 1./z;
        return Point2f( (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w),
                        (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w) );
    }
    return Point2f( numeric_limits<float>::max(), numeric_limits<float>::max() );
}

static inline void linearizeHomographyAt( const Mat_<double>& H, const Point2f& pt, Mat_<double>& A )
{
    A.create(2,2);
    double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2),
           p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2),
           p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2),
           p3_2 = p3*p3;
    if( p3 )
    {
        A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx
        A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy

        A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx
        A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydy
    }
    else
        A.setTo(Scalar::all(numeric_limits<double>::max()));
}

static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if( !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y);
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}

static void filterKeyPointsByImageSize( vector<KeyPoint>& keypoints, const Size& imgSize )
{
    if( !keypoints.empty() )
    {
        vector<KeyPoint> filtered;
        filtered.reserve(keypoints.size());
        Rect r(0, 0, imgSize.width, imgSize.height);
        vector<KeyPoint>::const_iterator it = keypoints.begin();
        for( int i = 0; it != keypoints.end(); ++it, i++ )
            if( r.contains(it->pt) )
                filtered.push_back(*it);
        keypoints.assign(filtered.begin(), filtered.end());
    }
}

/****************************************************************************************\
*                                  Detectors evaluation                                 *
\****************************************************************************************/
const int DATASETS_COUNT = 8;
const int TEST_CASE_COUNT = 5;

const string IMAGE_DATASETS_DIR = "detectors_descriptors_evaluation/images_datasets/";
const string DETECTORS_DIR = "detectors_descriptors_evaluation/detectors/";
const string DESCRIPTORS_DIR = "detectors_descriptors_evaluation/descriptors/";
const string KEYPOINTS_DIR = "detectors_descriptors_evaluation/keypoints_datasets/";

const string PARAMS_POSTFIX = "_params.xml";
const string RES_POSTFIX = "_res.xml";

const string REPEAT = "repeatability";
const string CORRESP_COUNT = "correspondence_count";

string DATASET_NAMES[DATASETS_COUNT] = { "bark", "bikes", "boat", "graf", "leuven", "trees", "ubc", "wall"};

string DEFAULT_PARAMS = "default";

string IS_ACTIVE_PARAMS = "isActiveParams";
string IS_SAVE_KEYPOINTS = "isSaveKeypoints";


class BaseQualityEvaluator
{
public:
    BaseQualityEvaluator( const char* _algName, const char* _testName ) : algName(_algName), testName(_testName)
    {
        //TODO: change this
        isWriteGraphicsData = true;
    }

    void run();

    virtual ~BaseQualityEvaluator(){}

protected:
    virtual string getRunParamsFilename() const = 0;
    virtual string getResultsFilename() const = 0;
    virtual string getPlotPath() const = 0;

    virtual void calcQualityClear( int datasetIdx ) = 0;
    virtual bool isCalcQualityEmpty( int datasetIdx ) const = 0;

    void readAllDatasetsRunParams();
    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx ) = 0;
    void writeAllDatasetsRunParams() const;
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const = 0;
    void setDefaultAllDatasetsRunParams();
    virtual void setDefaultDatasetRunParams( int datasetIdx ) = 0;
    virtual void readDefaultRunParams( FileNode& /*fn*/ ) {}
    virtual void writeDefaultRunParams( FileStorage& /*fs*/ ) const {}

    bool readDataset( const string& datasetName, vector<Mat>& Hs, vector<Mat>& imgs );

    virtual void readAlgorithm() {}
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat>& /*imgs*/, const vector<Mat>& /*Hs*/, int /*di*/, int& /*progress*/ ) {}

    virtual void processResults( int datasetIdx );
    virtual void processResults();
    virtual void writePlotData( int /*datasetIdx*/ ) const {}

    string algName, testName;
    bool isWriteParams, isWriteGraphicsData;
};

void BaseQualityEvaluator::readAllDatasetsRunParams()
{
    string filename = getRunParamsFilename();
    FileStorage fs( filename, FileStorage::READ );
    if( !fs.isOpened() )
    {
        isWriteParams = true;
        setDefaultAllDatasetsRunParams();
        printf("All runParams are default.\n");
    }
    else
    {
        isWriteParams = false;
        FileNode topfn = fs.getFirstTopLevelNode();

        FileNode pfn = topfn[DEFAULT_PARAMS];
        readDefaultRunParams(pfn);

        for( int i = 0; i < DATASETS_COUNT; i++ )
        {
            FileNode fn = topfn[DATASET_NAMES[i]];
            if( fn.empty() )
            {
                printf( "%d-runParams is default.\n", i);
                setDefaultDatasetRunParams(i);
            }
            else
                readDatasetRunParams(fn, i);
        }
    }
}

void BaseQualityEvaluator::writeAllDatasetsRunParams() const
{
    string filename = getRunParamsFilename();
    FileStorage fs( filename, FileStorage::WRITE );
    if( fs.isOpened() )
    {
        fs << "run_params" << "{"; // top file node
        fs << DEFAULT_PARAMS << "{";
        writeDefaultRunParams(fs);
        fs << "}";
        for( int i = 0; i < DATASETS_COUNT; i++ )
        {
            fs << DATASET_NAMES[i] << "{";
            writeDatasetRunParams(fs, i);
            fs << "}";
        }
        fs << "}";
    }
    else
printf( "File %s for writing run params can not be opened.\n", filename.c_str() );
|
||||
}

void BaseQualityEvaluator::setDefaultAllDatasetsRunParams()
{
    for( int i = 0; i < DATASETS_COUNT; i++ )
        setDefaultDatasetRunParams(i);
}

bool BaseQualityEvaluator::readDataset( const string& datasetName, vector<Mat>& Hs, vector<Mat>& imgs )
{
    Hs.resize( TEST_CASE_COUNT );
    imgs.resize( TEST_CASE_COUNT+1 );
    string dirname = data_path + IMAGE_DATASETS_DIR + datasetName + "/";
    for( int i = 0; i < (int)Hs.size(); i++ )
    {
        stringstream filename; filename << "H1to" << i+2 << "p.xml";
        FileStorage fs( dirname + filename.str(), FileStorage::READ );
        if( !fs.isOpened() )
        {
            cout << "filename " << dirname + filename.str() << endl;
            FileStorage fs2( dirname + filename.str(), FileStorage::READ );
            return false;
        }
        fs.getFirstTopLevelNode() >> Hs[i];
    }

    for( int i = 0; i < (int)imgs.size(); i++ )
    {
        stringstream filename; filename << "img" << i+1 << ".png";
        imgs[i] = imread( dirname + filename.str(), 0 );
        if( imgs[i].empty() )
        {
            cout << "filename " << filename.str() << endl;
            return false;
        }
    }
    return true;
}

void BaseQualityEvaluator::processResults( int datasetIdx )
{
    if( isWriteGraphicsData )
        writePlotData( datasetIdx );
}

void BaseQualityEvaluator::processResults()
{
    if( isWriteParams )
        writeAllDatasetsRunParams();
}

void BaseQualityEvaluator::run()
{
    readAlgorithm ();
    processRunParamsFile ();

    int notReadDatasets = 0;
    int progress = 0;

    FileStorage runParamsFS( getRunParamsFilename(), FileStorage::READ );
    isWriteParams = (! runParamsFS.isOpened());
    FileNode topfn = runParamsFS.getFirstTopLevelNode();
    FileNode defaultParams = topfn[DEFAULT_PARAMS];
    readDefaultRunParams (defaultParams);

    cout << testName << endl;
    for(int di = 0; di < DATASETS_COUNT; di++ )
    {
        cout << "Dataset " << di << " [" << DATASET_NAMES[di] << "] " << flush;
        vector<Mat> imgs, Hs;
        if( !readDataset( DATASET_NAMES[di], Hs, imgs ) )
        {
            calcQualityClear (di);
printf( "Images or homography matrices of dataset named %s can not be read\n",
|
||||
DATASET_NAMES[di].c_str());
|
||||
            notReadDatasets++;
            continue;
        }

        FileNode fn = topfn[DATASET_NAMES[di]];
        readDatasetRunParams(fn, di);

        runDatasetTest (imgs, Hs, di, progress);
        processResults( di );
        cout << endl;
    }
    if( notReadDatasets == DATASETS_COUNT )
    {
printf( "All datasets were not be read\n");
|
||||
        exit(-1);
    }
    else
        processResults();
    runParamsFS.release();
}




class DetectorQualityEvaluator : public BaseQualityEvaluator
{
public:
    DetectorQualityEvaluator( const char* _detectorName, const char* _testName ) : BaseQualityEvaluator( _detectorName, _testName )
    {
        calcQuality.resize(DATASETS_COUNT);
        isSaveKeypoints.resize(DATASETS_COUNT);
        isActiveParams.resize(DATASETS_COUNT);

        isSaveKeypointsDefault = false;
        isActiveParamsDefault = false;
    }

protected:
    virtual string getRunParamsFilename() const;
    virtual string getResultsFilename() const;
    virtual string getPlotPath() const;

    virtual void calcQualityClear( int datasetIdx );
    virtual bool isCalcQualityEmpty( int datasetIdx ) const;

    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx );
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
    virtual void setDefaultDatasetRunParams( int datasetIdx );
    virtual void readDefaultRunParams( FileNode &fn );
    virtual void writeDefaultRunParams( FileStorage &fs ) const;

    virtual void writePlotData( int di ) const;

    void openToWriteKeypointsFile( FileStorage& fs, int datasetIdx );

    virtual void readAlgorithm();
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress );

    Ptr<FeatureDetector> specificDetector;
    Ptr<FeatureDetector> defaultDetector;

    struct Quality
    {
        float repeatability;
        int correspondenceCount;
    };
    vector<vector<Quality> > calcQuality;

    vector<bool> isSaveKeypoints;
    vector<bool> isActiveParams;

    bool isSaveKeypointsDefault;
    bool isActiveParamsDefault;
};

string DetectorQualityEvaluator::getRunParamsFilename() const
{
    return data_path + DETECTORS_DIR + algName + PARAMS_POSTFIX;
}

string DetectorQualityEvaluator::getResultsFilename() const
{
    return data_path + DETECTORS_DIR + algName + RES_POSTFIX;
}

string DetectorQualityEvaluator::getPlotPath() const
{
    return data_path + DETECTORS_DIR + "plots/";
}

void DetectorQualityEvaluator::calcQualityClear( int datasetIdx )
{
    calcQuality[datasetIdx].clear();
}

bool DetectorQualityEvaluator::isCalcQualityEmpty( int datasetIdx ) const
{
    return calcQuality[datasetIdx].empty();
}

void DetectorQualityEvaluator::readDefaultRunParams (FileNode &fn)
{
    if (! fn.empty() )
    {
        isSaveKeypointsDefault = (int)fn[IS_SAVE_KEYPOINTS] != 0;
        defaultDetector->read (fn);
    }
}

void DetectorQualityEvaluator::writeDefaultRunParams (FileStorage &fs) const
{
    fs << IS_SAVE_KEYPOINTS << isSaveKeypointsDefault;
    defaultDetector->write (fs);
}

void DetectorQualityEvaluator::readDatasetRunParams( FileNode& fn, int datasetIdx )
{
    isActiveParams[datasetIdx] = (int)fn[IS_ACTIVE_PARAMS] != 0;
    if (isActiveParams[datasetIdx])
    {
        isSaveKeypoints[datasetIdx] = (int)fn[IS_SAVE_KEYPOINTS] != 0;
        specificDetector->read (fn);
    }
    else
    {
        setDefaultDatasetRunParams(datasetIdx);
    }
}

void DetectorQualityEvaluator::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
{
    fs << IS_ACTIVE_PARAMS << isActiveParams[datasetIdx];
    fs << IS_SAVE_KEYPOINTS << isSaveKeypoints[datasetIdx];
    defaultDetector->write (fs);
}

void DetectorQualityEvaluator::setDefaultDatasetRunParams( int datasetIdx )
{
    isSaveKeypoints[datasetIdx] = isSaveKeypointsDefault;
    isActiveParams[datasetIdx] = isActiveParamsDefault;
}

void DetectorQualityEvaluator::writePlotData(int di ) const
{
    int imgXVals[] = { 2, 3, 4, 5, 6 }; // if scale, blur or light changes
    int viewpointXVals[] = { 20, 30, 40, 50, 60 }; // if viewpoint changes
    int jpegXVals[] = { 60, 80, 90, 95, 98 }; // if jpeg compression

    int* xVals = 0;
    if( !DATASET_NAMES[di].compare("ubc") )
    {
        xVals = jpegXVals;
    }
    else if( !DATASET_NAMES[di].compare("graf") || !DATASET_NAMES[di].compare("wall") )
    {
        xVals = viewpointXVals;
    }
    else
        xVals = imgXVals;

    stringstream rFilename, cFilename;
    rFilename << getPlotPath() << algName << "_" << DATASET_NAMES[di] << "_repeatability.csv";
    cFilename << getPlotPath() << algName << "_" << DATASET_NAMES[di] << "_correspondenceCount.csv";
    ofstream rfile(rFilename.str().c_str()), cfile(cFilename.str().c_str());
    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        rfile << xVals[ci] << ", " << calcQuality[di][ci].repeatability << endl;
        cfile << xVals[ci] << ", " << calcQuality[di][ci].correspondenceCount << endl;
    }
}

void DetectorQualityEvaluator::openToWriteKeypointsFile( FileStorage& fs, int datasetIdx )
{
    string filename = data_path + KEYPOINTS_DIR + algName + "_"+ DATASET_NAMES[datasetIdx] + ".xml.gz" ;

    fs.open(filename, FileStorage::WRITE);
    if( !fs.isOpened() )
printf( "keypoints can not be written in file %s because this file can not be opened\n", filename.c_str() );
|
||||
}

inline void writeKeypoints( FileStorage& fs, const vector<KeyPoint>& keypoints, int imgIdx )
{
    if( fs.isOpened() )
    {
        stringstream imgName; imgName << "img" << imgIdx;
        write( fs, imgName.str(), keypoints );
    }
}

inline void readKeypoints( FileStorage& fs, vector<KeyPoint>& keypoints, int imgIdx )
{
    CV_Assert( fs.isOpened() );
    stringstream imgName; imgName << "img" << imgIdx;
    read( fs[imgName.str()], keypoints);
}

void DetectorQualityEvaluator::readAlgorithm ()
{
    defaultDetector = FeatureDetector::create( algName );
    specificDetector = FeatureDetector::create( algName );
    if( !defaultDetector )
    {
printf( "Algorithm can not be read\n" );
|
||||
        exit(-1);
    }
}

static int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
{
    int width = 60 /*- (int)name.length()*/;
    if( count > 0 )
    {
        int t = cvRound( ((double)test_case_idx * width)/count );
        if( t > progress )
        {
            cout << "." << flush;
            progress = t;
        }
    }
    else if( cvRound(dt) > progress )
    {
        cout << "." << flush;
        progress = cvRound(dt);
    }

    return progress;
}

void DetectorQualityEvaluator::runDatasetTest (const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress)
{
    Ptr<FeatureDetector> detector = isActiveParams[di] ? specificDetector : defaultDetector;
    FileStorage keypointsFS;
    if( isSaveKeypoints[di] )
        openToWriteKeypointsFile( keypointsFS, di );

    calcQuality[di].resize(TEST_CASE_COUNT);

    vector<KeyPoint> keypoints1;
    detector->detect( imgs[0], keypoints1 );
    writeKeypoints( keypointsFS, keypoints1, 0);
    int progressCount = DATASETS_COUNT*TEST_CASE_COUNT;

    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        progress = update_progress( testName, progress, di*TEST_CASE_COUNT + ci + 1, progressCount, 0 );
        vector<KeyPoint> keypoints2;
        float rep;
        evaluateFeatureDetector( imgs[0], imgs[ci+1], Hs[ci], &keypoints1, &keypoints2,
                                 rep, calcQuality[di][ci].correspondenceCount,
                                 detector );
        calcQuality[di][ci].repeatability = rep == -1 ? rep : 100.f*rep;
        writeKeypoints( keypointsFS, keypoints2, ci+1);
    }
}
// static void testLog( bool isBadAccuracy )
// {
//     if( isBadAccuracy )
//         printf(" bad accuracy\n");
//     else
//         printf("\n");
// }

/****************************************************************************************\
*                                 Descriptors evaluation                                 *
\****************************************************************************************/

const string RECALL = "recall";
const string PRECISION = "precision";

const string KEYPOINTS_FILENAME = "keypointsFilename";
const string PROJECT_KEYPOINTS_FROM_1IMAGE = "projectKeypointsFrom1Image";
const string MATCH_FILTER = "matchFilter";
const string RUN_PARAMS_IS_IDENTICAL = "runParamsIsIdentical";

const string ONE_WAY_TRAIN_DIR = "detectors_descriptors_evaluation/one_way_train_images/";
const string ONE_WAY_IMAGES_LIST = "one_way_train_images.txt";

class DescriptorQualityEvaluator : public BaseQualityEvaluator
{
public:
    enum{ NO_MATCH_FILTER = 0 };
    DescriptorQualityEvaluator( const char* _descriptorName, const char* _testName, const char* _matcherName = 0 ) :
        BaseQualityEvaluator( _descriptorName, _testName )
    {
        calcQuality.resize(DATASETS_COUNT);
        calcDatasetQuality.resize(DATASETS_COUNT);
        commRunParams.resize(DATASETS_COUNT);

        commRunParamsDefault.projectKeypointsFrom1Image = true;
        commRunParamsDefault.matchFilter = NO_MATCH_FILTER;
        commRunParamsDefault.isActiveParams = false;

        if( _matcherName )
            matcherName = _matcherName;
    }

protected:
    virtual string getRunParamsFilename() const;
    virtual string getResultsFilename() const;
    virtual string getPlotPath() const;

    virtual void calcQualityClear( int datasetIdx );
    virtual bool isCalcQualityEmpty( int datasetIdx ) const;

    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx ); //
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
    virtual void setDefaultDatasetRunParams( int datasetIdx );
    virtual void readDefaultRunParams( FileNode &fn );
    virtual void writeDefaultRunParams( FileStorage &fs ) const;

    virtual void readAlgorithm();
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress );

    virtual void writePlotData( int di ) const;
    void calculatePlotData( vector<vector<DMatch> > &allMatches, vector<vector<uchar> > &allCorrectMatchesMask, int di );

    struct Quality
    {
        float recall;
        float precision;
    };
    vector<vector<Quality> > calcQuality;
    vector<vector<Quality> > calcDatasetQuality;

    struct CommonRunParams
    {
        string keypointsFilename;
        bool projectKeypointsFrom1Image;
        int matchFilter; // not used now
        bool isActiveParams;
    };
    vector<CommonRunParams> commRunParams;

    Ptr<GenericDescriptorMatcher> specificDescMatcher;
    Ptr<GenericDescriptorMatcher> defaultDescMatcher;

    CommonRunParams commRunParamsDefault;
    string matcherName;
};

string DescriptorQualityEvaluator::getRunParamsFilename() const
{
    return data_path + DESCRIPTORS_DIR + algName + PARAMS_POSTFIX;
}

string DescriptorQualityEvaluator::getResultsFilename() const
{
    return data_path + DESCRIPTORS_DIR + algName + RES_POSTFIX;
}

string DescriptorQualityEvaluator::getPlotPath() const
{
    return data_path + DESCRIPTORS_DIR + "plots/";
}

void DescriptorQualityEvaluator::calcQualityClear( int datasetIdx )
{
    calcQuality[datasetIdx].clear();
}

bool DescriptorQualityEvaluator::isCalcQualityEmpty( int datasetIdx ) const
{
    return calcQuality[datasetIdx].empty();
}

void DescriptorQualityEvaluator::readDefaultRunParams (FileNode &fn)
{
    if (! fn.empty() )
    {
        commRunParamsDefault.projectKeypointsFrom1Image = (int)fn[PROJECT_KEYPOINTS_FROM_1IMAGE] != 0;
        commRunParamsDefault.matchFilter = (int)fn[MATCH_FILTER];
        defaultDescMatcher->read (fn);
    }
}

void DescriptorQualityEvaluator::writeDefaultRunParams (FileStorage &fs) const
{
    fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParamsDefault.projectKeypointsFrom1Image;
    fs << MATCH_FILTER << commRunParamsDefault.matchFilter;
    defaultDescMatcher->write (fs);
}

void DescriptorQualityEvaluator::readDatasetRunParams( FileNode& fn, int datasetIdx )
{
    commRunParams[datasetIdx].isActiveParams = (int)fn[IS_ACTIVE_PARAMS] != 0;
    if (commRunParams[datasetIdx].isActiveParams)
    {
commRunParams[datasetIdx].keypontsFilename = (string)fn[KEYPOINTS_FILENAME];
|
||||
commRunParams[datasetIdx].projectKeypointsFrom1Image = (int)fn[PROJECT_KEYPOINTS_FROM_1IMAGE] != 0;
|
||||
commRunParams[datasetIdx].matchFilter = (int)fn[MATCH_FILTER];
|
||||
specificDescMatcher->read (fn);
|
||||
}
|
||||
else
|
||||
{
|
||||
setDefaultDatasetRunParams(datasetIdx);
|
||||
}
|
||||
}
|
||||
|
||||
void DescriptorQualityEvaluator::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
|
||||
{
|
||||
fs << IS_ACTIVE_PARAMS << commRunParams[datasetIdx].isActiveParams;
|
||||
fs << KEYPOINTS_FILENAME << commRunParams[datasetIdx].keypontsFilename;
|
||||
fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParams[datasetIdx].projectKeypointsFrom1Image;
|
||||
fs << MATCH_FILTER << commRunParams[datasetIdx].matchFilter;
|
||||
|
||||
defaultDescMatcher->write (fs);
|
||||
}
|
||||
|
||||
void DescriptorQualityEvaluator::setDefaultDatasetRunParams( int datasetIdx )
|
||||
{
|
||||
commRunParams[datasetIdx] = commRunParamsDefault;
|
||||
commRunParams[datasetIdx].keypontsFilename = "SURF_" + DATASET_NAMES[datasetIdx] + ".xml.gz";
|
||||
}
|
||||
|
||||
void DescriptorQualityEvaluator::writePlotData( int di ) const
|
||||
{
|
||||
stringstream filename;
|
||||
filename << getPlotPath() << algName << "_" << DATASET_NAMES[di] << ".csv";
|
||||
FILE *file = fopen (filename.str().c_str(), "w");
|
||||
size_t size = calcDatasetQuality[di].size();
|
||||
for (size_t i=0;i<size;i++)
|
||||
{
|
||||
fprintf( file, "%f, %f\n", 1 - calcDatasetQuality[di][i].precision, calcDatasetQuality[di][i].recall);
|
||||
}
|
||||
fclose( file );
|
||||
}
|
||||
|
||||
void DescriptorQualityEvaluator::readAlgorithm( )
|
||||
{
|
||||
defaultDescMatcher = GenericDescriptorMatcher::create( algName );
|
||||
specificDescMatcher = GenericDescriptorMatcher::create( algName );
|
||||
|
||||
if( !defaultDescMatcher )
|
||||
{
|
||||
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( algName );
|
||||
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( matcherName );
|
||||
defaultDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
|
||||
specificDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
|
||||
|
||||
if( !extractor || !matcher )
|
||||
{
|
||||
printf("Algorithm can not be read\n");
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void DescriptorQualityEvaluator::calculatePlotData( vector<vector<DMatch> > &allMatches, vector<vector<uchar> > &allCorrectMatchesMask, int di )
|
||||
{
|
||||
vector<Point2f> recallPrecisionCurve;
|
||||
computeRecallPrecisionCurve( allMatches, allCorrectMatchesMask, recallPrecisionCurve );
|
||||
|
||||
calcDatasetQuality[di].clear();
|
||||
const float resultPrecision = 0.5;
|
||||
bool isResultCalculated = false;
|
||||
const double eps = 1e-2;
|
||||
|
||||
Quality initQuality;
|
||||
initQuality.recall = 0;
|
||||
initQuality.precision = 0;
|
||||
calcDatasetQuality[di].push_back( initQuality );
|
||||
|
||||
for( size_t i=0;i<recallPrecisionCurve.size();i++ )
|
||||
{
|
||||
Quality quality;
|
||||
quality.recall = recallPrecisionCurve[i].y;
|
||||
quality.precision = 1 - recallPrecisionCurve[i].x;
|
||||
Quality back = calcDatasetQuality[di].back();
|
||||
|
||||
if( fabs( quality.recall - back.recall ) < eps && fabs( quality.precision - back.precision ) < eps )
|
||||
continue;
|
||||
|
||||
calcDatasetQuality[di].push_back( quality );
|
||||
|
||||
if( !isResultCalculated && quality.precision < resultPrecision )
|
||||
{
|
||||
for(int ci=0;ci<TEST_CASE_COUNT;ci++)
|
||||
{
|
||||
calcQuality[di][ci].recall = quality.recall;
|
||||
calcQuality[di][ci].precision = quality.precision;
|
||||
}
|
||||
isResultCalculated = true;
|
||||
}
|
||||
}
|
||||
}
|
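// Convention assumed above: each Point2f filled in by
// cv::computeRecallPrecisionCurve() carries x = 1 - precision and
// y = recall, which is why calculatePlotData() recovers precision as
// 1 - recallPrecisionCurve[i].x before comparing it with resultPrecision.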

void DescriptorQualityEvaluator::runDatasetTest (const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress)
{
    FileStorage keypontsFS( data_path + KEYPOINTS_DIR + commRunParams[di].keypontsFilename, FileStorage::READ );
    if( !keypontsFS.isOpened())
    {
        calcQuality[di].clear();
        printf( "keypoints from file %s can not be read\n", commRunParams[di].keypontsFilename.c_str() );
        return;
    }

    Ptr<GenericDescriptorMatcher> descMatch = commRunParams[di].isActiveParams ? specificDescMatcher : defaultDescMatcher;
    calcQuality[di].resize(TEST_CASE_COUNT);

    vector<KeyPoint> keypoints1;
    readKeypoints( keypontsFS, keypoints1, 0);

    int progressCount = DATASETS_COUNT*TEST_CASE_COUNT;

    vector<vector<DMatch> > allMatches1to2;
    vector<vector<uchar> > allCorrectMatchesMask;
    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        progress = update_progress( testName, progress, di*TEST_CASE_COUNT + ci + 1, progressCount, 0 );

        vector<KeyPoint> keypoints2;
        if( commRunParams[di].projectKeypointsFrom1Image )
        {
            // TODO need to test function calcKeyPointProjections
            calcKeyPointProjections( keypoints1, Hs[ci], keypoints2 );
            filterKeyPointsByImageSize( keypoints2, imgs[ci+1].size() );
        }
        else
            readKeypoints( keypontsFS, keypoints2, ci+1 );
        // TODO if( commRunParams[di].matchFilter )

        vector<vector<DMatch> > matches1to2;
        vector<vector<uchar> > correctMatchesMask;
        vector<Point2f> recallPrecisionCurve; // not used because we need recallPrecisionCurve for
                                              // all images in dataset
        evaluateGenericDescriptorMatcher( imgs[0], imgs[ci+1], Hs[ci], keypoints1, keypoints2,
                                          &matches1to2, &correctMatchesMask, recallPrecisionCurve,
                                          descMatch );
        allMatches1to2.insert( allMatches1to2.end(), matches1to2.begin(), matches1to2.end() );
        allCorrectMatchesMask.insert( allCorrectMatchesMask.end(), correctMatchesMask.begin(), correctMatchesMask.end() );
    }

    calculatePlotData( allMatches1to2, allCorrectMatchesMask, di );
}

//--------------------------------- Calonder descriptor test --------------------------------------------
class CalonderDescriptorQualityEvaluator : public DescriptorQualityEvaluator
{
public:
    CalonderDescriptorQualityEvaluator() :
            DescriptorQualityEvaluator( "Calonder", "quality-descriptor-calonder") {}
    virtual void readAlgorithm( )
    {
        string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
        Ptr<DescriptorExtractor> extractor = makePtr<CalonderDescriptorExtractor<float> >( classifierFile );
        defaultDescMatcher = makePtr<VectorDescriptorMatch>(
                                extractor,
                                makePtr<BFMatcher>(extractor->defaultNorm()));
        specificDescMatcher = defaultDescMatcher;
    }
};

//--------------------------------- One Way descriptor test --------------------------------------------
class OneWayDescriptorQualityTest : public DescriptorQualityEvaluator
{
public:
    OneWayDescriptorQualityTest() :
        DescriptorQualityEvaluator("ONEWAY", "quality-descriptor-one-way")
    {
    }
protected:
    virtual void processRunParamsFile ();
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
};

void OneWayDescriptorQualityTest::processRunParamsFile ()
{
    string filename = getRunParamsFilename();
    FileStorage fs = FileStorage (filename, FileStorage::READ);
    FileNode fn = fs.getFirstTopLevelNode();
    fn = fn[DEFAULT_PARAMS];

    string pcaFilename = data_path + (string)fn["pcaFilename"];
    string trainPath = data_path + (string)fn["trainPath"];
    string trainImagesList = (string)fn["trainImagesList"];
    int patch_width = fn["patchWidth"];
    int patch_height = fn["patchHeight"];
    Size patchSize = cvSize (patch_width, patch_height);
    int poseCount = fn["poseCount"];

    if (trainImagesList.length () == 0 )
        return;

    fs.release ();

    readAllDatasetsRunParams();

    Ptr<OneWayDescriptorBase> base(
        new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
                                 trainPath, trainImagesList));

    Ptr<OneWayDescriptorMatch> match = makePtr<OneWayDescriptorMatch>();
    match->initialize( OneWayDescriptorMatch::Params (), base );
    defaultDescMatcher = match;
    writeAllDatasetsRunParams();
}

void OneWayDescriptorQualityTest::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
{
    fs << IS_ACTIVE_PARAMS << commRunParams[datasetIdx].isActiveParams;
    fs << KEYPOINTS_FILENAME << commRunParams[datasetIdx].keypontsFilename;
    fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParams[datasetIdx].projectKeypointsFrom1Image;
    fs << MATCH_FILTER << commRunParams[datasetIdx].matchFilter;
}

int main( int argc, char** argv )
{
    if( argc != 2 )
    {
        cout << "Format: " << argv[0] << " testdata path (path to testdata/cv)" << endl;
        return -1;
    }

    data_path = argv[1];
#ifdef WIN32
    if( *data_path.rbegin() != '\\' )
        data_path = data_path + "\\";
#else
    if( *data_path.rbegin() != '/' )
        data_path = data_path + "/";
#endif

    Ptr<BaseQualityEvaluator> evals[] =
    {
        makePtr<DetectorQualityEvaluator>( "FAST", "quality-detector-fast" ),
        makePtr<DetectorQualityEvaluator>( "GFTT", "quality-detector-gftt" ),
        makePtr<DetectorQualityEvaluator>( "HARRIS", "quality-detector-harris" ),
        makePtr<DetectorQualityEvaluator>( "MSER", "quality-detector-mser" ),
        makePtr<DetectorQualityEvaluator>( "STAR", "quality-detector-star" ),
        makePtr<DetectorQualityEvaluator>( "SIFT", "quality-detector-sift" ),
        makePtr<DetectorQualityEvaluator>( "SURF", "quality-detector-surf" ),

        makePtr<DescriptorQualityEvaluator>( "SIFT", "quality-descriptor-sift", "BruteForce" ),
        makePtr<DescriptorQualityEvaluator>( "SURF", "quality-descriptor-surf", "BruteForce" ),
        makePtr<DescriptorQualityEvaluator>( "FERN", "quality-descriptor-fern"),
        makePtr<CalonderDescriptorQualityEvaluator>()
    };

    for( size_t i = 0; i < sizeof(evals)/sizeof(evals[0]); i++ )
    {
        evals[i]->run();
        cout << endl;
    }
}
@@ -1,346 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"

#include <stdlib.h>
#include <stdio.h>
#include <sys/stat.h>

#include <limits>
#include <cstdio>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

/*
The algorithm:

for each tested combination of detector+descriptor+matcher:

    create detector, descriptor and matcher,
    load their params if they are there, otherwise use the default ones and save them

    for each dataset:

        load reference image
        detect keypoints in it, compute descriptors

        for each transformed image:
            load the image
            load the transformation matrix
            detect keypoints in it too, compute descriptors

            find matches
            transform keypoints from the first image using the ground-truth matrix

            compute the number of matched keypoints, i.e. for each pair (i,j) found by a matcher compare
            j-th keypoint from the second image with the transformed i-th keypoint. If they are close, +1.

            so, we have:
               N - number of keypoints in the first image that are also visible
               (after transformation) on the second image

               N1 - number of keypoints in the first image that have been matched.

               n - number of the correct matches found by the matcher

               n/N1 - precision
               n/N - recall (?)

            we store (N, n/N1, n/N) (where N is stored primarily for tuning the detector's thresholds,
            in order to semi-equalize their keypoints counts)

*/

typedef Vec3f TVec; // (N, n/N1, n/N) - see above
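// A minimal numeric illustration of the triple above, with assumed counts
// rather than output of a real run: N = 100 visible keypoints, N1 = 80 of
// them matched and n = 60 correct matches gives precision n/N1 = 0.75 and
// recall n/N = 0.60.
static TVec exampleMeasures()
{
    const int N = 100, N1 = 80, n = 60; // assumed counts, illustration only
    return TVec((float)N, (float)n/std::max(N1, 1), (float)n/std::max(N, 1));
}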

static void saveloadDDM( const string& params_filename,
                         Ptr<FeatureDetector>& detector,
                         Ptr<DescriptorExtractor>& descriptor,
                         Ptr<DescriptorMatcher>& matcher )
{
    FileStorage fs(params_filename, FileStorage::READ);
    if( fs.isOpened() )
    {
        detector->read(fs["detector"]);
        descriptor->read(fs["descriptor"]);
        matcher->read(fs["matcher"]);
    }
    else
    {
        fs.open(params_filename, FileStorage::WRITE);
        fs << "detector" << "{";
        detector->write(fs);
        fs << "}" << "descriptor" << "{";
        descriptor->write(fs);
        fs << "}" << "matcher" << "{";
        matcher->write(fs);
        fs << "}";
    }
}
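// On the first run the file does not exist yet, so the else-branch above
// writes <name>_params.yml with the default detector/descriptor/matcher
// settings; editing that file and re-running reads the tuned values back.
// Typical call, with params_filename built as in main() below:
//
//     saveloadDDM( dir + "ORBX_BF_params.yml", detector, descriptor, matcher );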

static Mat loadMat(const string& fsname)
{
    FileStorage fs(fsname, FileStorage::READ);
    Mat m;
    fs.getFirstTopLevelNode() >> m;
    return m;
}

static void transformKeypoints( const vector<KeyPoint>& kp,
                                vector<vector<Point2f> >& contours,
                                const Mat& H )
{
    const float scale = 256.f;
    size_t i, n = kp.size();
    contours.resize(n);
    vector<Point> temp;

    for( i = 0; i < n; i++ )
    {
        ellipse2Poly(Point2f(kp[i].pt.x*scale, kp[i].pt.y*scale),
                     Size2f(kp[i].size*scale, kp[i].size*scale),
                     0, 0, 360, 12, temp);
        Mat(temp).convertTo(contours[i], CV_32F, 1./scale);
        perspectiveTransform(contours[i], contours[i], H);
    }
}
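// Note on the constant above: ellipse2Poly() polygonizes on integer points,
// so each keypoint region is first blown up by scale = 256, sampled every
// 12 degrees, and then convertTo(..., 1./scale) shrinks the polygon back,
// preserving roughly 1/256-pixel precision in the float contours.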


static TVec proccessMatches( Size imgsize,
                             const vector<DMatch>& matches,
                             const vector<vector<Point2f> >& kp1t_contours,
                             const vector<vector<Point2f> >& kp_contours,
                             double overlapThreshold )
{
    const double visibilityThreshold = 0.6;

    // 1. [preprocessing] find bounding rect for each element of kp1t_contours and kp_contours.
    // 2. [cross-check] for each DMatch (iK, i1)
    //    update best_match[i1] using DMatch::distance.
    // 3. [compute overlapping] for each i1 (keypoint from the first image) do:
    //    if i1-th keypoint is outside of image, skip it
    //    increment N
    //    if best_match[i1] is initialized, increment N1
    //    if kp_contours[best_match[i1]] and kp1t_contours[i1] overlap by overlapThreshold*100%,
    //    increment n. Use bounding rects to speedup this step

    int i, size1 = (int)kp1t_contours.size(), size = (int)kp_contours.size(), msize = (int)matches.size();
    vector<DMatch> best_match(size1);
    vector<Rect> rects1(size1), rects(size);

    // preprocess
    for( i = 0; i < size1; i++ )
        rects1[i] = boundingRect(kp1t_contours[i]);

    for( i = 0; i < size; i++ )
        rects[i] = boundingRect(kp_contours[i]);

    // cross-check
    for( i = 0; i < msize; i++ )
    {
        DMatch m = matches[i];
        int i1 = m.trainIdx, iK = m.queryIdx;
        CV_Assert( 0 <= i1 && i1 < size1 && 0 <= iK && iK < size );
        if( best_match[i1].trainIdx < 0 || best_match[i1].distance > m.distance )
            best_match[i1] = m;
    }

    int N = 0, N1 = 0, n = 0;

    // overlapping
    for( i = 0; i < size1; i++ )
    {
        int i1 = i, iK = best_match[i].queryIdx;
        if( iK >= 0 )
            N1++;

        Rect r = rects1[i] & Rect(0, 0, imgsize.width, imgsize.height);
        if( r.area() < visibilityThreshold*rects1[i].area() )
            continue;
        N++;

        if( iK < 0 || (rects1[i1] & rects[iK]).area() == 0 )
            continue;

        double n_area = intersectConvexConvex(kp1t_contours[i1], kp_contours[iK], noArray(), true);
        if( n_area == 0 )
            continue;

        double area1 = contourArea(kp1t_contours[i1], false);
        double area = contourArea(kp_contours[iK], false);

        double ratio = n_area/(area1 + area - n_area);
        n += ratio >= overlapThreshold;
    }

    return TVec((float)N, (float)n/std::max(N1, 1), (float)n/std::max(N, 1));
}
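// The `ratio` above is the intersection-over-union of the two keypoint
// regions. A standalone sketch with two assumed 2x2 axis-aligned squares
// offset by one pixel, for which IoU = 2/(4+4-2) = 1/3:
static double exampleIoU()
{
    vector<Point2f> a, b;
    a.push_back(Point2f(0,0)); a.push_back(Point2f(2,0));
    a.push_back(Point2f(2,2)); a.push_back(Point2f(0,2));
    b.push_back(Point2f(1,0)); b.push_back(Point2f(3,0));
    b.push_back(Point2f(3,2)); b.push_back(Point2f(1,2));
    double inter = intersectConvexConvex(a, b, noArray(), true);
    return inter/(contourArea(a) + contourArea(b) - inter);
}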


static void saveResults(const string& dir, const string& name, const string& dsname,
                        const vector<TVec>& results, const int* xvals)
{
    string fname1 = format("%s%s_%s_precision.csv", dir.c_str(), name.c_str(), dsname.c_str());
    string fname2 = format("%s%s_%s_recall.csv", dir.c_str(), name.c_str(), dsname.c_str());
    FILE* f1 = fopen(fname1.c_str(), "wt");
    FILE* f2 = fopen(fname2.c_str(), "wt");

    for( size_t i = 0; i < results.size(); i++ )
    {
        fprintf(f1, "%d, %.1f\n", xvals[i], results[i][1]*100);
        fprintf(f2, "%d, %.1f\n", xvals[i], results[i][2]*100);
    }
    fclose(f1);
    fclose(f2);
}


int main(int argc, char** argv)
{
    static const char* ddms[] =
    {
        "ORBX_BF", "ORB", "ORB", "BruteForce-Hamming",
        //"ORB_BF", "ORB", "ORB", "BruteForce-Hamming",
        //"ORB3_BF", "ORB", "ORB", "BruteForce-Hamming(2)",
        //"ORB4_BF", "ORB", "ORB", "BruteForce-Hamming(2)",
        //"ORB_LSH", "ORB", "ORB", "LSH"
        //"SURF_BF", "SURF", "SURF", "BruteForce",
        0
    };

    static const char* datasets[] =
    {
        "bark", "bikes", "boat", "graf", "leuven", "trees", "ubc", "wall", 0
    };

    static const int imgXVals[] = { 2, 3, 4, 5, 6 }; // if scale, blur or light changes
    static const int viewpointXVals[] = { 20, 30, 40, 50, 60 }; // if viewpoint changes
    static const int jpegXVals[] = { 60, 80, 90, 95, 98 }; // if jpeg compression

    const double overlapThreshold = 0.6;

    vector<vector<vector<TVec> > > results; // indexed as results[ddm][dataset][testcase]

    string dataset_dir = string(getenv("OPENCV_TEST_DATA_PATH")) +
        "/cv/detectors_descriptors_evaluation/images_datasets";

    string dir = argc > 1 ? argv[1] : ".";

    if( dir[dir.size()-1] != '\\' && dir[dir.size()-1] != '/' )
        dir += "/";

    int result = system(("mkdir " + dir).c_str());
    CV_Assert(result == 0);

    for( int i = 0; ddms[i*4] != 0; i++ )
    {
        const char* name = ddms[i*4];
        const char* detector_name = ddms[i*4+1];
        const char* descriptor_name = ddms[i*4+2];
        const char* matcher_name = ddms[i*4+3];
        string params_filename = dir + string(name) + "_params.yml";

        cout << "Testing " << name << endl;

        Ptr<FeatureDetector> detector = FeatureDetector::create(detector_name);
        Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create(descriptor_name);
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(matcher_name);

        saveloadDDM( params_filename, detector, descriptor, matcher );

        results.push_back(vector<vector<TVec> >());

        for( int j = 0; datasets[j] != 0; j++ )
        {
            const char* dsname = datasets[j];

            cout << "\ton " << dsname << " ";
            cout.flush();

            const int* xvals = strcmp(dsname, "ubc") == 0 ? jpegXVals :
                strcmp(dsname, "graf") == 0 || strcmp(dsname, "wall") == 0 ? viewpointXVals : imgXVals;

            vector<KeyPoint> kp1, kp;
            vector<DMatch> matches;
            vector<vector<Point2f> > kp1t_contours, kp_contours;
            Mat desc1, desc;

            Mat img1 = imread(format("%s/%s/img1.png", dataset_dir.c_str(), dsname), 0);
            CV_Assert( !img1.empty() );

            detector->detect(img1, kp1);
            descriptor->compute(img1, kp1, desc1);

            results[i].push_back(vector<TVec>());

            for( int k = 2; ; k++ )
            {
                cout << ".";
                cout.flush();
                Mat imgK = imread(format("%s/%s/img%d.png", dataset_dir.c_str(), dsname, k), 0);
                if( imgK.empty() )
                    break;

                detector->detect(imgK, kp);
                descriptor->compute(imgK, kp, desc);
                matcher->match( desc, desc1, matches );

                Mat H = loadMat(format("%s/%s/H1to%dp.xml", dataset_dir.c_str(), dsname, k));

                transformKeypoints( kp1, kp1t_contours, H );
                transformKeypoints( kp, kp_contours, Mat::eye(3, 3, CV_64F));

                TVec r = proccessMatches( imgK.size(), matches, kp1t_contours, kp_contours, overlapThreshold );
                results[i][j].push_back(r);
            }

            saveResults(dir, name, dsname, results[i][j], xvals);
            cout << endl;
        }
    }
}
@@ -1,6 +1,7 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <stdio.h>

@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <stdio.h>

@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <stdio.h>

@@ -1,7 +1,8 @@
#include "opencv2/highgui.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/ml.hpp"

using namespace cv;
using namespace cv::ml;

int main( int /*argc*/, char** /*argv*/ )
{
@@ -19,8 +20,6 @@ int main( int /*argc*/, char** /*argv*/ )
    Mat labels;
    Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 );
    Mat sample( 1, 2, CV_32FC1 );
    CvEM em_model;
    CvEMParams params;

    samples = samples.reshape(2, 0);
    for( i = 0; i < N; i++ )
@@ -35,37 +34,11 @@ int main( int /*argc*/, char** /*argv*/ )
    }
    samples = samples.reshape(1, 0);

    // initialize model parameters
    params.covs      = NULL;
    params.means     = NULL;
    params.weights   = NULL;
    params.probs     = NULL;
    params.nclusters = N;
    params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
    params.start_step = CvEM::START_AUTO_STEP;
    params.term_crit.max_iter = 300;
    params.term_crit.epsilon  = 0.1;
    params.term_crit.type = TermCriteria::COUNT|TermCriteria::EPS;

    // cluster the data
    em_model.train( samples, Mat(), params, &labels );
    Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
                                  EM::Params(N, EM::COV_MAT_SPHERICAL,
                                             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));

#if 0
    // this piece of code shows how to repeatedly optimize the model
    // with less-constrained parameters
    // (COV_MAT_DIAGONAL instead of COV_MAT_SPHERICAL)
    // when the output of the first stage is used as input for the second one.
    CvEM em_model2;
    params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
    params.start_step = CvEM::START_E_STEP;
    params.means = em_model.get_means();
    params.covs = em_model.get_covs();
    params.weights = em_model.get_weights();

    em_model2.train( samples, Mat(), params, &labels );
    // to use em_model2, replace em_model.predict()
    // with em_model2.predict() below
#endif
    // classify every image pixel
    for( i = 0; i < img.rows; i++ )
    {
@@ -73,7 +46,7 @@ int main( int /*argc*/, char** /*argv*/ )
        {
            sample.at<float>(0) = (float)j;
            sample.at<float>(1) = (float)i;
            int response = cvRound(em_model.predict( sample ));
            int response = cvRound(em_model->predict2( sample, noArray() )[1]);
            Scalar c = colors[response];

            circle( img, Point(j, i), 1, c*0.75, FILLED );
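// predict2(), used in the replacement line above, returns a Vec2d whose
// element 0 is the likelihood logarithm of the sample and whose element 1
// is the zero-based index of the most probable mixture component -- hence
// the [1] when picking the cluster for colouring.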
@@ -1,216 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
// This file originates from the openFABMAP project:
// [http://code.google.com/p/openfabmap/]
//
// For published work which uses all or part of OpenFABMAP, please cite:
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
//
// Original Algorithm by Mark Cummins and Paul Newman:
// [http://ijr.sagepub.com/content/27/6/647.short]
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
// [http://ijr.sagepub.com/content/30/9/1100.abstract]
//
//                           License Agreement
//
// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
//                    Will Maddern [w.maddern@qut.edu.au], all rights reserved.
//
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include <iostream>

#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"

using namespace cv;
using namespace std;

int main(int argc, char * argv[]) {

    /*
    Note: the vocabulary and training data are specifically made for this
    OpenCV example. They are not recommended for use with other datasets, as
    they are intentionally small to reduce baggage in the OpenCV project.

    A new vocabulary can be generated using the supplied BOWMSCtrainer (or
    another clustering method such as k-means; a minimal sketch follows this
    comment).

    New training data can be generated by extracting bag-of-words descriptors
    with the OpenCV BOWImgDescriptorExtractor class.

    The vocabulary, Chow-Liu tree, training data and test data can all be
    saved and loaded with OpenCV's FileStorage class, so it is not necessary
    to regenerate them each time as done in this example.
    */
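    // A minimal sketch of the k-means route mentioned above, kept as a
    // comment so it does not alter the sample; `trainDescriptors` stands in
    // for a Mat of descriptors (one per row) gathered from training images,
    // and the vocabulary size of 3000 is an assumed value:
    //
    //     BOWKMeansTrainer bowTrainer(3000);
    //     bowTrainer.add(trainDescriptors);
    //     Mat vocab = bowTrainer.cluster();
    //     FileStorage vfs("vocab.yml", FileStorage::WRITE);
    //     vfs << "Vocabulary" << vocab;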

    cout << "This sample program demonstrates the FAB-MAP image matching "
        "algorithm" << endl << endl;

    string dataDir;
    if (argc == 1) {
        dataDir = "fabmap/";
    } else if (argc == 2) {
        dataDir = string(argv[1]);
        dataDir += "/";
    } else {
        //incorrect arguments
        cout << "Usage: fabmap_sample <sample data directory>" <<
            endl;
        return -1;
    }

    FileStorage fs;

    //load/generate vocab
    cout << "Loading Vocabulary: " <<
        dataDir + string("vocab_small.yml") << endl << endl;
    fs.open(dataDir + string("vocab_small.yml"), FileStorage::READ);
    Mat vocab;
    fs["Vocabulary"] >> vocab;
    if (vocab.empty()) {
        cerr << "Vocabulary not found" << endl;
        return -1;
    }
    fs.release();

    //load/generate training data

    cout << "Loading Training Data: " <<
        dataDir + string("train_data_small.yml") << endl << endl;
    fs.open(dataDir + string("train_data_small.yml"), FileStorage::READ);
    Mat trainData;
    fs["BOWImageDescs"] >> trainData;
    if (trainData.empty()) {
        cerr << "Training Data not found" << endl;
        return -1;
    }
    fs.release();

    //create Chow-Liu tree
    cout << "Making Chow-Liu Tree from training data" << endl <<
        endl;
    of2::ChowLiuTree treeBuilder;
    treeBuilder.add(trainData);
    Mat tree = treeBuilder.make();

    //generate test data
    cout << "Extracting Test Data from images" << endl <<
        endl;
    Ptr<FeatureDetector> detector(
        new DynamicAdaptedFeatureDetector(
            AdjusterAdapter::create("STAR"), 130, 150, 5));
    Ptr<DescriptorExtractor> extractor(
        new SurfDescriptorExtractor(1000, 4, 2, false, true));
    Ptr<DescriptorMatcher> matcher =
        DescriptorMatcher::create("FlannBased");

    BOWImgDescriptorExtractor bide(extractor, matcher);
    bide.setVocabulary(vocab);

    vector<string> imageNames;
    imageNames.push_back(string("stlucia_test_small0000.jpeg"));
    imageNames.push_back(string("stlucia_test_small0001.jpeg"));
    imageNames.push_back(string("stlucia_test_small0002.jpeg"));
    imageNames.push_back(string("stlucia_test_small0003.jpeg"));
    imageNames.push_back(string("stlucia_test_small0004.jpeg"));
    imageNames.push_back(string("stlucia_test_small0005.jpeg"));
    imageNames.push_back(string("stlucia_test_small0006.jpeg"));
    imageNames.push_back(string("stlucia_test_small0007.jpeg"));
    imageNames.push_back(string("stlucia_test_small0008.jpeg"));
    imageNames.push_back(string("stlucia_test_small0009.jpeg"));

    Mat testData;
    Mat frame;
    Mat bow;
    vector<KeyPoint> kpts;

    for(size_t i = 0; i < imageNames.size(); i++) {
        cout << dataDir + imageNames[i] << endl;
        frame = imread(dataDir + imageNames[i]);
        if(frame.empty()) {
            cerr << "Test images not found" << endl;
            return -1;
        }

        detector->detect(frame, kpts);

        bide.compute(frame, kpts, bow);

        testData.push_back(bow);

        drawKeypoints(frame, kpts, frame);
        imshow(imageNames[i], frame);
        waitKey(10);
    }

    //run fabmap
    cout << "Running FAB-MAP algorithm" << endl <<
        endl;
    Ptr<of2::FabMap> fabmap;

    fabmap.reset(new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
                                  of2::FabMap::CHOW_LIU));
    fabmap->addTraining(trainData);

    vector<of2::IMatch> matches;
    fabmap->compare(testData, matches, true);

    //display output
    Mat result_small = Mat::zeros(10, 10, CV_8UC1);
    vector<of2::IMatch>::iterator l;

    for(l = matches.begin(); l != matches.end(); l++) {
        if(l->imgIdx < 0) {
            result_small.at<char>(l->queryIdx, l->queryIdx) =
                (char)(l->match*255);

        } else {
            result_small.at<char>(l->queryIdx, l->imgIdx) =
                (char)(l->match*255);
        }
    }
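    // Each of2::IMatch above pairs a query image (queryIdx) with either a
    // previously seen image (imgIdx >= 0) or, when imgIdx < 0, with a "new
    // place" hypothesis, which is why those entries land on the diagonal;
    // the `match` field is the match probability, scaled here to 0..255.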

    Mat result_large(100, 100, CV_8UC1);
    resize(result_small, result_large, Size(500, 500), 0, 0, INTER_NEAREST);

    cout << endl << "Press any key to exit" << endl;
    imshow("Confusion Matrix", result_large);
    waitKey();

    return 0;
}
samples/cpp/facedetect.cpp (new file, 284 lines)
@@ -0,0 +1,284 @@
#include "opencv2/objdetect.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"

#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"

#include <cctype>
#include <iostream>
#include <iterator>
#include <stdio.h>

using namespace std;
using namespace cv;

static void help()
{
    cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n"
            "This classifier can recognize many kinds of rigid objects, once the appropriate classifier is trained.\n"
            "Its best-known use is for faces.\n"
            "Usage:\n"
            "./facedetect [--cascade=<cascade_path> this is the primary trained classifier such as frontal face]\n"
            "   [--nested-cascade[=nested_cascade_path this is an optional secondary classifier such as eyes]]\n"
            "   [--scale=<image scale greater or equal to 1, try 1.3 for example>]\n"
            "   [--try-flip]\n"
            "   [filename|camera_index]\n\n"
            "see facedetect.cmd for one call:\n"
            "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3\n\n"
            "During execution:\n\tHit any key to quit.\n"
            "\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}

void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip );

string cascadeName = "../../data/haarcascades/haarcascade_frontalface_alt.xml";
string nestedCascadeName = "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";

int main( int argc, const char** argv )
{
    CvCapture* capture = 0;
    Mat frame, frameCopy, image;
    const string scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const string cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const string nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    const string tryFlipOpt = "--try-flip";
    size_t tryFlipOptLen = tryFlipOpt.length();
    string inputName;
    bool tryflip = false;

    help();

    CascadeClassifier cascade, nestedCascade;
    double scale = 1;

    for( int i = 1; i < argc; i++ )
    {
        cout << "Processing " << i << " " << argv[i] << endl;
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
        {
            cascadeName.assign( argv[i] + cascadeOptLen );
            cout << "  from which we have cascadeName= " << cascadeName << endl;
        }
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
                scale = 1;
            cout << " from which we read scale = " << scale << endl;
        }
        else if( tryFlipOpt.compare( 0, tryFlipOptLen, argv[i], tryFlipOptLen ) == 0 )
        {
            tryflip = true;
            cout << " will try to flip image horizontally to detect asymmetric objects\n";
        }
        else if( argv[i][0] == '-' )
        {
            cerr << "WARNING: Unknown option " << argv[i] << endl;
        }
        else
            inputName.assign( argv[i] );
    }

    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        help();
        return -1;
    }

    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
    {
        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
        int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
        if(!capture) cout << "Capture from CAM " << c << " didn't work" << endl;
    }
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
        {
            capture = cvCaptureFromAVI( inputName.c_str() );
            if(!capture) cout << "Capture from AVI didn't work" << endl;
        }
    }
    else
    {
        image = imread( "lena.jpg", 1 );
        if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
    }

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        cout << "In capture ..." << endl;
        for(;;)
        {
            IplImage* iplImg = cvQueryFrame( capture );
            frame = cv::cvarrToMat(iplImg);
            if( frame.empty() )
                break;
            if( iplImg->origin == IPL_ORIGIN_TL )
                frame.copyTo( frameCopy );
            else
                flip( frame, frameCopy, 0 );

            detectAndDraw( frameCopy, cascade, nestedCascade, scale, tryflip );

            if( waitKey( 10 ) >= 0 )
                goto _cleanup_;
        }

        waitKey(0);

_cleanup_:
        cvReleaseCapture( &capture );
    }
    else
    {
        cout << "In image read" << endl;
        if( !image.empty() )
        {
            detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
            list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf), c;
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
                        c = waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                    else
                    {
                        cerr << "Aw snap, couldn't read image " << buf << endl;
                    }
                }
                fclose(f);
            }
        }
    }

    cvDestroyWindow("result");

    return 0;
}

void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
    int i = 0;
    double t = 0;
    vector<Rect> faces, faces2;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, COLOR_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CASCADE_FIND_BIGGEST_OBJECT
        //|CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE
        ,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                 //|CASCADE_FIND_BIGGEST_OBJECT
                                 //|CASCADE_DO_ROUGH_SEARCH
                                 |CASCADE_SCALE_IMAGE
                                 ,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;

        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
                       cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg(*r);
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            //|CASCADE_DO_CANNY_PRUNING
            |CASCADE_SCALE_IMAGE
            ,
            Size(30, 30) );
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    cv::imshow( "result", img );
}
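// A note on the scale handling above: detection runs on smallImg, the input
// downscaled by `scale`, and every reported rectangle is mapped back to the
// original resolution by multiplying its coordinates by the same factor
// (the cvRound((r->x + ...)*scale) calls) -- trading some accuracy for speed.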
@@ -1,400 +0,0 @@
|
||||
/path/to/at/s13/2.pgm;12
|
||||
/path/to/at/s13/7.pgm;12
|
||||
/path/to/at/s13/6.pgm;12
|
||||
/path/to/at/s13/9.pgm;12
|
||||
/path/to/at/s13/5.pgm;12
|
||||
/path/to/at/s13/3.pgm;12
|
||||
/path/to/at/s13/4.pgm;12
|
||||
/path/to/at/s13/10.pgm;12
|
||||
/path/to/at/s13/8.pgm;12
|
||||
/path/to/at/s13/1.pgm;12
|
||||
/path/to/at/s17/2.pgm;16
|
||||
/path/to/at/s17/7.pgm;16
|
||||
/path/to/at/s17/6.pgm;16
|
||||
/path/to/at/s17/9.pgm;16
|
||||
/path/to/at/s17/5.pgm;16
|
||||
/path/to/at/s17/3.pgm;16
|
||||
/path/to/at/s17/4.pgm;16
|
||||
/path/to/at/s17/10.pgm;16
|
||||
/path/to/at/s17/8.pgm;16
|
||||
/path/to/at/s17/1.pgm;16
|
||||
/path/to/at/s32/2.pgm;31
|
||||
/path/to/at/s32/7.pgm;31
|
||||
/path/to/at/s32/6.pgm;31
|
||||
/path/to/at/s32/9.pgm;31
|
||||
/path/to/at/s32/5.pgm;31
|
||||
/path/to/at/s32/3.pgm;31
|
||||
/path/to/at/s32/4.pgm;31
|
||||
/path/to/at/s32/10.pgm;31
|
||||
/path/to/at/s32/8.pgm;31
|
||||
/path/to/at/s32/1.pgm;31
|
||||
/path/to/at/s10/2.pgm;9
|
||||
/path/to/at/s10/7.pgm;9
|
||||
/path/to/at/s10/6.pgm;9
|
||||
/path/to/at/s10/9.pgm;9
|
||||
/path/to/at/s10/5.pgm;9
|
||||
/path/to/at/s10/3.pgm;9
|
||||
/path/to/at/s10/4.pgm;9
|
||||
/path/to/at/s10/10.pgm;9
|
||||
/path/to/at/s10/8.pgm;9
|
||||
/path/to/at/s10/1.pgm;9
|
||||
/path/to/at/s27/2.pgm;26
|
||||
/path/to/at/s27/7.pgm;26
|
||||
/path/to/at/s27/6.pgm;26
|
||||
/path/to/at/s27/9.pgm;26
|
||||
/path/to/at/s27/5.pgm;26
|
||||
/path/to/at/s27/3.pgm;26
|
||||
/path/to/at/s27/4.pgm;26
|
||||
/path/to/at/s27/10.pgm;26
|
||||
/path/to/at/s27/8.pgm;26
|
||||
/path/to/at/s27/1.pgm;26
|
||||
/path/to/at/s5/2.pgm;4
|
||||
/path/to/at/s5/7.pgm;4
|
||||
/path/to/at/s5/6.pgm;4
|
||||
/path/to/at/s5/9.pgm;4
|
||||
/path/to/at/s5/5.pgm;4
|
||||
/path/to/at/s5/3.pgm;4
|
||||
/path/to/at/s5/4.pgm;4
|
||||
/path/to/at/s5/10.pgm;4
|
||||
/path/to/at/s5/8.pgm;4
|
||||
/path/to/at/s5/1.pgm;4
|
||||
/path/to/at/s20/2.pgm;19
|
||||
/path/to/at/s20/7.pgm;19
|
||||
/path/to/at/s20/6.pgm;19
|
||||
/path/to/at/s20/9.pgm;19
|
||||
/path/to/at/s20/5.pgm;19
|
||||
/path/to/at/s20/3.pgm;19
|
||||
/path/to/at/s20/4.pgm;19
|
||||
/path/to/at/s20/10.pgm;19
|
||||
/path/to/at/s20/8.pgm;19
|
||||
/path/to/at/s20/1.pgm;19
|
||||
/path/to/at/s30/2.pgm;29
|
||||
/path/to/at/s30/7.pgm;29
|
||||
/path/to/at/s30/6.pgm;29
|
||||
/path/to/at/s30/9.pgm;29
|
||||
/path/to/at/s30/5.pgm;29
|
||||
/path/to/at/s30/3.pgm;29
|
||||
/path/to/at/s30/4.pgm;29
|
||||
/path/to/at/s30/10.pgm;29
|
||||
/path/to/at/s30/8.pgm;29
|
||||
/path/to/at/s30/1.pgm;29
|
||||
/path/to/at/s39/2.pgm;38
|
||||
/path/to/at/s39/7.pgm;38
|
||||
/path/to/at/s39/6.pgm;38
|
||||
/path/to/at/s39/9.pgm;38
|
||||
/path/to/at/s39/5.pgm;38
|
||||
/path/to/at/s39/3.pgm;38
|
||||
/path/to/at/s39/4.pgm;38
|
||||
/path/to/at/s39/10.pgm;38
|
||||
/path/to/at/s39/8.pgm;38
|
||||
/path/to/at/s39/1.pgm;38
|
||||
/path/to/at/s35/2.pgm;34
|
||||
/path/to/at/s35/7.pgm;34
|
||||
/path/to/at/s35/6.pgm;34
|
||||
/path/to/at/s35/9.pgm;34
|
||||
/path/to/at/s35/5.pgm;34
|
||||
/path/to/at/s35/3.pgm;34
|
||||
/path/to/at/s35/4.pgm;34
|
||||
/path/to/at/s35/10.pgm;34
|
||||
/path/to/at/s35/8.pgm;34
|
||||
/path/to/at/s35/1.pgm;34
|
||||
/path/to/at/s23/2.pgm;22
|
||||
/path/to/at/s23/7.pgm;22
|
||||
/path/to/at/s23/6.pgm;22
|
||||
/path/to/at/s23/9.pgm;22
|
||||
/path/to/at/s23/5.pgm;22
|
||||
/path/to/at/s23/3.pgm;22
|
||||
/path/to/at/s23/4.pgm;22
|
||||
/path/to/at/s23/10.pgm;22
|
||||
/path/to/at/s23/8.pgm;22
|
||||
/path/to/at/s23/1.pgm;22
|
||||
/path/to/at/s4/2.pgm;3
|
||||
/path/to/at/s4/7.pgm;3
|
||||
/path/to/at/s4/6.pgm;3
|
||||
/path/to/at/s4/9.pgm;3
|
||||
/path/to/at/s4/5.pgm;3
|
||||
/path/to/at/s4/3.pgm;3
|
||||
/path/to/at/s4/4.pgm;3
|
||||
/path/to/at/s4/10.pgm;3
|
||||
/path/to/at/s4/8.pgm;3
|
||||
/path/to/at/s4/1.pgm;3
|
||||
/path/to/at/s9/2.pgm;8
|
||||
/path/to/at/s9/7.pgm;8
|
||||
/path/to/at/s9/6.pgm;8
|
||||
/path/to/at/s9/9.pgm;8
|
||||
/path/to/at/s9/5.pgm;8
|
||||
/path/to/at/s9/3.pgm;8
|
||||
/path/to/at/s9/4.pgm;8
|
||||
/path/to/at/s9/10.pgm;8
|
||||
/path/to/at/s9/8.pgm;8
|
||||
/path/to/at/s9/1.pgm;8
|
||||
/path/to/at/s37/2.pgm;36
|
||||
/path/to/at/s37/7.pgm;36
|
||||
/path/to/at/s37/6.pgm;36
|
||||
/path/to/at/s37/9.pgm;36
|
||||
/path/to/at/s37/5.pgm;36
|
||||
/path/to/at/s37/3.pgm;36
|
||||
/path/to/at/s37/4.pgm;36
|
||||
/path/to/at/s37/10.pgm;36
|
||||
/path/to/at/s37/8.pgm;36
|
||||
/path/to/at/s37/1.pgm;36
|
||||
/path/to/at/s24/2.pgm;23
|
||||
/path/to/at/s24/7.pgm;23
|
||||
/path/to/at/s24/6.pgm;23
|
||||
/path/to/at/s24/9.pgm;23
|
||||
/path/to/at/s24/5.pgm;23
|
||||
/path/to/at/s24/3.pgm;23
|
||||
/path/to/at/s24/4.pgm;23
|
||||
/path/to/at/s24/10.pgm;23
|
||||
/path/to/at/s24/8.pgm;23
|
||||
/path/to/at/s24/1.pgm;23
|
||||
/path/to/at/s19/2.pgm;18
|
||||
/path/to/at/s19/7.pgm;18
|
||||
/path/to/at/s19/6.pgm;18
|
||||
/path/to/at/s19/9.pgm;18
|
||||
/path/to/at/s19/5.pgm;18
|
||||
/path/to/at/s19/3.pgm;18
|
||||
/path/to/at/s19/4.pgm;18
|
||||
/path/to/at/s19/10.pgm;18
|
||||
/path/to/at/s19/8.pgm;18
|
||||
/path/to/at/s19/1.pgm;18
|
||||
/path/to/at/s8/2.pgm;7
|
||||
/path/to/at/s8/7.pgm;7
|
||||
/path/to/at/s8/6.pgm;7
|
||||
/path/to/at/s8/9.pgm;7
|
||||
/path/to/at/s8/5.pgm;7
|
||||
/path/to/at/s8/3.pgm;7
|
||||
/path/to/at/s8/4.pgm;7
|
||||
/path/to/at/s8/10.pgm;7
|
||||
/path/to/at/s8/8.pgm;7
|
||||
/path/to/at/s8/1.pgm;7
|
||||
/path/to/at/s21/2.pgm;20
|
||||
/path/to/at/s21/7.pgm;20
|
||||
/path/to/at/s21/6.pgm;20
|
||||
/path/to/at/s21/9.pgm;20
|
||||
/path/to/at/s21/5.pgm;20
|
||||
/path/to/at/s21/3.pgm;20
|
||||
/path/to/at/s21/4.pgm;20
|
||||
/path/to/at/s21/10.pgm;20
|
||||
/path/to/at/s21/8.pgm;20
|
||||
/path/to/at/s21/1.pgm;20
|
||||
/path/to/at/s1/2.pgm;0
|
||||
/path/to/at/s1/7.pgm;0
|
||||
/path/to/at/s1/6.pgm;0
|
||||
/path/to/at/s1/9.pgm;0
|
||||
/path/to/at/s1/5.pgm;0
|
||||
/path/to/at/s1/3.pgm;0
|
||||
/path/to/at/s1/4.pgm;0
|
||||
/path/to/at/s1/10.pgm;0
|
||||
/path/to/at/s1/8.pgm;0
|
||||
/path/to/at/s1/1.pgm;0
|
||||
/path/to/at/s7/2.pgm;6
|
||||
/path/to/at/s7/7.pgm;6
|
||||
/path/to/at/s7/6.pgm;6
|
||||
/path/to/at/s7/9.pgm;6
|
||||
/path/to/at/s7/5.pgm;6
|
||||
/path/to/at/s7/3.pgm;6
|
||||
/path/to/at/s7/4.pgm;6
|
||||
/path/to/at/s7/10.pgm;6
|
||||
/path/to/at/s7/8.pgm;6
|
||||
/path/to/at/s7/1.pgm;6
|
||||
/path/to/at/s16/2.pgm;15
|
||||
/path/to/at/s16/7.pgm;15
|
||||
/path/to/at/s16/6.pgm;15
|
||||
/path/to/at/s16/9.pgm;15
|
||||
/path/to/at/s16/5.pgm;15
|
||||
/path/to/at/s16/3.pgm;15
|
||||
/path/to/at/s16/4.pgm;15
|
||||
/path/to/at/s16/10.pgm;15
|
||||
/path/to/at/s16/8.pgm;15
|
||||
/path/to/at/s16/1.pgm;15
|
||||
/path/to/at/s36/2.pgm;35
|
||||
/path/to/at/s36/7.pgm;35
|
||||
/path/to/at/s36/6.pgm;35
|
||||
/path/to/at/s36/9.pgm;35
|
||||
/path/to/at/s36/5.pgm;35
|
||||
/path/to/at/s36/3.pgm;35
|
||||
/path/to/at/s36/4.pgm;35
|
||||
/path/to/at/s36/10.pgm;35
|
||||
/path/to/at/s36/8.pgm;35
|
||||
/path/to/at/s36/1.pgm;35
|
||||
/path/to/at/s25/2.pgm;24
|
||||
/path/to/at/s25/7.pgm;24
|
||||
/path/to/at/s25/6.pgm;24
|
||||
/path/to/at/s25/9.pgm;24
|
||||
/path/to/at/s25/5.pgm;24
|
||||
/path/to/at/s25/3.pgm;24
|
||||
/path/to/at/s25/4.pgm;24
|
||||
/path/to/at/s25/10.pgm;24
|
||||
/path/to/at/s25/8.pgm;24
|
||||
/path/to/at/s25/1.pgm;24
|
||||
/path/to/at/s14/2.pgm;13
|
||||
/path/to/at/s14/7.pgm;13
|
||||
/path/to/at/s14/6.pgm;13
|
||||
/path/to/at/s14/9.pgm;13
|
||||
/path/to/at/s14/5.pgm;13
|
||||
/path/to/at/s14/3.pgm;13
|
||||
/path/to/at/s14/4.pgm;13
|
||||
/path/to/at/s14/10.pgm;13
|
||||
/path/to/at/s14/8.pgm;13
|
||||
/path/to/at/s14/1.pgm;13
|
||||
/path/to/at/s34/2.pgm;33
/path/to/at/s34/7.pgm;33
/path/to/at/s34/6.pgm;33
/path/to/at/s34/9.pgm;33
/path/to/at/s34/5.pgm;33
/path/to/at/s34/3.pgm;33
/path/to/at/s34/4.pgm;33
/path/to/at/s34/10.pgm;33
/path/to/at/s34/8.pgm;33
/path/to/at/s34/1.pgm;33
/path/to/at/s11/2.pgm;10
/path/to/at/s11/7.pgm;10
/path/to/at/s11/6.pgm;10
/path/to/at/s11/9.pgm;10
/path/to/at/s11/5.pgm;10
/path/to/at/s11/3.pgm;10
/path/to/at/s11/4.pgm;10
/path/to/at/s11/10.pgm;10
/path/to/at/s11/8.pgm;10
/path/to/at/s11/1.pgm;10
/path/to/at/s26/2.pgm;25
/path/to/at/s26/7.pgm;25
/path/to/at/s26/6.pgm;25
/path/to/at/s26/9.pgm;25
/path/to/at/s26/5.pgm;25
/path/to/at/s26/3.pgm;25
/path/to/at/s26/4.pgm;25
/path/to/at/s26/10.pgm;25
/path/to/at/s26/8.pgm;25
/path/to/at/s26/1.pgm;25
/path/to/at/s18/2.pgm;17
/path/to/at/s18/7.pgm;17
/path/to/at/s18/6.pgm;17
/path/to/at/s18/9.pgm;17
/path/to/at/s18/5.pgm;17
/path/to/at/s18/3.pgm;17
/path/to/at/s18/4.pgm;17
/path/to/at/s18/10.pgm;17
/path/to/at/s18/8.pgm;17
/path/to/at/s18/1.pgm;17
/path/to/at/s29/2.pgm;28
/path/to/at/s29/7.pgm;28
/path/to/at/s29/6.pgm;28
/path/to/at/s29/9.pgm;28
/path/to/at/s29/5.pgm;28
/path/to/at/s29/3.pgm;28
/path/to/at/s29/4.pgm;28
/path/to/at/s29/10.pgm;28
/path/to/at/s29/8.pgm;28
/path/to/at/s29/1.pgm;28
/path/to/at/s33/2.pgm;32
/path/to/at/s33/7.pgm;32
/path/to/at/s33/6.pgm;32
/path/to/at/s33/9.pgm;32
/path/to/at/s33/5.pgm;32
/path/to/at/s33/3.pgm;32
/path/to/at/s33/4.pgm;32
/path/to/at/s33/10.pgm;32
/path/to/at/s33/8.pgm;32
/path/to/at/s33/1.pgm;32
/path/to/at/s12/2.pgm;11
/path/to/at/s12/7.pgm;11
/path/to/at/s12/6.pgm;11
/path/to/at/s12/9.pgm;11
/path/to/at/s12/5.pgm;11
/path/to/at/s12/3.pgm;11
/path/to/at/s12/4.pgm;11
/path/to/at/s12/10.pgm;11
/path/to/at/s12/8.pgm;11
/path/to/at/s12/1.pgm;11
/path/to/at/s6/2.pgm;5
/path/to/at/s6/7.pgm;5
/path/to/at/s6/6.pgm;5
/path/to/at/s6/9.pgm;5
/path/to/at/s6/5.pgm;5
/path/to/at/s6/3.pgm;5
/path/to/at/s6/4.pgm;5
/path/to/at/s6/10.pgm;5
/path/to/at/s6/8.pgm;5
/path/to/at/s6/1.pgm;5
/path/to/at/s22/2.pgm;21
/path/to/at/s22/7.pgm;21
/path/to/at/s22/6.pgm;21
/path/to/at/s22/9.pgm;21
/path/to/at/s22/5.pgm;21
/path/to/at/s22/3.pgm;21
/path/to/at/s22/4.pgm;21
/path/to/at/s22/10.pgm;21
/path/to/at/s22/8.pgm;21
/path/to/at/s22/1.pgm;21
/path/to/at/s15/2.pgm;14
/path/to/at/s15/7.pgm;14
/path/to/at/s15/6.pgm;14
/path/to/at/s15/9.pgm;14
/path/to/at/s15/5.pgm;14
/path/to/at/s15/3.pgm;14
/path/to/at/s15/4.pgm;14
/path/to/at/s15/10.pgm;14
/path/to/at/s15/8.pgm;14
/path/to/at/s15/1.pgm;14
/path/to/at/s2/2.pgm;1
/path/to/at/s2/7.pgm;1
/path/to/at/s2/6.pgm;1
/path/to/at/s2/9.pgm;1
/path/to/at/s2/5.pgm;1
/path/to/at/s2/3.pgm;1
/path/to/at/s2/4.pgm;1
/path/to/at/s2/10.pgm;1
/path/to/at/s2/8.pgm;1
/path/to/at/s2/1.pgm;1
/path/to/at/s31/2.pgm;30
/path/to/at/s31/7.pgm;30
/path/to/at/s31/6.pgm;30
/path/to/at/s31/9.pgm;30
/path/to/at/s31/5.pgm;30
/path/to/at/s31/3.pgm;30
/path/to/at/s31/4.pgm;30
/path/to/at/s31/10.pgm;30
/path/to/at/s31/8.pgm;30
/path/to/at/s31/1.pgm;30
/path/to/at/s28/2.pgm;27
/path/to/at/s28/7.pgm;27
/path/to/at/s28/6.pgm;27
/path/to/at/s28/9.pgm;27
/path/to/at/s28/5.pgm;27
/path/to/at/s28/3.pgm;27
/path/to/at/s28/4.pgm;27
/path/to/at/s28/10.pgm;27
/path/to/at/s28/8.pgm;27
/path/to/at/s28/1.pgm;27
/path/to/at/s40/2.pgm;39
/path/to/at/s40/7.pgm;39
/path/to/at/s40/6.pgm;39
/path/to/at/s40/9.pgm;39
/path/to/at/s40/5.pgm;39
/path/to/at/s40/3.pgm;39
/path/to/at/s40/4.pgm;39
/path/to/at/s40/10.pgm;39
/path/to/at/s40/8.pgm;39
/path/to/at/s40/1.pgm;39
/path/to/at/s3/2.pgm;2
/path/to/at/s3/7.pgm;2
/path/to/at/s3/6.pgm;2
/path/to/at/s3/9.pgm;2
/path/to/at/s3/5.pgm;2
/path/to/at/s3/3.pgm;2
/path/to/at/s3/4.pgm;2
/path/to/at/s3/10.pgm;2
/path/to/at/s3/8.pgm;2
/path/to/at/s3/1.pgm;2
/path/to/at/s38/2.pgm;37
/path/to/at/s38/7.pgm;37
/path/to/at/s38/6.pgm;37
/path/to/at/s38/9.pgm;37
/path/to/at/s38/5.pgm;37
/path/to/at/s38/3.pgm;37
/path/to/at/s38/4.pgm;37
/path/to/at/s38/10.pgm;37
/path/to/at/s38/8.pgm;37
/path/to/at/s38/1.pgm;37
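The listing above, consumed by the read_csv() helper in the sample that follows, uses one <path>;<label> record per line. A minimal generator sketch, assuming the AT&T layout <base>/s<N>/<M>.pgm with label N-1 (the base path is a placeholder, as in the listing):

#include <cstdio>

int main(int argc, char** argv)
{
    const char* base = argc > 1 ? argv[1] : "/path/to/at"; // placeholder base path
    for (int subject = 1; subject <= 40; subject++)        // s1..s40
        for (int image = 1; image <= 10; image++)          // 1.pgm..10.pgm
            std::printf("%s/s%d/%d.pgm;%d\n", base, subject, image, subject - 1);
    return 0;
}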
@@ -1,163 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/contrib.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat toGrayscale(InputArray _src) {
    Mat src = _src.getMat();
    // only allow one channel
    if(src.channels() != 1) {
        CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported");
    }
    // create and return normalized image
    Mat dst;
    cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 2) {
        cout << "usage: " << argv[0] << " <csv.ext>" << endl;
        exit(1);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      cv::createEigenFaceRecognizer(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold, call it with:
    //
    //      cv::createEigenFaceRecognizer(10, 123.0);
    //
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Sometimes you'll need to get/set internal model data,
    // which isn't exposed by the public cv::FaceRecognizer.
    // Since each cv::FaceRecognizer is derived from a
    // cv::Algorithm, you can query the data.
    //
    // First we'll use it to set the threshold of the FaceRecognizer
    // to 0.0 without retraining the model. This can be useful if
    // you are evaluating the model:
    //
    model->set("threshold", 0.0);
    // Now the threshold of this model is set to 0.0. A prediction
    // now returns -1, as it's impossible to have a distance below
    // it
    predictedLabel = model->predict(testSample);
    cout << "Predicted class = " << predictedLabel << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getMat("eigenvectors");
    // From this we will display the (at most) first 10 Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = toGrayscale(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        imshow(format("%d", i), cgrayscale);
    }
    waitKey(0);

    return 0;
}
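The comments in the deleted demo above spell out the three-argument predict() overload; a minimal usage sketch, assuming `model` and `testSample` exactly as set up in that demo:

// Sketch only: `model` and `testSample` come from the demo above.
int predicted = -1;
double confidence = 0.0;
model->predict(testSample, predicted, confidence);
cout << "Predicted class = " << predicted << " (distance = " << confidence << ")" << endl;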
@@ -1,5 +1,6 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>

@@ -1,10 +0,0 @@
<?xml version="1.0"?>
<opencv_storage>
<nclasses>0</nclasses>
<patchSize>31</patchSize>
<signatureSize>INT_MAX</signatureSize>
<nstructs>50</nstructs>
<structSize>9</structSize>
<nviews>1000</nviews>
<compressionMethod>0</compressionMethod>
</opencv_storage>
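Parameter files in this opencv_storage format load back through cv::FileStorage. A sketch reading two fields; the file name is an assumption (the generic_descriptor_match sample below refers to such a file as fern_params.xml):

cv::FileStorage fs("fern_params.xml", cv::FileStorage::READ); // file name assumed
int patchSize = (int)fs["patchSize"];  // 31 in the listing above
int nstructs  = (int)fs["nstructs"];   // 50 in the listing above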
@@ -1,4 +1,6 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>

@@ -15,6 +15,7 @@
 *
 ********************************************************************************/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;

@@ -1,128 +0,0 @@
//  demo.cpp
//
//  Here is an example on how to use the descriptor presented in the following paper:
//  A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012.
//  CVPR 2012 Open Source Award winner
//
//  Copyright (C) 2011-2012  Signal processing laboratory 2, EPFL,
//  Kirell Benzi (kirell.benzi@epfl.ch),
//  Raphael Ortiz (raphael.ortiz@a3.epfl.ch),
//  Alexandre Alahi (alexandre.alahi@epfl.ch)
//  and Pierre Vandergheynst (pierre.vandergheynst@epfl.ch)
//
//  Redistribution and use in source and binary forms, with or without modification,
//  are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
//  This software is provided by the copyright holders and contributors "as is" and
//  any express or implied warranties, including, but not limited to, the implied
//  warranties of merchantability and fitness for a particular purpose are disclaimed.
//  In no event shall the Intel Corporation or contributors be liable for any direct,
//  indirect, incidental, special, exemplary, or consequential damages
//  (including, but not limited to, procurement of substitute goods or services;
//  loss of use, data, or profits; or business interruption) however caused
//  and on any theory of liability, whether in contract, strict liability,
//  or tort (including negligence or otherwise) arising in any way out of
//  the use of this software, even if advised of the possibility of such damage.

#include <iostream>
#include <string>
#include <vector>

#include <opencv2/core.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/nonfree.hpp>

using namespace cv;

static void help( char** argv )
{
    std::cout << "\nUsage: " << argv[0] << " [path/to/image1] [path/to/image2] \n"
              << "This is an example on how to use the keypoint descriptor presented in the following paper: \n"
              << "A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. \n"
              << "In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award winner \n"
              << std::endl;
}

int main( int argc, char** argv ) {
    // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    if( argc != 3 ) {
        help(argv);
        return -1;
    }

    // Load images
    Mat imgA = imread(argv[1], IMREAD_GRAYSCALE );
    if( !imgA.data ) {
        std::cout << " --(!) Error reading image " << argv[1] << std::endl;
        return -1;
    }

    Mat imgB = imread(argv[2], IMREAD_GRAYSCALE );
    if( !imgB.data ) {
        std::cout << " --(!) Error reading image " << argv[2] << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypointsA, keypointsB;
    Mat descriptorsA, descriptorsB;
    std::vector<DMatch> matches;

    // DETECTION
    // Any openCV detector such as
    SurfFeatureDetector detector(2000,4);

    // DESCRIPTOR
    // Our proposed FREAK descriptor
    // (rotation invariance, scale invariance, pattern radius corresponding to SMALLEST_KP_SIZE,
    // number of octaves, optional vector containing the selected pairs)
    // FREAK extractor(true, true, 22, 4, std::vector<int>());
    FREAK extractor;

    // MATCHER
    // The standard Hamming distance can be used such as
    // BFMatcher matcher(NORM_HAMMING);
    // or the proposed cascade of hamming distance using SSSE3
    BFMatcher matcher(extractor.defaultNorm());

    // detect
    double t = (double)getTickCount();
    detector.detect( imgA, keypointsA );
    detector.detect( imgB, keypointsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "detection time [s]: " << t/1.0 << std::endl;

    // extract
    t = (double)getTickCount();
    extractor.compute( imgA, keypointsA, descriptorsA );
    extractor.compute( imgB, keypointsB, descriptorsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "extraction time [s]: " << t << std::endl;

    // match
    t = (double)getTickCount();
    matcher.match(descriptorsA, descriptorsB, matches);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "matching time [s]: " << t << std::endl;

    // Draw matches
    Mat imgMatch;
    drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);

    namedWindow("matches", WINDOW_KEEPRATIO);
    imshow("matches", imgMatch);
    waitKey(0);
}
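The deleted FREAK demo draws every match it finds. A hedged sketch of a simple distance filter, not part of the original sample, that could prune `matches` before drawMatches() (assumes the demo's includes plus <algorithm>):

// Sketch only: keep matches within twice the best Hamming distance.
double minDist = 1e9;
for (size_t i = 0; i < matches.size(); i++)
    minDist = std::min(minDist, (double)matches[i].distance);

std::vector<DMatch> good;
for (size_t i = 0; i < matches.size(); i++)
    if (matches[i].distance <= std::max(2.0 * minDist, 30.0))
        good.push_back(matches[i]);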
@@ -1,36 +0,0 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <cstdio>
#include <iostream>
#include <ctime>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    if( argc != 2 )
    {
        cout << "Colors count should be passed." << endl;
        return -1;
    }

    int colorsCount = atoi(argv[1]);
    vector<Scalar> colors;
    theRNG() = (uint64)time(0);
    generateColors( colors, colorsCount );

    int stripWidth = 20;
    Mat strips(300, colorsCount*stripWidth, CV_8UC3);
    for( int i = 0; i < colorsCount; i++ )
    {
        strips.colRange(i*stripWidth, (i+1)*stripWidth) = colors[i];
    }

    imshow( "strips", strips );
    waitKey();

    return 0;
}
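The deleted sample seeds theRNG() from time(0), so the palette changes on every run. Fixing the seed, as in this sketch, makes generateColors() reproducible:

cv::theRNG() = cv::RNG(12345);  // fixed seed instead of time(0)
std::vector<cv::Scalar> colors;
cv::generateColors(colors, 8);  // the same eight colors on every run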
@@ -1,94 +0,0 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/nonfree/nonfree.hpp"

#include <cstdio>

using namespace std;
using namespace cv;

static void help()
{
    printf("Use the SURF descriptor for matching keypoints between 2 images\n");
    printf("Format: \n./generic_descriptor_match <image1> <image2> <algorithm> <XML params>\n");
    printf("For example: ./generic_descriptor_match ../c/scene_l.bmp ../c/scene_r.bmp FERN fern_params.xml\n");
}

Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
                        const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx);

int main(int argc, char** argv)
{
    if (argc != 5)
    {
        help();
        return 0;
    }

    std::string img1_name = std::string(argv[1]);
    std::string img2_name = std::string(argv[2]);
    std::string alg_name = std::string(argv[3]);
    std::string params_filename = std::string(argv[4]);

    Ptr<GenericDescriptorMatcher> descriptorMatcher = GenericDescriptorMatcher::create(alg_name, params_filename);
    if( !descriptorMatcher )
    {
        printf ("Cannot create descriptor\n");
        return 0;
    }

    //printf("Reading the images...\n");
    Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
    Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);

    // extract keypoints from the first image
    SURF surf_extractor(5.0e3);
    vector<KeyPoint> keypoints1;

    // printf("Extracting keypoints\n");
    surf_extractor(img1, Mat(), keypoints1);

    printf("Extracted %d keypoints from the first image\n", (int)keypoints1.size());

    vector<KeyPoint> keypoints2;
    surf_extractor(img2, Mat(), keypoints2);
    printf("Extracted %d keypoints from the second image\n", (int)keypoints2.size());

    printf("Finding nearest neighbors... \n");
    // find NN for each of keypoints2 in keypoints1
    vector<DMatch> matches2to1;
    descriptorMatcher->match( img2, keypoints2, img1, keypoints1, matches2to1 );
    printf("Done\n");

    Mat img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, matches2to1);

    imshow("correspondences", img_corr);
    waitKey(0);
}

Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
                        const vector<KeyPoint>& features2, const vector<DMatch>& desc_idx)
{
    Mat part, img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
    img_corr = Scalar::all(0);
    part = img_corr(Rect(0, 0, img1.cols, img1.rows));
    cvtColor(img1, part, COLOR_GRAY2RGB);
    part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
    cvtColor(img2, part, COLOR_GRAY2RGB);

    for (size_t i = 0; i < features1.size(); i++)
    {
        circle(img_corr, features1[i].pt, 3, Scalar(0, 0, 255));
    }

    for (size_t i = 0; i < features2.size(); i++)
    {
        Point pt(cvRound(features2[i].pt.x + img1.cols), cvRound(features2[i].pt.y));
        circle(img_corr, pt, 3, Scalar(0, 0, 255));
        line(img_corr, features1[desc_idx[i].trainIdx].pt, pt, Scalar(0, 255, 0));
    }

    return img_corr;
}
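For reference, cv::drawMatches produces a comparable visualization to the hand-rolled DrawCorrespondences() above. A sketch using the demo's own variables; note that matches2to1 treats img2 as the query side, so it goes first:

cv::Mat vis;
cv::drawMatches(img2, keypoints2, img1, keypoints1, matches2to1, vis);
cv::imshow("correspondences", vis);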
@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

BIN samples/cpp/graf1.png (new executable file, 929 KiB)
BIN samples/cpp/graf3.png (new executable file, 953 KiB)
@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

@@ -1,182 +0,0 @@
/*
 *
 * Hybrid Tracking in OpenCV
 * Usage: ./hybridtrackingsample live
 *
 * For Benchmarking against the Bonn benchmark dataset
 * wget http://www.iai.uni-bonn.de/~kleind/tracking/datasets/seqG.zip
 * unzip seqG.zip -d ./seqG
 * ffmpeg -i seqG/Vid_G_rubikscube.avi seqG/%04d.png
 * ./hytrack seqG/Vid_G_rubikscube.txt
 *
 */

#include <stdio.h>
#include <time.h>
#include <iostream>

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/hybridtracker.hpp"

#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif

using namespace cv;
using namespace std;

Mat frame, image;
Rect selection;
Point origin;
bool selectObject = false;
int trackObject = 0;
int live = 1;

static void drawRectangle(Mat* img, Rect win) {
    rectangle(*img, Point(win.x, win.y), Point(win.x + win.width, win.y
            + win.height), Scalar(0, 255, 0), 2, CV_AA);
}

static void onMouse(int event, int x, int y, int, void*) {
    if (selectObject) {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch (event) {
    case EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        break;
    case EVENT_LBUTTONUP:
        selectObject = false;
        trackObject = -1;
        break;
    }
}

static void help()
{
    printf("Usage: ./hytrack live or ./hytrack <test_file> \n\
For Live View or Benchmarking. Read documentation in source code.\n\n");
}

int main(int argc, char** argv)
{
    if(argc != 2) {
        help();
        return 1;
    }

    FILE* f = 0;
    VideoCapture cap;
    char test_file[20] = "";

    if (strcmp(argv[1], "live") != 0)
    {
        sprintf(test_file, "%s", argv[1]);
        f = fopen(test_file, "r");
        char vid[20];
        int values_read = fscanf(f, "%s\n", vid);
        CV_Assert(values_read == 1);
        cout << "Benchmarking against " << vid << endl;
        live = 0;
    }
    else
    {
        cap.open(0);
        if (!cap.isOpened())
        {
            cout << "Failed to open camera" << endl;
            return 0;
        }
        cout << "Opened camera" << endl;
        cap.set(CAP_PROP_FRAME_WIDTH, 640);
        cap.set(CAP_PROP_FRAME_HEIGHT, 480);
        cap >> frame;
    }

    HybridTrackerParams params;
    // motion model params
    params.motion_model = CvMotionModel::LOW_PASS_FILTER;
    params.low_pass_gain = 0.1f;
    // mean shift params
    params.ms_tracker_weight = 0.8f;
    params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
    // feature tracking params
    params.ft_tracker_weight = 0.2f;
    params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
    params.ft_params.window_size = 0;

    HybridTracker tracker(params);
    char img_file[20] = "seqG/0001.png";
    char img_file_num[10];
    namedWindow("Win", 1);

    setMouseCallback("Win", onMouse, 0);

    int i = 0;
    float w[4];
    for(;;)
    {
        i++;
        if (live)
        {
            cap >> frame;
            if( frame.empty() )
                break;
            frame.copyTo(image);
        }
        else
        {
            int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
            CV_Assert(values_read == 5);
            sprintf(img_file, "seqG/%04d.png", i);
            image = imread(img_file, IMREAD_COLOR);
            if (image.empty())
                break;
            selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                             cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
        }

        sprintf(img_file_num, "Frame: %d", i);
        putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
        if (!image.empty())
        {

            if (trackObject < 0)
            {
                tracker.newTracker(image, selection);
                trackObject = 1;
            }

            if (trackObject)
            {
                tracker.updateTracker(image);
                drawRectangle(&image, tracker.getTrackingWindow());
            }

            if (selectObject && selection.width > 0 && selection.height > 0)
            {
                Mat roi(image, selection);
                bitwise_not(roi, roi);
            }

            drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                          cvRound(w[2]*image.cols), cvRound(w[3]*image.rows)));
            imshow("Win", image);

            waitKey(100);
        }
        else
            i = 0;
    }

    fclose(f);
    return 0;
}
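Each record of the benchmark file read above is "<frame> <x> <y> <w> <h>", with the box in coordinates relative to the image size. Converting a record to pixels mirrors the demo's inline selection code:

// Sketch: relative [0,1] box -> pixel Rect, as done inline in the demo.
static cv::Rect toPixels(const float w[4], const cv::Mat& image)
{
    return cv::Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                    cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
}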
@@ -2,12 +2,10 @@
#include <iostream>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/flann/miniflann.hpp>
#include <opencv2/core/utility.hpp>

using namespace cv; // all the new API is put into "cv" namespace. Export its content
using namespace std;
using namespace cv::flann;

static void help()
{
@@ -24,6 +22,7 @@ static void help()

#ifdef DEMO_MIXED_API_USE
#  include <opencv2/highgui/highgui_c.h>
#  include <opencv2/imgcodecs/imgcodecs_c.h>
#endif

int main( int argc, char** argv )
@@ -13,6 +13,7 @@
 * Authors: G. Evangelidis, INRIA, Grenoble, France
 *          M. Asbach, Fraunhofer IAIS, St. Augustin, Germany
 */
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <opencv2/imgproc.hpp>

@@ -1,4 +1,5 @@
#include <opencv2/core/core.hpp>
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <iostream>

@@ -2,6 +2,7 @@
 */

#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <string>
#include <iostream>

@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"

@@ -1,6 +1,7 @@
// testOpenCVCam.cpp : Defines the entry point for the console application.
//

#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>

samples/cpp/intrinsics.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
%YAML:1.0
M1: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 5.3480326845051309e+02, 0., 3.3568643204394891e+02, 0.,
       5.3480326845051309e+02, 2.4066183054066337e+02, 0., 0., 1. ]
D1: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ 2.9589439552724328e-01, -1.0354662043042675e+00, 0., 0., 0. ]
M2: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 5.3480326845051309e+02, 0., 3.3455744527912015e+02, 0.,
       5.3480326845051309e+02, 2.4205324573376600e+02, 0., 0., 1. ]
D2: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ -1.6916358306948096e-01, -1.1214173641213163e-01, 0., 0., 0. ]
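The new intrinsics.yml holds two camera matrices and distortion vectors; reading them back is a plain cv::FileStorage round trip (the file is assumed to sit in the working directory):

#include "opencv2/core/core.hpp"
#include <iostream>

int main()
{
    cv::FileStorage fs("intrinsics.yml", cv::FileStorage::READ);
    if (!fs.isOpened()) return -1;
    cv::Mat M1, D1, M2, D2;
    fs["M1"] >> M1; fs["D1"] >> D1; // camera 1 matrix + distortion
    fs["M2"] >> M2; fs["D2"] >> D2; // camera 2 matrix + distortion
    std::cout << "M1 =\n" << M1 << std::endl;
    return 0;
}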
@@ -33,7 +33,7 @@ int main( int /*argc*/, char** /*argv*/ )
    {
        int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);
        int i, sampleCount = rng.uniform(1, 1001);
        Mat points(sampleCount, 2, CV_32F), labels;
        Mat points(sampleCount, 1, CV_32FC2), labels;

        clusterCount = MIN(clusterCount, sampleCount);
        Mat centers;
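The hunk above swaps an N x 2 single-channel matrix for an N x 1 two-channel one. Both layouts hold the same 2-D samples, and Mat::reshape converts between the views without copying; a sketch, with sampleCount standing in for the demo's variable:

cv::Mat points2ch(sampleCount, 1, CV_32FC2); // one Point2f per row
cv::Mat points2col = points2ch.reshape(1);   // same buffer viewed as N x 2 CV_32F
CV_Assert(points2col.cols == 2);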
@@ -1,3 +1,4 @@
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

@@ -1,165 +0,0 @@
#include <iostream>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"

#if defined(WIN32) || defined(_WIN32)
#include <io.h>
#else
#include <dirent.h>
#endif

using namespace std;
using namespace cv;

static void help()
{
    cout << "This program demonstrates the use of the latentSVM detector." << endl <<
            "It reads in trained object models and then uses them to detect objects in images." << endl <<
            endl <<
            "Call:" << endl <<
            "./latentsvm_multidetect <imagesFolder> <modelsFolder> [<overlapThreshold>][<threadsNumber>]" << endl <<
            "<overlapThreshold> - threshold for the non-maximum suppression algorithm." << endl <<
            "Example of <modelsFolder> is opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007" << endl <<
            endl <<
            "Keys:" << endl <<
            "'n' - to go to the next image;" << endl <<
            "'esc' - to quit." << endl <<
            endl;
}

static void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
{
    vector<LatentSvmDetector::ObjectDetection> detections;

    TickMeter tm;
    tm.start();
    detector.detect( image, detections, overlapThreshold, numThreads);
    tm.stop();

    cout << "Detection time = " << tm.getTimeSec() << " sec" << endl;

    const vector<String> classNames = detector.getClassNames();
    CV_Assert( colors.size() == classNames.size() );

    for( size_t i = 0; i < detections.size(); i++ )
    {
        const LatentSvmDetector::ObjectDetection& od = detections[i];
        rectangle( image, od.rect, colors[od.classID], 3 );
    }
    // put text over all the rectangles
    for( size_t i = 0; i < detections.size(); i++ )
    {
        const LatentSvmDetector::ObjectDetection& od = detections[i];
        putText( image, classNames[od.classID], Point(od.rect.x+4,od.rect.y+13), FONT_HERSHEY_SIMPLEX, 0.55, colors[od.classID], 2 );
    }
}

static void readDirectory( const string& directoryName, vector<String>& filenames, bool addDirectoryName=true )
{
    filenames.clear();

#if defined(WIN32) || defined(_WIN32)
    struct _finddata_t s_file;
    string str = directoryName + "\\*.*";

    intptr_t h_file = _findfirst( str.c_str(), &s_file );
    if( h_file != static_cast<intptr_t>(-1.0) )
    {
        do
        {
            if( addDirectoryName )
                filenames.push_back(directoryName + "\\" + s_file.name);
            else
                filenames.push_back((string)s_file.name);
        }
        while( _findnext( h_file, &s_file ) == 0 );
    }
    _findclose( h_file );
#else
    DIR* dir = opendir( directoryName.c_str() );
    if( dir != NULL )
    {
        struct dirent* dent;
        while( (dent = readdir(dir)) != NULL )
        {
            if( addDirectoryName )
                filenames.push_back( directoryName + "/" + string(dent->d_name) );
            else
                filenames.push_back( string(dent->d_name) );
        }

        closedir( dir );
    }
#endif

    sort( filenames.begin(), filenames.end() );
}

int main(int argc, char* argv[])
{
    help();

    string images_folder, models_folder;
    float overlapThreshold = 0.2f;
    int numThreads = -1;
    if( argc > 2 )
    {
        images_folder = argv[1];
        models_folder = argv[2];
        if( argc > 3 ) overlapThreshold = (float)atof(argv[3]);
        if( overlapThreshold < 0 || overlapThreshold > 1)
        {
            cout << "overlapThreshold must be in interval (0,1)." << endl;
            exit(-1);
        }

        if( argc > 4 ) numThreads = atoi(argv[4]);
    }

    vector<String> images_filenames, models_filenames;
    readDirectory( images_folder, images_filenames );
    readDirectory( models_folder, models_filenames );

    LatentSvmDetector detector( models_filenames );
    if( detector.empty() )
    {
        cout << "Models can't be loaded" << endl;
        exit(-1);
    }

    const vector<String>& classNames = detector.getClassNames();
    cout << "Loaded " << classNames.size() << " models:" << endl;
    for( size_t i = 0; i < classNames.size(); i++ )
    {
        cout << i << ") " << classNames[i] << "; ";
    }
    cout << endl;

    cout << "overlapThreshold = " << overlapThreshold << endl;

    vector<Scalar> colors;
    generateColors( colors, detector.getClassNames().size() );

    for( size_t i = 0; i < images_filenames.size(); i++ )
    {
        Mat image = imread( images_filenames[i] );
        if( image.empty() ) continue;

        cout << "Process image " << images_filenames[i] << endl;
        detectAndDrawObjects( image, detector, colors, overlapThreshold, numThreads );

        imshow( "result", image );

        for(;;)
        {
            int c = waitKey();
            if( (char)c == 'n')
                break;
            else if( (char)c == '\x1b' )
                exit(0);
        }
    }

    return 0;
}
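On this branch, cv::glob can stand in for the platform-specific readDirectory() helper deleted above; a sketch, assuming a flat folder of images:

std::vector<cv::String> images_filenames;
cv::glob(images_folder, images_filenames); // results come back sorted, directory prefix included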
@@ -1,704 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc/imgproc_c.h> // cvFindContours
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>

// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);

std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst);

void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst);

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T);

cv::Mat displayQuantized(const cv::Mat& quantized);

// Copy of cv_mouse from cv_utilities
class Mouse
{
public:
    static void start(const std::string& a_img_name)
    {
        cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
    }
    static int event(void)
    {
        int l_event = m_event;
        m_event = -1;
        return l_event;
    }
    static int x(void)
    {
        return m_x;
    }
    static int y(void)
    {
        return m_y;
    }

private:
    static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
    {
        m_event = a_event;
        m_x = a_x;
        m_y = a_y;
    }

    static int m_event;
    static int m_x;
    static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;

static void help()
{
    printf("Usage: openni_demo [templates.yml]\n\n"
           "Place your object on a planar, featureless surface. With the mouse,\n"
           "frame it in the 'color' window and right click to learn a first template.\n"
           "Then press 'l' to enter online learning mode, and move the camera around.\n"
           "When the match score falls between 90-95%% the demo will add a new template.\n\n"
           "Keys:\n"
           "\t h   -- This help page\n"
           "\t l   -- Toggle online learning\n"
           "\t m   -- Toggle printing match result\n"
           "\t t   -- Toggle printing timings\n"
           "\t w   -- Write learned templates to disk\n"
           "\t [ ] -- Adjust matching threshold: '[' down, ']' up\n"
           "\t q   -- Quit\n\n");
}

// Adapted from cv_timer in cv_utilities
class Timer
{
public:
    Timer() : start_(0), time_(0) {}

    void start()
    {
        start_ = cv::getTickCount();
    }

    void stop()
    {
        CV_Assert(start_ != 0);
        int64 end = cv::getTickCount();
        time_ += end - start_;
        start_ = 0;
    }

    double time()
    {
        double ret = time_ / cv::getTickFrequency();
        time_ = 0;
        return ret;
    }

private:
    int64 start_, time_;
};

// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
    cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
    cv::FileStorage fs(filename, cv::FileStorage::READ);
    detector->read(fs.root());

    cv::FileNode fn = fs["classes"];
    for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
        detector->readClass(*i);

    return detector;
}

static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
    cv::FileStorage fs(filename, cv::FileStorage::WRITE);
    detector->write(fs);

    std::vector<cv::String> ids = detector->classIds();
    fs << "classes" << "[";
    for (int i = 0; i < (int)ids.size(); ++i)
    {
        fs << "{";
        detector->writeClass(ids[i], fs);
        fs << "}"; // current class
    }
    fs << "]"; // classes
}


int main(int argc, char * argv[])
{
    // Various settings and flags
    bool show_match_result = true;
    bool show_timings = false;
    bool learn_online = false;
    int num_classes = 0;
    int matching_threshold = 80;
    /// @todo Keys for changing these?
    cv::Size roi_size(200, 200);
    int learning_lower_bound = 90;
    int learning_upper_bound = 95;

    // Timers
    Timer extract_timer;
    Timer match_timer;

    // Initialize HighGUI
    help();
    cv::namedWindow("color");
    cv::namedWindow("normals");
    Mouse::start("color");

    // Initialize LINEMOD data structures
    cv::Ptr<cv::linemod::Detector> detector;
    std::string filename;
    if (argc == 1)
    {
        filename = "linemod_templates.yml";
        detector = cv::linemod::getDefaultLINEMOD();
    }
    else
    {
        detector = readLinemod(argv[1]);

        std::vector<cv::String> ids = detector->classIds();
        num_classes = detector->numClasses();
        printf("Loaded %s with %d classes and %d templates\n",
               argv[1], num_classes, detector->numTemplates());
        if (!ids.empty())
        {
            printf("Class ids:\n");
            std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
        }
    }
    int num_modalities = (int)detector->getModalities().size();

    // Open Kinect sensor
    cv::VideoCapture capture( cv::CAP_OPENNI );
    if (!capture.isOpened())
    {
        printf("Could not open OpenNI-capable sensor\n");
        return -1;
    }
    capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
    double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
    //printf("Focal length = %f\n", focal_length);

    // Main loop
    cv::Mat color, depth;
    for(;;)
    {
        // Capture next color/depth pair
        capture.grab();
        capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
        capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);

        std::vector<cv::Mat> sources;
        sources.push_back(color);
        sources.push_back(depth);
        cv::Mat display = color.clone();

        if (!learn_online)
        {
            cv::Point mouse(Mouse::x(), Mouse::y());
            int event = Mouse::event();

            // Compute ROI centered on current mouse location
            cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
            cv::Point pt1 = mouse - roi_offset; // top left
            cv::Point pt2 = mouse + roi_offset; // bottom right

            if (event == cv::EVENT_RBUTTONDOWN)
            {
                // Compute object mask by subtracting the plane within the ROI
                std::vector<CvPoint> chain(4);
                chain[0] = pt1;
                chain[1] = cv::Point(pt2.x, pt1.y);
                chain[2] = pt2;
                chain[3] = cv::Point(pt1.x, pt2.y);
                cv::Mat mask;
                subtractPlane(depth, mask, chain, focal_length);

                cv::imshow("mask", mask);

                // Extract template
                std::string class_id = cv::format("class%d", num_classes);
                cv::Rect bb;
                extract_timer.start();
                int template_id = detector->addTemplate(sources, class_id, mask, &bb);
                extract_timer.stop();
                if (template_id != -1)
                {
                    printf("*** Added template (id %d) for new object class %d***\n",
                           template_id, num_classes);
                    //printf("Extracted at (%d, %d) size %dx%d\n", bb.x, bb.y, bb.width, bb.height);
                }

                ++num_classes;
            }

            // Draw ROI for display
            cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
            cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
        }

        // Perform matching
        std::vector<cv::linemod::Match> matches;
        std::vector<cv::String> class_ids;
        std::vector<cv::Mat> quantized_images;
        match_timer.start();
        detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
        match_timer.stop();

        int classes_visited = 0;
        std::set<std::string> visited;

        for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
        {
            cv::linemod::Match m = matches[i];

            if (visited.insert(m.class_id).second)
            {
                ++classes_visited;

                if (show_match_result)
                {
                    printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                           m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
                }

                // Draw matching template
                const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
                drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));

                if (learn_online == true)
                {
                    /// @todo Online learning possibly broken by new gradient feature extraction,
                    /// which assumes an accurate object outline.

                    // Compute masks based on convex hull of matched template
                    cv::Mat color_mask, depth_mask;
                    std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
                                                                  cv::Point(m.x, m.y), color.size(),
                                                                  color_mask, display);
                    subtractPlane(depth, depth_mask, chain, focal_length);

                    cv::imshow("mask", depth_mask);

                    // If pretty sure (but not TOO sure), add new template
                    if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
                    {
                        extract_timer.start();
                        int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
                        extract_timer.stop();
                        if (template_id != -1)
                        {
                            printf("*** Added template (id %d) for existing object class %s***\n",
                                   template_id, m.class_id.c_str());
                        }
                    }
                }
            }
        }

        if (show_match_result && matches.empty())
            printf("No matches found...\n");
        if (show_timings)
        {
            printf("Training: %.2fs\n", extract_timer.time());
            printf("Matching: %.2fs\n", match_timer.time());
        }
        if (show_match_result || show_timings)
            printf("------------------------------------------------------------\n");

        cv::imshow("color", display);
        cv::imshow("normals", quantized_images[1]);

        cv::FileStorage fs;
        char key = (char)cv::waitKey(10);
        if( key == 'q' )
            break;

        switch (key)
        {
            case 'h':
                help();
                break;
            case 'm':
                // toggle printing match result
                show_match_result = !show_match_result;
                printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
                break;
            case 't':
                // toggle printing timings
                show_timings = !show_timings;
                printf("Show timings %s\n", show_timings ? "ON" : "OFF");
                break;
            case 'l':
                // toggle online learning
                learn_online = !learn_online;
                printf("Online learning %s\n", learn_online ? "ON" : "OFF");
                break;
            case '[':
                // decrement threshold
                matching_threshold = std::max(matching_threshold - 1, -100);
                printf("New threshold: %d\n", matching_threshold);
                break;
            case ']':
                // increment threshold
                matching_threshold = std::min(matching_threshold + 1, +100);
                printf("New threshold: %d\n", matching_threshold);
                break;
            default:
                ;
        }
    }
    return 0;
}

static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
    real.resize(proj.size());
    double f_inv = 1.0 / f;

    for (int i = 0; i < (int)proj.size(); ++i)
    {
        double Z = proj[i].z;
        real[i].x = (proj[i].x - 320.) * (f_inv * Z);
        real[i].y = (proj[i].y - 240.) * (f_inv * Z);
        real[i].z = Z;
    }
}

static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
    const int l_num_cost_pts = 200;

    float l_thres = 4;

    IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
    cvSet(lp_mask, cvRealScalar(0));

    std::vector<CvPoint> l_chain_vector;

    float l_chain_length = 0;
    float * lp_seg_length = new float[a_chain.size()];

    for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
    {
        float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
        float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
        lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
        l_chain_length += lp_seg_length[l_i];
    }
    for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
    {
        if (lp_seg_length[l_i] > 0)
        {
            int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
            float l_cur_len = lp_seg_length[l_i] / l_cur_num;

            for (int l_j = 0; l_j < l_cur_num; ++l_j)
            {
                float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

                CvPoint l_pts;

                l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
                l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

                l_chain_vector.push_back(l_pts);
            }
        }
    }
    std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

    for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
    {
        lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
        lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
        lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
        //CV_IMAGE_ELEM(lp_mask,unsigned char,(int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X)=255;
    }
    //cv_show_image(lp_mask,"hallo2");

    reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);

    CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
    CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
    CvMat * lp_w = cvCreateMat(4, 1, CV_32F);

    for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
    {
        CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
        CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
        CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
        CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
    }
    cvSVD(lp_pts, lp_w, 0, lp_v);

    float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
                    CV_MAT_ELEM(*lp_v, float, 1, 3),
                    CV_MAT_ELEM(*lp_v, float, 2, 3),
                    CV_MAT_ELEM(*lp_v, float, 3, 3)};

    float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);

    l_n[0] /= l_norm;
    l_n[1] /= l_norm;
    l_n[2] /= l_norm;
    l_n[3] /= l_norm;

    float l_max_dist = 0;

    for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
    {
        float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
                       l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
                       l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
                       l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);

        if (fabs(l_dist) > l_max_dist)
            l_max_dist = l_dist;
    }
    //std::cerr << "plane: " << l_n[0] << ";" << l_n[1] << ";" << l_n[2] << ";" << l_n[3] << " maxdist: " << l_max_dist << " end" << std::endl;
    int l_minx = ap_depth->width;
    int l_miny = ap_depth->height;
    int l_maxx = 0;
    int l_maxy = 0;

    for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
    {
        l_minx = std::min(l_minx, a_chain[l_i].x);
        l_miny = std::min(l_miny, a_chain[l_i].y);
        l_maxx = std::max(l_maxx, a_chain[l_i].x);
        l_maxy = std::max(l_maxy, a_chain[l_i].y);
    }
    int l_w = l_maxx - l_minx + 1;
    int l_h = l_maxy - l_miny + 1;
    int l_nn = (int)a_chain.size();

    CvPoint * lp_chain = new CvPoint[l_nn];

    for (int l_i = 0; l_i < l_nn; ++l_i)
        lp_chain[l_i] = a_chain[l_i];

    cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));

    delete[] lp_chain;

    //cv_show_image(lp_mask,"hallo1");

    std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);

    int l_ind = 0;

    for (int l_r = 0; l_r < l_h; ++l_r)
    {
        for (int l_c = 0; l_c < l_w; ++l_c)
        {
            lp_dst_3Dpts[l_ind].x = l_c + l_minx;
            lp_dst_3Dpts[l_ind].y = l_r + l_miny;
            lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
            ++l_ind;
        }
    }
    reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);

    l_ind = 0;

    for (int l_r = 0; l_r < l_h; ++l_r)
    {
        for (int l_c = 0; l_c < l_w; ++l_c)
        {
            float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);

            ++l_ind;

            if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)
            {
                if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
                {
                    for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
                    {
                        int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
                        int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

                        CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
                    }
                }
                else
                {
                    for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
                    {
                        int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
                        int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

                        CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
                    }
                }
            }
        }
    }
    cvReleaseImage(&lp_mask);
    cvReleaseMat(&lp_pts);
    cvReleaseMat(&lp_w);
    cvReleaseMat(&lp_v);
}

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
{
    mask = cv::Mat::zeros(depth.size(), CV_8U);
    std::vector<IplImage*> tmp;
    IplImage mask_ipl = mask;
    tmp.push_back(&mask_ipl);
    IplImage depth_ipl = depth;
    filterPlane(&depth_ipl, tmp, chain, f);
}

std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst)
{
    templateConvexHull(templates, num_modalities, offset, size, mask);

    const int OFFSET = 30;
    cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);

    CvMemStorage * lp_storage = cvCreateMemStorage(0);
    CvTreeNodeIterator l_iterator;
    CvSeqReader l_reader;
    CvSeq * lp_contour = 0;

    cv::Mat mask_copy = mask.clone();
    IplImage mask_copy_ipl = mask_copy;
    cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
                   CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    std::vector<CvPoint> l_pts1; // to use as input to cv_primesensor::filter_plane

    cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
    while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)
    {
        CvPoint l_pt0;
        cvStartReadSeq(lp_contour, &l_reader, 0);
        CV_READ_SEQ_ELEM(l_pt0, l_reader);
        l_pts1.push_back(l_pt0);

        for (int i = 0; i < lp_contour->total; ++i)
        {
            CvPoint l_pt1;
            CV_READ_SEQ_ELEM(l_pt1, l_reader);
            /// @todo Really need dst at all? Can just as well do this outside
            cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);

            l_pt0 = l_pt1;
            l_pts1.push_back(l_pt0);
        }
    }
    cvReleaseMemStorage(&lp_storage);

    return l_pts1;
}

// Adapted from cv_show_angles
cv::Mat displayQuantized(const cv::Mat& quantized)
{
    cv::Mat color(quantized.size(), CV_8UC3);
    for (int r = 0; r < quantized.rows; ++r)
    {
        const uchar* quant_r = quantized.ptr(r);
        cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);

        for (int c = 0; c < quantized.cols; ++c)
        {
            cv::Vec3b& bgr = color_r[c];
            switch (quant_r[c])
            {
                case 0:   bgr[0]=  0; bgr[1]=  0; bgr[2]=  0; break;
                case 1:   bgr[0]= 55; bgr[1]= 55; bgr[2]= 55; break;
                case 2:   bgr[0]= 80; bgr[1]= 80; bgr[2]= 80; break;
                case 4:   bgr[0]=105; bgr[1]=105; bgr[2]=105; break;
                case 8:   bgr[0]=130; bgr[1]=130; bgr[2]=130; break;
                case 16:  bgr[0]=155; bgr[1]=155; bgr[2]=155; break;
                case 32:  bgr[0]=180; bgr[1]=180; bgr[2]=180; break;
                case 64:  bgr[0]=205; bgr[1]=205; bgr[2]=205; break;
                case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230; break;
                case 255: bgr[0]=  0; bgr[1]=  0; bgr[2]=255; break;
                default:  bgr[0]=  0; bgr[1]=255; bgr[2]=  0; break;
            }
        }
    }

    return color;
}

// Adapted from cv_line_template::convex_hull
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst)
{
    std::vector<cv::Point> points;
    for (int m = 0; m < num_modalities; ++m)
    {
        for (int i = 0; i < (int)templates[m].features.size(); ++i)
        {
            cv::linemod::Feature f = templates[m].features[i];
            points.push_back(cv::Point(f.x, f.y) + offset);
        }
    }

    std::vector<cv::Point> hull;
    cv::convexHull(points, hull);

    dst = cv::Mat::zeros(size, CV_8U);
    const int hull_count = (int)hull.size();
    const cv::Point* hull_pts = &hull[0];
    cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
    static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
                                          CV_RGB(0, 255, 0),
                                          CV_RGB(255, 255, 0),
                                          CV_RGB(255, 140, 0),
                                          CV_RGB(255, 0, 0) };

    for (int m = 0; m < num_modalities; ++m)
    {
        // NOTE: Original demo recalculated max response for each feature in the TxT
        // box around it and chose the display color based on that response. Here
        // the display color just depends on the modality.
        cv::Scalar color = COLORS[m];

        for (int i = 0; i < (int)templates[m].features.size(); ++i)
        {
            cv::linemod::Feature f = templates[m].features[i];
            cv::Point pt(f.x + offset.x, f.y + offset.y);
            cv::circle(dst, pt, T / 2, color);
        }
    }
}
@@ -1,5 +1,6 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
@@ -1,84 +0,0 @@
|
||||
/*Authors
|
||||
* Manuela Chessa, Fabio Solari, Fabio Tatti, Silvio P. Sabatini
|
||||
*
|
||||
* manuela.chessa@unige.it, fabio.solari@unige.it
|
||||
*
|
||||
* PSPC-lab - University of Genoa
|
||||
*/
|
||||
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <cmath>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
static void help()
|
||||
{
|
||||
cout << "LogPolar Blind Spot Model sample.\nShortcuts:"
|
||||
"\n\tn for nearest pixel technique"
|
||||
"\n\tb for bilinear interpolation technique"
|
||||
"\n\to for overlapping circular receptive fields"
|
||||
"\n\ta for adjacent receptive fields"
|
||||
"\n\tq or ESC quit\n";
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
Mat img = imread(argc > 1 ? argv[1] : "lena.jpg",1); // open the image
|
||||
if(img.empty()) // check if we succeeded
|
||||
{
|
||||
cout << "can not load image\n";
|
||||
return 0;
|
||||
}
|
||||
help();
|
||||
|
||||
Size s=img.size();
|
||||
int w=s.width, h=s.height;
|
||||
int ro0=3; //radius of the blind spot
|
||||
int R=120; //number of rings
|
||||
|
||||
//Creation of the four different objects that implement the four log-polar transformations
|
||||
//Off-line computation
|
||||
Point2i center(w/2,h/2);
|
||||
LogPolar_Interp nearest(w, h, center, R, ro0, INTER_NEAREST);
|
||||
LogPolar_Interp bilin(w,h, center,R,ro0);
|
||||
LogPolar_Overlapping overlap(w,h,center,R,ro0);
|
||||
LogPolar_Adjacent adj(w,h,center,R,ro0,0.25);
|
||||
|
||||
namedWindow("Cartesian",1);
|
||||
namedWindow("retinal",1);
|
||||
namedWindow("cortical",1);
|
||||
int wk='n';
|
||||
Mat Cortical, Retinal;
|
||||
|
||||
//On-line computation
|
||||
for(;;)
|
||||
{
|
||||
if(wk=='n'){
|
||||
Cortical=nearest.to_cortical(img);
|
||||
Retinal=nearest.to_cartesian(Cortical);
|
||||
}else if (wk=='b'){
|
||||
Cortical=bilin.to_cortical(img);
|
||||
Retinal=bilin.to_cartesian(Cortical);
|
||||
}else if (wk=='o'){
|
||||
Cortical=overlap.to_cortical(img);
|
||||
Retinal=overlap.to_cartesian(Cortical);
|
||||
}else if (wk=='a'){
|
||||
Cortical=adj.to_cortical(img);
|
||||
Retinal=adj.to_cartesian(Cortical);
|
||||
}
|
||||
|
||||
imshow("Cartesian", img);
|
||||
imshow("cortical", Cortical);
|
||||
imshow("retinal", Retinal);
|
||||
|
||||
int c=waitKey(15);
|
||||
if (c>0) wk=c;
|
||||
if(wk =='q' || (wk & 255) == 27) break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@@ -4,6 +4,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace std;
@@ -1,59 +0,0 @@
|
||||
#include <stdio.h>
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/nonfree/nonfree.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
static void help()
|
||||
{
|
||||
printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
|
||||
"Using the SURF desriptor:\n"
|
||||
"\n"
|
||||
"Usage:\n matcher_simple <image1> <image2>\n");
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
if(argc != 3)
|
||||
{
|
||||
help();
|
||||
return -1;
|
||||
}
|
||||
|
||||
Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
|
||||
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
|
||||
if(img1.empty() || img2.empty())
|
||||
{
|
||||
printf("Can't read one of the images\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// detecting keypoints
|
||||
SurfFeatureDetector detector(400);
|
||||
vector<KeyPoint> keypoints1, keypoints2;
|
||||
detector.detect(img1, keypoints1);
|
||||
detector.detect(img2, keypoints2);
|
||||
|
||||
// computing descriptors
|
||||
SurfDescriptorExtractor extractor;
|
||||
Mat descriptors1, descriptors2;
|
||||
extractor.compute(img1, keypoints1, descriptors1);
|
||||
extractor.compute(img2, keypoints2, descriptors2);
|
||||
|
||||
// matching descriptors
|
||||
BFMatcher matcher(extractor.defaultNorm());
|
||||
vector<DMatch> matches;
|
||||
matcher.match(descriptors1, descriptors2, matches);
|
||||
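A common refinement at this point, not used by this sample, is Lowe's ratio test via knnMatch, which discards ambiguous matches before drawing. A sketch, assuming the conventional 0.75 threshold suits the data:

    // Keep a match only if it is clearly better than the runner-up.
    vector<vector<DMatch> > knnMatches;
    matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);
    vector<DMatch> goodMatches;
    for (size_t i = 0; i < knnMatches.size(); i++)
        if (knnMatches[i].size() == 2 &&
            knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
            goodMatches.push_back(knnMatches[i][0]);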
|
||||
// drawing the results
|
||||
namedWindow("matches", 1);
|
||||
Mat img_matches;
|
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
|
||||
imshow("matches", img_matches);
|
||||
waitKey(0);
|
||||
|
||||
return 0;
|
||||
}
|
@@ -1,262 +0,0 @@
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
const string defaultDetectorType = "SURF";
|
||||
const string defaultDescriptorType = "SURF";
|
||||
const string defaultMatcherType = "FlannBased";
|
||||
const string defaultQueryImageName = "../../opencv/samples/cpp/matching_to_many_images/query.png";
|
||||
const string defaultFileWithTrainImages = "../../opencv/samples/cpp/matching_to_many_images/train/trainImages.txt";
|
||||
const string defaultDirToSaveResImages = "../../opencv/samples/cpp/matching_to_many_images/results";
|
||||
|
||||
static void printPrompt( const string& applName )
|
||||
{
|
||||
cout << "/*\n"
|
||||
<< " * This is a sample on matching descriptors detected on one image to descriptors detected in image set.\n"
|
||||
<< " * So we have one query image and several train images. For each keypoint descriptor of query image\n"
|
||||
<< " * the one nearest train descriptor is found the entire collection of train images. To visualize the result\n"
|
||||
<< " * of matching we save images, each of which combines query and train image with matches between them (if they exist).\n"
|
||||
<< " * Match is drawn as line between corresponding points. Count of all matches is equel to count of\n"
|
||||
<< " * query keypoints, so we have the same count of lines in all set of result images (but not for each result\n"
|
||||
<< " * (train) image).\n"
|
||||
<< " */\n" << endl;
|
||||
|
||||
cout << endl << "Format:\n" << endl;
|
||||
cout << "./" << applName << " [detectorType] [descriptorType] [matcherType] [queryImage] [fileWithTrainImages] [dirToSaveResImages]" << endl;
|
||||
cout << endl;
|
||||
|
||||
cout << "\nExample:" << endl
|
||||
<< "./" << applName << " " << defaultDetectorType << " " << defaultDescriptorType << " " << defaultMatcherType << " "
|
||||
<< defaultQueryImageName << " " << defaultFileWithTrainImages << " " << defaultDirToSaveResImages << endl;
|
||||
}
|
||||
|
||||
static void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask )
|
||||
{
|
||||
mask.resize( matches.size() );
|
||||
fill( mask.begin(), mask.end(), 0 );
|
||||
for( size_t i = 0; i < matches.size(); i++ )
|
||||
{
|
||||
if( matches[i].imgIdx == trainImgIdx )
|
||||
mask[i] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
static void readTrainFilenames( const string& filename, string& dirName, vector<string>& trainFilenames )
|
||||
{
|
||||
trainFilenames.clear();
|
||||
|
||||
ifstream file( filename.c_str() );
|
||||
if ( !file.is_open() )
|
||||
return;
|
||||
|
||||
size_t pos = filename.rfind('\\');
|
||||
char dlmtr = '\\';
|
||||
if (pos == string::npos)
|
||||
{
|
||||
pos = filename.rfind('/');
|
||||
dlmtr = '/';
|
||||
}
|
||||
dirName = pos == string::npos ? "" : filename.substr(0, pos) + dlmtr;
|
||||
|
||||
while( !file.eof() )
|
||||
{
|
||||
string str; getline( file, str );
|
||||
if( str.empty() ) break;
|
||||
trainFilenames.push_back(str);
|
||||
}
|
||||
file.close();
|
||||
}
|
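The while(!file.eof()) loop above behaves correctly here only because it also breaks on the first empty line; a sketch of the more idiomatic form, which checks the stream state on every read and needs no eof() call (same behavior, assumed drop-in):

    string str;
    while( getline( file, str ) )
    {
        if( str.empty() )
            break;
        trainFilenames.push_back(str);
    }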
||||
|
||||
static bool createDetectorDescriptorMatcher( const string& detectorType, const string& descriptorType, const string& matcherType,
|
||||
Ptr<FeatureDetector>& featureDetector,
|
||||
Ptr<DescriptorExtractor>& descriptorExtractor,
|
||||
Ptr<DescriptorMatcher>& descriptorMatcher )
|
||||
{
|
||||
cout << "< Creating feature detector, descriptor extractor and descriptor matcher ..." << endl;
|
||||
featureDetector = FeatureDetector::create( detectorType );
|
||||
descriptorExtractor = DescriptorExtractor::create( descriptorType );
|
||||
descriptorMatcher = DescriptorMatcher::create( matcherType );
|
||||
cout << ">" << endl;
|
||||
|
||||
bool isCreated = featureDetector && descriptorExtractor && descriptorMatcher;
|
||||
if( !isCreated )
|
||||
cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;
|
||||
|
||||
return isCreated;
|
||||
}
|
||||
|
||||
static bool readImages( const string& queryImageName, const string& trainFilename,
|
||||
Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames )
|
||||
{
|
||||
cout << "< Reading the images..." << endl;
|
||||
queryImage = imread( queryImageName, IMREAD_GRAYSCALE);
|
||||
if( queryImage.empty() )
|
||||
{
|
||||
cout << "Query image can not be read." << endl << ">" << endl;
|
||||
return false;
|
||||
}
|
||||
string trainDirName;
|
||||
readTrainFilenames( trainFilename, trainDirName, trainImageNames );
|
||||
if( trainImageNames.empty() )
|
||||
{
|
||||
cout << "Train image filenames can not be read." << endl << ">" << endl;
|
||||
return false;
|
||||
}
|
||||
int readImageCount = 0;
|
||||
for( size_t i = 0; i < trainImageNames.size(); i++ )
|
||||
{
|
||||
string filename = trainDirName + trainImageNames[i];
|
||||
Mat img = imread( filename, IMREAD_GRAYSCALE );
|
||||
if( img.empty() )
|
||||
cout << "Train image " << filename << " can not be read." << endl;
|
||||
else
|
||||
readImageCount++;
|
||||
trainImages.push_back( img );
|
||||
}
|
||||
if( !readImageCount )
|
||||
{
|
||||
cout << "All train images can not be read." << endl << ">" << endl;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
cout << readImageCount << " train images were read." << endl;
|
||||
cout << ">" << endl;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
|
||||
const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints,
|
||||
Ptr<FeatureDetector>& featureDetector )
|
||||
{
|
||||
cout << endl << "< Extracting keypoints from images..." << endl;
|
||||
featureDetector->detect( queryImage, queryKeypoints );
|
||||
featureDetector->detect( trainImages, trainKeypoints );
|
||||
cout << ">" << endl;
|
||||
}
|
||||
|
||||
static void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, Mat& queryDescriptors,
|
||||
const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints, vector<Mat>& trainDescriptors,
|
||||
Ptr<DescriptorExtractor>& descriptorExtractor )
|
||||
{
|
||||
cout << "< Computing descriptors for keypoints..." << endl;
|
||||
descriptorExtractor->compute( queryImage, queryKeypoints, queryDescriptors );
|
||||
descriptorExtractor->compute( trainImages, trainKeypoints, trainDescriptors );
|
||||
|
||||
int totalTrainDesc = 0;
|
||||
for( vector<Mat>::const_iterator tdIter = trainDescriptors.begin(); tdIter != trainDescriptors.end(); tdIter++ )
|
||||
totalTrainDesc += tdIter->rows;
|
||||
|
||||
cout << "Query descriptors count: " << queryDescriptors.rows << "; Total train descriptors count: " << totalTrainDesc << endl;
|
||||
cout << ">" << endl;
|
||||
}
|
||||
|
||||
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
|
||||
vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher )
|
||||
{
|
||||
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
|
||||
TickMeter tm;
|
||||
|
||||
tm.start();
|
||||
descriptorMatcher->add( trainDescriptors );
|
||||
descriptorMatcher->train();
|
||||
tm.stop();
|
||||
double buildTime = tm.getTimeMilli();
|
||||
|
||||
tm.start();
|
||||
descriptorMatcher->match( queryDescriptors, matches );
|
||||
tm.stop();
|
||||
double matchTime = tm.getTimeMilli();
|
||||
|
||||
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
|
||||
|
||||
cout << "Number of matches: " << matches.size() << endl;
|
||||
cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
|
||||
cout << ">" << endl;
|
||||
}
|
||||
|
||||
static void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
|
||||
const vector<Mat>& trainImages, const vector<vector<KeyPoint> >& trainKeypoints,
|
||||
const vector<DMatch>& matches, const vector<string>& trainImagesNames, const string& resultDir )
|
||||
{
|
||||
cout << "< Save results..." << endl;
|
||||
Mat drawImg;
|
||||
vector<char> mask;
|
||||
for( size_t i = 0; i < trainImages.size(); i++ )
|
||||
{
|
||||
if( !trainImages[i].empty() )
|
||||
{
|
||||
maskMatchesByTrainImgIdx( matches, (int)i, mask );
|
||||
drawMatches( queryImage, queryKeypoints, trainImages[i], trainKeypoints[i],
|
||||
matches, drawImg, Scalar(255, 0, 0), Scalar(0, 255, 255), mask );
|
||||
string filename = resultDir + "/res_" + trainImagesNames[i];
|
||||
if( !imwrite( filename, drawImg ) )
|
||||
cout << "Image " << filename << " can not be saved (may be because directory " << resultDir << " does not exist)." << endl;
|
||||
}
|
||||
}
|
||||
cout << ">" << endl;
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
string detectorType = defaultDetectorType;
|
||||
string descriptorType = defaultDescriptorType;
|
||||
string matcherType = defaultMatcherType;
|
||||
string queryImageName = defaultQueryImageName;
|
||||
string fileWithTrainImages = defaultFileWithTrainImages;
|
||||
string dirToSaveResImages = defaultDirToSaveResImages;
|
||||
|
||||
if( argc != 7 && argc != 1 )
|
||||
{
|
||||
printPrompt( argv[0] );
|
||||
return -1;
|
||||
}
|
||||
|
||||
if( argc != 1 )
|
||||
{
|
||||
detectorType = argv[1]; descriptorType = argv[2]; matcherType = argv[3];
|
||||
queryImageName = argv[4]; fileWithTrainImages = argv[5];
|
||||
dirToSaveResImages = argv[6];
|
||||
}
|
||||
|
||||
Ptr<FeatureDetector> featureDetector;
|
||||
Ptr<DescriptorExtractor> descriptorExtractor;
|
||||
Ptr<DescriptorMatcher> descriptorMatcher;
|
||||
if( !createDetectorDescriptorMatcher( detectorType, descriptorType, matcherType, featureDetector, descriptorExtractor, descriptorMatcher ) )
|
||||
{
|
||||
printPrompt( argv[0] );
|
||||
return -1;
|
||||
}
|
||||
|
||||
Mat queryImage;
|
||||
vector<Mat> trainImages;
|
||||
vector<string> trainImagesNames;
|
||||
if( !readImages( queryImageName, fileWithTrainImages, queryImage, trainImages, trainImagesNames ) )
|
||||
{
|
||||
printPrompt( argv[0] );
|
||||
return -1;
|
||||
}
|
||||
|
||||
vector<KeyPoint> queryKeypoints;
|
||||
vector<vector<KeyPoint> > trainKeypoints;
|
||||
detectKeypoints( queryImage, queryKeypoints, trainImages, trainKeypoints, featureDetector );
|
||||
|
||||
Mat queryDescriptors;
|
||||
vector<Mat> trainDescriptors;
|
||||
computeDescriptors( queryImage, queryKeypoints, queryDescriptors,
|
||||
trainImages, trainKeypoints, trainDescriptors,
|
||||
descriptorExtractor );
|
||||
|
||||
vector<DMatch> matches;
|
||||
matchDescriptors( queryDescriptors, trainDescriptors, matches, descriptorMatcher );
|
||||
|
||||
saveResultImages( queryImage, queryKeypoints, trainImages, trainKeypoints,
|
||||
matches, trainImagesNames, dirToSaveResImages );
|
||||
return 0;
|
||||
}
|
(four binary image files removed: 119, 116, 112 and 118 KiB)
@@ -1,3 +0,0 @@
|
||||
1.png
|
||||
2.png
|
||||
3.png
|
@@ -1,77 +0,0 @@
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
static void help(char** argv)
|
||||
{
|
||||
cout << "\nDemonstrate mean-shift based color segmentation in spatial pyramid.\n"
|
||||
<< "Call:\n " << argv[0] << " image\n"
|
||||
<< "This program allows you to set the spatial and color radius\n"
|
||||
<< "of the mean shift window as well as the number of pyramid reduction levels explored\n"
|
||||
<< endl;
|
||||
}
|
||||
|
||||
//This colors the segmentations
|
||||
static void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
|
||||
{
|
||||
CV_Assert( !img.empty() );
|
||||
RNG rng = theRNG();
|
||||
Mat mask( img.rows+2, img.cols+2, CV_8UC1, Scalar::all(0) );
|
||||
for( int y = 0; y < img.rows; y++ )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x++ )
|
||||
{
|
||||
if( mask.at<uchar>(y+1, x+1) == 0 )
|
||||
{
|
||||
Scalar newVal( rng(256), rng(256), rng(256) );
|
||||
floodFill( img, mask, Point(x,y), newVal, 0, colorDiff, colorDiff );
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
string winName = "meanshift";
|
||||
int spatialRad, colorRad, maxPyrLevel;
|
||||
Mat img, res;
|
||||
|
||||
static void meanShiftSegmentation( int, void* )
|
||||
{
|
||||
cout << "spatialRad=" << spatialRad << "; "
|
||||
<< "colorRad=" << colorRad << "; "
|
||||
<< "maxPyrLevel=" << maxPyrLevel << endl;
|
||||
pyrMeanShiftFiltering( img, res, spatialRad, colorRad, maxPyrLevel );
|
||||
floodFillPostprocess( res, Scalar::all(2) );
|
||||
imshow( winName, res );
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
if( argc !=2 )
|
||||
{
|
||||
help(argv);
|
||||
return -1;
|
||||
}
|
||||
|
||||
img = imread( argv[1] );
|
||||
if( img.empty() )
|
||||
return -1;
|
||||
|
||||
spatialRad = 10;
|
||||
colorRad = 10;
|
||||
maxPyrLevel = 1;
|
||||
|
||||
namedWindow( winName, WINDOW_AUTOSIZE );
|
||||
|
||||
createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation );
|
||||
createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation );
|
||||
createTrackbar( "maxPyrLevel", winName, &maxPyrLevel, 5, meanShiftSegmentation );
|
||||
|
||||
meanShiftSegmentation(0, 0);
|
||||
waitKey();
|
||||
return 0;
|
||||
}
|
@@ -1,4 +1,5 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
samples/cpp/motempl.cpp (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
#include "opencv2/video/tracking_c.h"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/videoio/videoio_c.h"
|
||||
#include "opencv2/highgui/highgui_c.h"
|
||||
#include <time.h>
|
||||
#include <stdio.h>
|
||||
#include <ctype.h>
|
||||
|
||||
static void help(void)
|
||||
{
|
||||
printf(
|
||||
"\nThis program demonstrated the use of motion templates -- basically using the gradients\n"
|
||||
"of thresholded layers of decaying frame differencing. New movements are stamped on top with floating system\n"
|
||||
"time code and motions too old are thresholded away. This is the 'motion history file'. The program reads from the camera of your choice or from\n"
|
||||
"a file. Gradients of motion history are used to detect direction of motoin etc\n"
|
||||
"Usage :\n"
|
||||
"./motempl [camera number 0-n or file name, default is camera 0]\n"
|
||||
);
|
||||
}
|
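For reference, the per-pixel rule applied by cvUpdateMotionHistory below is easy to state; the sketch spells it out (written for clarity only, it is not called anywhere in this sample):

    // Scalar form of the cvUpdateMotionHistory update for one pixel.
    static inline float updateMHIPixel(float mhi, unsigned char silh, double ts, double dur)
    {
        if (silh != 0) return (float)ts; // stamp fresh motion with the current time
        if (mhi < ts - dur) return 0.f;  // forget motion older than the duration
        return mhi;                      // otherwise keep the decaying history
    }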
||||
// various tracking parameters (in seconds)
|
||||
const double MHI_DURATION = 1;
|
||||
const double MAX_TIME_DELTA = 0.5;
|
||||
const double MIN_TIME_DELTA = 0.05;
|
||||
// length of the cyclic frame buffer used for motion detection
// (should probably depend on FPS)
|
||||
const int N = 4;
|
||||
|
||||
// ring image buffer
|
||||
IplImage **buf = 0;
|
||||
int last = 0;
|
||||
|
||||
// temporary images
|
||||
IplImage *mhi = 0; // MHI
|
||||
IplImage *orient = 0; // orientation
|
||||
IplImage *mask = 0; // valid orientation mask
|
||||
IplImage *segmask = 0; // motion segmentation map
|
||||
CvMemStorage* storage = 0; // temporary storage
|
||||
|
||||
// parameters:
|
||||
// img - input video frame
|
||||
// dst - resultant motion picture
|
||||
// diff_threshold - threshold used when differencing consecutive frames
|
||||
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
|
||||
{
|
||||
double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
|
||||
CvSize size = cvSize(img->width,img->height); // get current frame size
|
||||
int i, idx1 = last, idx2;
|
||||
IplImage* silh;
|
||||
CvSeq* seq;
|
||||
CvRect comp_rect;
|
||||
double count;
|
||||
double angle;
|
||||
CvPoint center;
|
||||
double magnitude;
|
||||
CvScalar color;
|
||||
|
||||
// allocate images at the beginning or
|
||||
// reallocate them if the frame size is changed
|
||||
if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
|
||||
if( buf == 0 ) {
|
||||
buf = (IplImage**)malloc(N*sizeof(buf[0]));
|
||||
memset( buf, 0, N*sizeof(buf[0]));
|
||||
}
|
||||
|
||||
for( i = 0; i < N; i++ ) {
|
||||
cvReleaseImage( &buf[i] );
|
||||
buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
|
||||
cvZero( buf[i] );
|
||||
}
|
||||
cvReleaseImage( &mhi );
|
||||
cvReleaseImage( &orient );
|
||||
cvReleaseImage( &segmask );
|
||||
cvReleaseImage( &mask );
|
||||
|
||||
mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
|
||||
cvZero( mhi ); // clear MHI at the beginning
|
||||
orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
|
||||
segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
|
||||
mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
|
||||
}
|
||||
|
||||
cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
|
||||
|
||||
idx2 = (last + 1) % N; // index of (last - (N-1))th frame
|
||||
last = idx2;
|
||||
|
||||
silh = buf[idx2];
|
||||
cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
|
||||
|
||||
cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
|
||||
cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI
|
||||
|
||||
// convert MHI to blue 8u image
|
||||
cvCvtScale( mhi, mask, 255./MHI_DURATION,
|
||||
(MHI_DURATION - timestamp)*255./MHI_DURATION );
|
||||
cvZero( dst );
|
||||
cvMerge( mask, 0, 0, 0, dst );
|
||||
|
||||
// calculate motion gradient orientation and valid orientation mask
|
||||
cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
|
||||
|
||||
if( !storage )
|
||||
storage = cvCreateMemStorage(0);
|
||||
else
|
||||
cvClearMemStorage(storage);
|
||||
|
||||
// segment motion: get sequence of motion components
|
||||
// segmask is marked motion components map. It is not used further
|
||||
seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );
|
||||
|
||||
// iterate through the motion components,
|
||||
// One more iteration (i == -1) corresponds to the whole image (global motion)
|
||||
for( i = -1; i < seq->total; i++ ) {
|
||||
|
||||
if( i < 0 ) { // case of the whole image
|
||||
comp_rect = cvRect( 0, 0, size.width, size.height );
|
||||
color = CV_RGB(255,255,255);
|
||||
magnitude = 100;
|
||||
}
|
||||
else { // i-th motion component
|
||||
comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
|
||||
if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
|
||||
continue;
|
||||
color = CV_RGB(255,0,0);
|
||||
magnitude = 30;
|
||||
}
|
||||
|
||||
// select component ROI
|
||||
cvSetImageROI( silh, comp_rect );
|
||||
cvSetImageROI( mhi, comp_rect );
|
||||
cvSetImageROI( orient, comp_rect );
|
||||
cvSetImageROI( mask, comp_rect );
|
||||
|
||||
// calculate orientation
|
||||
angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
|
||||
angle = 360.0 - angle; // adjust for images with top-left origin
|
||||
|
||||
count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
|
||||
|
||||
cvResetImageROI( mhi );
|
||||
cvResetImageROI( orient );
|
||||
cvResetImageROI( mask );
|
||||
cvResetImageROI( silh );
|
||||
|
||||
// check for the case of little motion
|
||||
if( count < comp_rect.width*comp_rect.height * 0.05 )
|
||||
continue;
|
||||
|
||||
// draw a clock with arrow indicating the direction
|
||||
center = cvPoint( (comp_rect.x + comp_rect.width/2),
|
||||
(comp_rect.y + comp_rect.height/2) );
|
||||
|
||||
cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
|
||||
cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
|
||||
cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
IplImage* motion = 0;
|
||||
CvCapture* capture = 0;
|
||||
|
||||
help();
|
||||
|
||||
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
|
||||
capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
|
||||
else if( argc == 2 )
|
||||
capture = cvCaptureFromFile( argv[1] );
|
||||
|
||||
if( capture )
|
||||
{
|
||||
cvNamedWindow( "Motion", 1 );
|
||||
|
||||
for(;;)
|
||||
{
|
||||
IplImage* image = cvQueryFrame( capture );
|
||||
if( !image )
|
||||
break;
|
||||
|
||||
if( !motion )
|
||||
{
|
||||
motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
|
||||
cvZero( motion );
|
||||
motion->origin = image->origin;
|
||||
}
|
||||
|
||||
update_mhi( image, motion, 30 );
|
||||
cvShowImage( "Motion", motion );
|
||||
|
||||
if( cvWaitKey(10) >= 0 )
|
||||
break;
|
||||
}
|
||||
cvReleaseCapture( &capture );
|
||||
cvDestroyWindow( "Motion" );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef _EiC
|
||||
main(1,"motempl.c");
|
||||
#endif
|
@@ -17,6 +17,7 @@
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
@@ -1,3 +1,4 @@
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@@ -43,6 +43,7 @@
#include <sstream>

#include <opencv2/core/core.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
@@ -1,180 +0,0 @@
|
||||
#include <opencv2/core/utility.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/objdetect.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <opencv2/softcascade.hpp>
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
|
||||
void filter_rects(const std::vector<cv::Rect>& candidates, std::vector<cv::Rect>& objects);
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
const std::string keys =
|
||||
"{help h usage ? | | print this message and exit }"
|
||||
"{cascade c | | path to cascade xml, if empty HOG detector will be executed }"
|
||||
"{frame f | | wildchart pattern to frame source}"
|
||||
"{min_scale |0.4 | minimum scale to detect }"
|
||||
"{max_scale |5.0 | maxamum scale to detect }"
|
||||
"{total_scales |55 | prefered number of scales between min and max }"
|
||||
"{write_file wf |0 | write to .txt. Disabled by default.}"
|
||||
"{write_image wi |0 | write to image. Disabled by default.}"
|
||||
"{show_image si |1 | show image. Enabled by default.}"
|
||||
"{threshold thr |-1 | detection threshold. Detections with score less then threshold will be ignored.}"
|
||||
;
|
||||
|
||||
cv::CommandLineParser parser(argc, argv, keys);
|
||||
parser.about("Soft cascade detection application.");
|
||||
|
||||
if (parser.has("help"))
|
||||
{
|
||||
parser.printMessage();
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!parser.check())
|
||||
{
|
||||
parser.printErrors();
|
||||
return 1;
|
||||
}
|
||||
|
||||
int wf = parser.get<int>("write_file");
|
||||
if (wf) std::cout << "resulte will be stored to .txt file with the same name as image." << std::endl;
|
||||
|
||||
int wi = parser.get<int>("write_image");
|
||||
if (wi) std::cout << "resulte will be stored to image with the same name as input plus dt." << std::endl;
|
||||
|
||||
int si = parser.get<int>("show_image");
|
||||
|
||||
float minScale = parser.get<float>("min_scale");
|
||||
float maxScale = parser.get<float>("max_scale");
|
||||
int scales = parser.get<int>("total_scales");
|
||||
int thr = parser.get<int>("threshold");
|
||||
|
||||
cv::HOGDescriptor hog;
|
||||
cv::softcascade::Detector cascade;
|
||||
|
||||
bool useHOG = false;
|
||||
std::string cascadePath = parser.get<std::string>("cascade");
|
||||
if (cascadePath.empty())
|
||||
{
|
||||
useHOG = true;
|
||||
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
|
||||
std::cout << "going to use HOG detector." << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::FileStorage fs(cascadePath, cv::FileStorage::READ);
|
||||
if( !fs.isOpened())
|
||||
{
|
||||
std::cout << "Soft Cascade file " << cascadePath << " can't be opened." << std::endl << std::flush;
|
||||
return 1;
|
||||
}
|
||||
|
||||
cascade = cv::softcascade::Detector(minScale, maxScale, scales, cv::softcascade::Detector::DOLLAR);
|
||||
|
||||
if (!cascade.load(fs.getFirstTopLevelNode()))
|
||||
{
|
||||
std::cout << "Soft Cascade can't be parsed." << std::endl << std::flush;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
std::string src = parser.get<std::string>("frame");
|
||||
std::vector<cv::String> frames;
|
||||
cv::glob(parser.get<std::string>("frame"), frames);
|
||||
std::cout << "collected " << src << " " << frames.size() << " frames." << std::endl;
|
||||
|
||||
for (int i = 0; i < (int)frames.size(); ++i)
|
||||
{
|
||||
std::string frame_source = frames[i];
|
||||
cv::Mat frame = cv::imread(frame_source);
|
||||
|
||||
if(frame.empty())
|
||||
{
|
||||
std::cout << "Frame source " << frame_sourse << " can't be opened." << std::endl << std::flush;
|
||||
continue;
|
||||
}
|
||||
|
||||
std::ofstream myfile;
|
||||
if (wf)
|
||||
myfile.open((frame_source.replace(frame_source.end() - 3, frame_source.end(), "txt")).c_str(), std::ios::out);
|
||||
|
||||
////
|
||||
if (useHOG)
|
||||
{
|
||||
std::vector<cv::Rect> found, found_filtered;
|
||||
// Run the detector with default parameters. To get a higher hit-rate
|
||||
// (and more false alarms, respectively), decrease the hitThreshold and
|
||||
// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
|
||||
hog.detectMultiScale(frame, found, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2);
|
||||
|
||||
filter_rects(found, found_filtered);
|
||||
std::cout << "collected: " << (int)found_filtered.size() << " detections." << std::endl;
|
||||
|
||||
for (size_t ff = 0; ff < found_filtered.size(); ++ff)
|
||||
{
|
||||
cv::Rect r = found_filtered[ff];
|
||||
cv::rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
|
||||
|
||||
if (wf) myfile << r.x << "," << r.y << "," << r.width << "," << r.height << "," << 0.f << "\n";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::vector<cv::softcascade::Detection> objects;
|
||||
cascade.detect(frame, cv::noArray(), objects);
|
||||
std::cout << "collected: " << (int)objects.size() << " detections." << std::endl;
|
||||
|
||||
for (int obj = 0; obj < (int)objects.size(); ++obj)
|
||||
{
|
||||
cv::softcascade::Detection d = objects[obj];
|
||||
|
||||
if(d.confidence > thr)
|
||||
{
|
||||
float b = d.confidence * 1.5f;
|
||||
|
||||
std::stringstream conf(std::stringstream::in | std::stringstream::out);
|
||||
conf << d.confidence;
|
||||
|
||||
cv::rectangle(frame, cv::Rect((int)d.x, (int)d.y, (int)d.w, (int)d.h), cv::Scalar(b, 0, 255 - b, 255), 2);
|
||||
cv::putText(frame, conf.str() , cv::Point((int)d.x + 10, (int)d.y - 5),1, 1.1, cv::Scalar(25, 133, 255, 0), 1, cv::LINE_AA);
|
||||
|
||||
if (wf)
|
||||
myfile << d.x << "," << d.y << "," << d.w << "," << d.h << "," << d.confidence << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (wi) cv::imwrite(frame_source + ".dt.png", frame);
|
||||
if (wf) myfile.close();
|
||||
|
||||
if (si)
|
||||
{
|
||||
cv::imshow("pedestrian detector", frame);
|
||||
cv::waitKey(10);
|
||||
}
|
||||
}
|
||||
|
||||
if (si) cv::waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void filter_rects(const std::vector<cv::Rect>& candidates, std::vector<cv::Rect>& objects)
|
||||
{
|
||||
size_t i, j;
|
||||
for (i = 0; i < candidates.size(); ++i)
|
||||
{
|
||||
cv::Rect r = candidates[i];
|
||||
|
||||
for (j = 0; j < candidates.size(); ++j)
|
||||
if (j != i && (r & candidates[j]) == r)
|
||||
break;
|
||||
|
||||
if (j == candidates.size())
|
||||
objects.push_back(r);
|
||||
}
|
||||
}
|
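The containment test in filter_rects above works because cv::Rect's operator& yields the intersection, so (r & q) == r exactly when r lies entirely inside q; only rectangles not nested inside any other candidate survive. A standalone illustration with made-up values:

    cv::Rect inner(10, 10, 20, 20), outer(0, 0, 100, 100);
    CV_Assert((inner & outer) == inner); // inner is contained, so it would be dropped
    CV_Assert((outer & inner) != outer); // outer is kept: nothing fully contains it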
@@ -1,4 +1,5 @@
#include "opencv2/core/core.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@@ -12,6 +12,7 @@
using namespace std;
using namespace cv;
using namespace cv::ml;

const Scalar WHITE_COLOR = Scalar(255,255,255);
const string winName = "points";
@@ -22,18 +23,20 @@ RNG rng;
|
||||
|
||||
vector<Point> trainedPoints;
|
||||
vector<int> trainedPointsMarkers;
|
||||
vector<Scalar> classColors;
|
||||
const int MAX_CLASSES = 2;
|
||||
vector<Vec3b> classColors(MAX_CLASSES);
|
||||
int currentClass = 0;
|
||||
vector<int> classCounters(MAX_CLASSES);
|
||||
|
||||
#define _NBC_ 0 // normal Bayesian classifier
#define _KNN_ 0 // k nearest neighbors classifier
#define _SVM_ 0 // support vector machine
#define _NBC_ 1 // normal Bayesian classifier
#define _KNN_ 1 // k nearest neighbors classifier
#define _SVM_ 1 // support vector machine
#define _DT_ 1 // decision tree
#define _BT_ 0 // AdaBoost
#define _BT_ 1 // AdaBoost
|
||||
#define _GBT_ 0 // gradient boosted trees
|
||||
#define _RF_ 0 // random forest
|
||||
#define _ERT_ 0 // extremely randomized trees
|
||||
#define _ANN_ 0 // artificial neural networks
|
||||
#define _EM_ 0 // expectation-maximization
|
||||
#define _RF_ 1 // random forest
|
||||
#define _ANN_ 1 // artificial neural networks
|
||||
#define _EM_ 1 // expectation-maximization
|
||||
|
||||
static void on_mouse( int event, int x, int y, int /*flags*/, void* )
|
||||
{
|
||||
@@ -44,76 +47,43 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
|
||||
|
||||
if( event == EVENT_LBUTTONUP )
|
||||
{
|
||||
if( classColors.empty() )
|
||||
return;
|
||||
|
||||
trainedPoints.push_back( Point(x,y) );
|
||||
trainedPointsMarkers.push_back( (int)(classColors.size()-1) );
|
||||
trainedPointsMarkers.push_back( currentClass );
|
||||
classCounters[currentClass]++;
|
||||
updateFlag = true;
|
||||
}
|
||||
else if( event == EVENT_RBUTTONUP )
|
||||
{
|
||||
#if _BT_
|
||||
if( classColors.size() < 2 )
|
||||
{
|
||||
#endif
|
||||
classColors.push_back( Scalar((uchar)rng(256), (uchar)rng(256), (uchar)rng(256)) );
|
||||
updateFlag = true;
|
||||
#if _BT_
|
||||
}
|
||||
else
|
||||
cout << "New class can not be added, because CvBoost can only be used for 2-class classification" << endl;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
//draw
|
||||
if( updateFlag )
|
||||
{
|
||||
img = Scalar::all(0);
|
||||
|
||||
// put the text
|
||||
stringstream text;
|
||||
text << "current class " << classColors.size()-1;
|
||||
putText( img, text.str(), Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
|
||||
|
||||
text.str("");
|
||||
text << "total classes " << classColors.size();
|
||||
putText( img, text.str(), Point(10,50), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
|
||||
|
||||
text.str("");
|
||||
text << "total points " << trainedPoints.size();
|
||||
putText(img, text.str(), Point(10,75), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
|
||||
|
||||
// draw points
|
||||
for( size_t i = 0; i < trainedPoints.size(); i++ )
|
||||
circle( img, trainedPoints[i], 5, classColors[trainedPointsMarkers[i]], -1 );
|
||||
{
|
||||
Vec3b c = classColors[trainedPointsMarkers[i]];
|
||||
circle( img, trainedPoints[i], 5, Scalar(c), -1 );
|
||||
}
|
||||
|
||||
imshow( winName, img );
|
||||
}
|
||||
}
|
||||
|
||||
static void prepare_train_data( Mat& samples, Mat& classes )
|
||||
static Mat prepare_train_samples(const vector<Point>& pts)
|
||||
{
|
||||
Mat( trainedPoints ).copyTo( samples );
|
||||
Mat( trainedPointsMarkers ).copyTo( classes );
|
||||
|
||||
// reshape trainData and change its type
|
||||
samples = samples.reshape( 1, samples.rows );
|
||||
samples.convertTo( samples, CV_32FC1 );
|
||||
Mat samples;
|
||||
Mat(pts).reshape(1, (int)pts.size()).convertTo(samples, CV_32F);
|
||||
return samples;
|
||||
}
|
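The reshape/convert pair above turns the N x 1 two-channel matrix that wraps vector<Point> into an N x 2 single-channel CV_32F matrix, one row per sample, which is the layout cv::ml expects. A standalone illustration with made-up points:

    vector<Point> pts;
    pts.push_back(Point(10, 20));
    pts.push_back(Point(30, 40));
    Mat m = Mat(pts).reshape(1, (int)pts.size()); // 2x2 CV_32S: [10 20; 30 40]
    Mat samples;
    m.convertTo(samples, CV_32F);                 // 2x2 CV_32F, ready for TrainData::create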
||||
|
||||
#if _NBC_
|
||||
static void find_decision_boundary_NBC()
|
||||
static Ptr<TrainData> prepare_train_data()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
CvNormalBayesClassifier normalBayesClassifier( trainSamples, trainClasses );
|
||||
Mat samples = prepare_train_samples(trainedPoints);
|
||||
return TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers));
|
||||
}
|
||||
|
||||
static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
|
||||
{
|
||||
Mat testSample( 1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
@@ -122,378 +92,171 @@ static void find_decision_boundary_NBC()
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)normalBayesClassifier.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 1, classColors[response] );
|
||||
int response = (int)model->predict( testSample );
|
||||
dst.at<Vec3b>(y, x) = classColors[response];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if _NBC_
|
||||
static void find_decision_boundary_NBC()
|
||||
{
|
||||
// learn classifier
|
||||
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data(), NormalBayesClassifier::Params());
|
||||
|
||||
predict_and_paint(normalBayesClassifier, imgDst);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if _KNN_
|
||||
static void find_decision_boundary_KNN( int K )
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
#if defined HAVE_OPENCV_OCL && _OCL_KNN_
|
||||
cv::ocl::KNearestNeighbour knnClassifier;
|
||||
Mat temp, result;
|
||||
knnClassifier.train(trainSamples, trainClasses, temp, false, K);
|
||||
cv::ocl::oclMat testSample_ocl, result_ocl;
|
||||
#else
|
||||
CvKNearest knnClassifier( trainSamples, trainClasses, Mat(), false, K );
|
||||
#endif
|
||||
|
||||
Mat testSample( 1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
#if defined HAVE_OPENCV_OCL && _OCL_KNN_
|
||||
testSample_ocl.upload(testSample);
|
||||
|
||||
knnClassifier.find_nearest(testSample_ocl, K, result_ocl);
|
||||
|
||||
result_ocl.download(result);
|
||||
int response = saturate_cast<int>(result.at<float>(0));
|
||||
circle(imgDst, Point(x, y), 1, classColors[response]);
|
||||
#else
|
||||
|
||||
int response = (int)knnClassifier.find_nearest( testSample, K );
|
||||
circle( imgDst, Point(x,y), 1, classColors[response] );
|
||||
#endif
|
||||
}
|
||||
}
|
||||
Ptr<KNearest> knn = StatModel::train<KNearest>(prepare_train_data(), KNearest::Params(K, true));
|
||||
predict_and_paint(knn, imgDst);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _SVM_
|
||||
static void find_decision_boundary_SVM( CvSVMParams params )
|
||||
static void find_decision_boundary_SVM( SVM::Params params )
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
Ptr<SVM> svm = StatModel::train<SVM>(prepare_train_data(), params);
|
||||
predict_and_paint(svm, imgDst);
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
#if defined HAVE_OPENCV_OCL && _OCL_SVM_
|
||||
cv::ocl::CvSVM_OCL svmClassifier(trainSamples, trainClasses, Mat(), Mat(), params);
|
||||
#else
|
||||
CvSVM svmClassifier( trainSamples, trainClasses, Mat(), Mat(), params );
|
||||
#endif
|
||||
|
||||
Mat testSample( 1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
Mat sv = svm->getSupportVectors();
|
||||
for( int i = 0; i < sv.rows; i++ )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)svmClassifier.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
const float* supportVector = sv.ptr<float>(i);
|
||||
circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, Scalar(255,255,255), -1 );
|
||||
}
|
||||
|
||||
|
||||
for( int i = 0; i < svmClassifier.get_support_vector_count(); i++ )
|
||||
{
|
||||
const float* supportVector = svmClassifier.get_support_vector(i);
|
||||
circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, CV_RGB(255,255,255), -1 );
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _DT_
|
||||
static void find_decision_boundary_DT()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
DTrees::Params params;
|
||||
params.maxDepth = 8;
|
||||
params.minSampleCount = 2;
|
||||
params.useSurrogates = false;
|
||||
params.CVFolds = 0; // the number of cross-validation folds
|
||||
params.use1SERule = false;
|
||||
params.truncatePrunedTree = false;
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
Ptr<DTrees> dtree = StatModel::train<DTrees>(prepare_train_data(), params);
|
||||
|
||||
// learn classifier
|
||||
CvDTree dtree;
|
||||
|
||||
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
|
||||
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
|
||||
|
||||
CvDTreeParams params;
|
||||
params.max_depth = 8;
|
||||
params.min_sample_count = 2;
|
||||
params.use_surrogates = false;
|
||||
params.cv_folds = 0; // the number of cross-validation folds
|
||||
params.use_1se_rule = false;
|
||||
params.truncate_pruned_tree = false;
|
||||
|
||||
dtree.train( trainSamples, CV_ROW_SAMPLE, trainClasses,
|
||||
Mat(), Mat(), var_types, Mat(), params );
|
||||
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)dtree.predict( testSample )->value;
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
}
|
||||
predict_and_paint(dtree, imgDst);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _BT_
|
||||
void find_decision_boundary_BT()
|
||||
static void find_decision_boundary_BT()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
Boost::Params params( Boost::DISCRETE, // boost_type
|
||||
100, // weak_count
|
||||
0.95, // weight_trim_rate
|
||||
2, // max_depth
|
||||
false, //use_surrogates
|
||||
Mat() // priors
|
||||
);
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
CvBoost boost;
|
||||
|
||||
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
|
||||
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
|
||||
|
||||
CvBoostParams params( CvBoost::DISCRETE, // boost_type
|
||||
100, // weak_count
|
||||
0.95, // weight_trim_rate
|
||||
2, // max_depth
|
||||
false, //use_surrogates
|
||||
0 // priors
|
||||
);
|
||||
|
||||
boost.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
|
||||
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)boost.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
}
|
||||
Ptr<Boost> boost = StatModel::train<Boost>(prepare_train_data(), params);
|
||||
predict_and_paint(boost, imgDst);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if _GBT_
|
||||
void find_decision_boundary_GBT()
|
||||
static void find_decision_boundary_GBT()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
GBTrees::Params params( GBTrees::DEVIANCE_LOSS, // loss_function_type
|
||||
100, // weak_count
|
||||
0.1f, // shrinkage
|
||||
1.0f, // subsample_portion
|
||||
2, // max_depth
|
||||
false // use_surrogates )
|
||||
);
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
CvGBTrees gbtrees;
|
||||
|
||||
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
|
||||
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
|
||||
|
||||
CvGBTreesParams params( CvGBTrees::DEVIANCE_LOSS, // loss_function_type
|
||||
100, // weak_count
|
||||
0.1f, // shrinkage
|
||||
1.0f, // subsample_portion
|
||||
2, // max_depth
|
||||
false // use_surrogates )
|
||||
);
|
||||
|
||||
gbtrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
|
||||
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)gbtrees.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
}
|
||||
Ptr<GBTrees> gbtrees = StatModel::train<GBTrees>(prepare_train_data(), params);
|
||||
predict_and_paint(gbtrees, imgDst);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if _RF_
|
||||
void find_decision_boundary_RF()
|
||||
static void find_decision_boundary_RF()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
CvRTrees rtrees;
|
||||
CvRTParams params( 4, // max_depth,
|
||||
RTrees::Params params( 4, // max_depth,
|
||||
2, // min_sample_count,
|
||||
0.f, // regression_accuracy,
|
||||
false, // use_surrogates,
|
||||
16, // max_categories,
|
||||
0, // priors,
|
||||
Mat(), // priors,
|
||||
false, // calc_var_importance,
|
||||
1, // nactive_vars,
|
||||
5, // max_num_of_trees_in_the_forest,
|
||||
0, // forest_accuracy,
|
||||
CV_TERMCRIT_ITER // termcrit_type
|
||||
TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest,
|
||||
);
|
||||
|
||||
rtrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), Mat(), Mat(), params );
|
||||
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)rtrees.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
}
|
||||
Ptr<RTrees> rtrees = StatModel::train<RTrees>(prepare_train_data(), params);
|
||||
predict_and_paint(rtrees, imgDst);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if _ERT_
|
||||
void find_decision_boundary_ERT()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// learn classifier
|
||||
CvERTrees ertrees;
|
||||
|
||||
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
|
||||
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
|
||||
|
||||
CvRTParams params( 4, // max_depth,
|
||||
2, // min_sample_count,
|
||||
0.f, // regression_accuracy,
|
||||
false, // use_surrogates,
|
||||
16, // max_categories,
|
||||
0, // priors,
|
||||
false, // calc_var_importance,
|
||||
1, // nactive_vars,
|
||||
5, // max_num_of_trees_in_the_forest,
|
||||
0, // forest_accuracy,
|
||||
CV_TERMCRIT_ITER // termcrit_type
|
||||
);
|
||||
|
||||
ertrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
|
||||
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
int response = (int)ertrees.predict( testSample );
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _ANN_
|
||||
void find_decision_boundary_ANN( const Mat& layer_sizes )
|
||||
static void find_decision_boundary_ANN( const Mat& layer_sizes )
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON),
|
||||
ANN_MLP::Params::BACKPROP, 0.001);
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
|
||||
// prepare trainClasses
|
||||
trainClasses.create( trainedPoints.size(), classColors.size(), CV_32FC1 );
|
||||
for( int i = 0; i < trainClasses.rows; i++ )
|
||||
Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
|
||||
for( int i = 0; i < trainClasses.rows; i++ )
|
||||
{
|
||||
for( int k = 0; k < trainClasses.cols; k++ )
|
||||
{
|
||||
if( k == trainedPointsMarkers[i] )
|
||||
trainClasses.at<float>(i,k) = 1;
|
||||
else
|
||||
trainClasses.at<float>(i,k) = 0;
|
||||
}
|
||||
trainClasses.at<float>(i, trainedPointsMarkers[i]) = 1.f;
|
||||
}
|
||||
|
||||
Mat weights( 1, trainedPoints.size(), CV_32FC1, Scalar::all(1) );
|
||||
Mat samples = prepare_train_samples(trainedPoints);
|
||||
Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);
|
||||
|
||||
// learn classifier
|
||||
CvANN_MLP ann( layer_sizes, CvANN_MLP::SIGMOID_SYM, 1, 1 );
|
||||
ann.train( trainSamples, trainClasses, weights );
|
||||
|
||||
Mat testSample( 1, 2, CV_32FC1 );
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
{
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
Mat outputs( 1, classColors.size(), CV_32FC1, testSample.data );
|
||||
ann.predict( testSample, outputs );
|
||||
Point maxLoc;
|
||||
minMaxLoc( outputs, 0, 0, 0, &maxLoc );
|
||||
circle( imgDst, Point(x,y), 2, classColors[maxLoc.x], 1 );
|
||||
}
|
||||
}
|
||||
Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params);
|
||||
predict_and_paint(ann, imgDst);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _EM_
|
||||
void find_decision_boundary_EM()
|
||||
static void find_decision_boundary_EM()
|
||||
{
|
||||
img.copyTo( imgDst );
|
||||
|
||||
Mat trainSamples, trainClasses;
|
||||
prepare_train_data( trainSamples, trainClasses );
|
||||
Mat samples = prepare_train_samples(trainedPoints);
|
||||
|
||||
vector<cv::EM> em_models(classColors.size());
|
||||
int i, j, nmodels = (int)classColors.size();
|
||||
vector<Ptr<EM> > em_models(nmodels);
|
||||
Mat modelSamples;
|
||||
|
||||
CV_Assert((int)trainClasses.total() == trainSamples.rows);
|
||||
CV_Assert((int)trainClasses.type() == CV_32SC1);
|
||||
|
||||
for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
|
||||
for( i = 0; i < nmodels; i++ )
|
||||
{
|
||||
const int componentCount = 3;
|
||||
em_models[modelIndex] = EM(componentCount, cv::EM::COV_MAT_DIAGONAL);
|
||||
|
||||
Mat modelSamples;
|
||||
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
|
||||
modelSamples.release();
|
||||
for( j = 0; j < samples.rows; j++ )
|
||||
{
|
||||
if(trainClasses.at<int>(sampleIndex) == (int)modelIndex)
|
||||
modelSamples.push_back(trainSamples.row(sampleIndex));
|
||||
if( trainedPointsMarkers[j] == i )
|
||||
modelSamples.push_back(samples.row(j));
|
||||
}
|
||||
|
||||
// learn models
|
||||
if(!modelSamples.empty())
|
||||
em_models[modelIndex].train(modelSamples);
|
||||
if( !modelSamples.empty() )
|
||||
{
|
||||
em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(),
|
||||
EM::Params(componentCount, EM::COV_MAT_DIAGONAL));
|
||||
}
|
||||
}
|
||||
|
||||
// classify coordinate plane points using the bayes classifier, i.e.
|
||||
// y(x) = arg max_i=1_modelsCount likelihoods_i(x)
|
||||
Mat testSample(1, 2, CV_32FC1 );
|
||||
Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX));
|
||||
|
||||
for( int y = 0; y < img.rows; y += testStep )
|
||||
{
|
||||
for( int x = 0; x < img.cols; x += testStep )
|
||||
@@ -501,17 +264,14 @@ void find_decision_boundary_EM()
|
||||
testSample.at<float>(0) = (float)x;
|
||||
testSample.at<float>(1) = (float)y;
|
||||
|
||||
Mat logLikelihoods(1, em_models.size(), CV_64FC1, Scalar(-DBL_MAX));
|
||||
for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
|
||||
for( i = 0; i < nmodels; i++ )
|
||||
{
|
||||
if(em_models[modelIndex].isTrained())
|
||||
logLikelihoods.at<double>(modelIndex) = em_models[modelIndex].predict(testSample)[0];
|
||||
if( !em_models[i].empty() )
|
||||
logLikelihoods.at<double>(i) = em_models[i]->predict2(testSample, noArray())[0];
|
||||
}
|
||||
Point maxLoc;
|
||||
minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
|
||||
|
||||
int response = maxLoc.x;
|
||||
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
|
||||
imgDst.at<Vec3b>(y, x) = classColors[maxLoc.x];
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -520,7 +280,7 @@ void find_decision_boundary_EM()
|
||||
int main()
|
||||
{
|
||||
cout << "Use:" << endl
|
||||
<< " right mouse button - to add new class;" << endl
|
||||
<< " key '0' .. '1' - switch to class #n" << endl
|
||||
<< " left mouse button - to add new point;" << endl
|
||||
<< " key 'r' - to run the ML model;" << endl
|
||||
<< " key 'i' - to init (clear) the data." << endl << endl;
|
||||
@@ -532,6 +292,9 @@ int main()
|
||||
imshow( "points", img );
|
||||
setMouseCallback( "points", on_mouse );
|
||||
|
||||
classColors[0] = Vec3b(0, 255, 0);
|
||||
classColors[1] = Vec3b(0, 0, 255);
|
||||
|
||||
for(;;)
|
||||
{
|
||||
uchar key = (uchar)waitKey();
|
||||
@@ -542,98 +305,94 @@ int main()
|
||||
{
|
||||
img = Scalar::all(0);
|
||||
|
||||
classColors.clear();
|
||||
trainedPoints.clear();
|
||||
trainedPointsMarkers.clear();
|
||||
classCounters.assign(MAX_CLASSES, 0);
|
||||
|
||||
imshow( winName, img );
|
||||
}
|
||||
|
||||
if( key == '0' || key == '1' )
|
||||
{
|
||||
currentClass = key - '0';
|
||||
}
|
||||
|
||||
if( key == 'r' ) // run
|
||||
{
|
||||
double minVal = 0;
|
||||
minMaxLoc(classCounters, &minVal, 0, 0, 0);
|
||||
if( minVal == 0 )
|
||||
{
|
||||
printf("each class should have at least 1 point\n");
|
||||
continue;
|
||||
}
|
||||
img.copyTo( imgDst );
|
||||
#if _NBC_
|
||||
find_decision_boundary_NBC();
|
||||
namedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
|
||||
imshow( "NormalBayesClassifier", imgDst );
|
||||
#endif
|
||||
#if _KNN_
|
||||
int K = 3;
|
||||
find_decision_boundary_KNN( K );
|
||||
namedWindow( "kNN", WINDOW_AUTOSIZE );
|
||||
imshow( "kNN", imgDst );
|
||||
|
||||
K = 15;
|
||||
find_decision_boundary_KNN( K );
|
||||
namedWindow( "kNN2", WINDOW_AUTOSIZE );
|
||||
imshow( "kNN2", imgDst );
|
||||
#endif
|
||||
|
||||
#if _SVM_
|
||||
//(1)-(2)separable and not sets
|
||||
CvSVMParams params;
|
||||
params.svm_type = CvSVM::C_SVC;
|
||||
params.kernel_type = CvSVM::POLY; //CvSVM::LINEAR;
|
||||
SVM::Params params;
|
||||
params.svmType = SVM::C_SVC;
|
||||
params.kernelType = SVM::POLY; //CvSVM::LINEAR;
|
||||
params.degree = 0.5;
|
||||
params.gamma = 1;
|
||||
params.coef0 = 1;
|
||||
params.C = 1;
|
||||
params.nu = 0.5;
|
||||
params.p = 0;
|
||||
params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 1000, 0.01);
|
||||
params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);
|
||||
|
||||
find_decision_boundary_SVM( params );
|
||||
namedWindow( "classificationSVM1", WINDOW_AUTOSIZE );
|
||||
imshow( "classificationSVM1", imgDst );
|
||||
|
||||
params.C = 10;
|
||||
find_decision_boundary_SVM( params );
|
||||
namedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
|
||||
imshow( "classificationSVM2", imgDst );
|
||||
#endif
|
||||
|
||||
#if _DT_
|
||||
find_decision_boundary_DT();
|
||||
namedWindow( "DT", WINDOW_AUTOSIZE );
|
||||
imshow( "DT", imgDst );
|
||||
#endif
|
||||
|
||||
#if _BT_
|
||||
find_decision_boundary_BT();
|
||||
namedWindow( "BT", WINDOW_AUTOSIZE );
|
||||
imshow( "BT", imgDst);
|
||||
#endif
|
||||
|
||||
#if _GBT_
|
||||
find_decision_boundary_GBT();
|
||||
namedWindow( "GBT", WINDOW_AUTOSIZE );
|
||||
imshow( "GBT", imgDst);
|
||||
#endif
|
||||
|
||||
#if _RF_
|
||||
find_decision_boundary_RF();
|
||||
namedWindow( "RF", WINDOW_AUTOSIZE );
|
||||
imshow( "RF", imgDst);
|
||||
#endif
|
||||
|
||||
#if _ERT_
|
||||
find_decision_boundary_ERT();
|
||||
namedWindow( "ERT", WINDOW_AUTOSIZE );
|
||||
imshow( "ERT", imgDst);
|
||||
#endif
|
||||
|
||||
#if _ANN_
|
||||
Mat layer_sizes1( 1, 3, CV_32SC1 );
|
||||
layer_sizes1.at<int>(0) = 2;
|
||||
layer_sizes1.at<int>(1) = 5;
|
||||
layer_sizes1.at<int>(2) = classColors.size();
|
||||
layer_sizes1.at<int>(2) = (int)classColors.size();
|
||||
find_decision_boundary_ANN( layer_sizes1 );
|
||||
namedWindow( "ANN", WINDOW_AUTOSIZE );
|
||||
imshow( "ANN", imgDst );
|
||||
#endif
|
||||
|
||||
#if _EM_
|
||||
find_decision_boundary_EM();
|
||||
namedWindow( "EM", WINDOW_AUTOSIZE );
|
||||
imshow( "EM", imgDst );
|
||||
#endif
|
||||
}
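The SVM hunk above makes the analogous swap from CvSVMParams to the transitional SVM::Params struct. A minimal sketch of the same polynomial-kernel configuration against the final OpenCV 3.x cv::ml API, which uses setters on a created SVM object (the helper name is illustrative):

#include "opencv2/core/core.hpp"
#include "opencv2/ml/ml.hpp"
using namespace cv;
using namespace cv::ml;

// samples: CV_32F feature rows; responses: CV_32S class labels.
static Ptr<SVM> makePolySvm( const Mat& samples, const Mat& responses, double C )
{
    Ptr<SVM> svm = SVM::create();
    svm->setType( SVM::C_SVC );
    svm->setKernel( SVM::POLY );
    svm->setDegree( 0.5 );
    svm->setGamma( 1 );
    svm->setCoef0( 1 );
    svm->setC( C );
    svm->setNu( 0.5 );
    svm->setP( 0 );
    svm->setTermCriteria( TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01) );
    svm->train( samples, ROW_SAMPLE, responses );
    return svm;
}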
86
samples/cpp/polar_transforms.cpp
Normal file
@@ -0,0 +1,86 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"

#include <ctype.h>
#include <stdio.h>

static void help( void )
{
    printf("\nThis program illustrates Linear-Polar and Log-Polar image transforms\n"
           "Usage:\n"
           "./polar_transforms [[camera number -- Default 0],[AVI path_filename]]\n\n"
           );
}
int main( int argc, char** argv )
{
    CvCapture* capture = 0;
    IplImage* log_polar_img = 0;
    IplImage* lin_polar_img = 0;
    IplImage* recovered_img = 0;

    help();

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );
    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        fprintf(stderr,"Usage: %s <CAMERA_NUMBER> , or \n %s <VIDEO_FILE>\n",argv[0],argv[0]);
        help();
        return -1;
    }

    cvNamedWindow( "Linear-Polar", 0 );
    cvNamedWindow( "Log-Polar", 0 );
    cvNamedWindow( "Recovered image", 0 );

    cvMoveWindow( "Linear-Polar", 20,20 );
    cvMoveWindow( "Log-Polar", 700,20 );
    cvMoveWindow( "Recovered image", 20,700 );

    for(;;)
    {
        IplImage* frame = 0;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !log_polar_img )
        {
            log_polar_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
            lin_polar_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
            recovered_img = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels );
        }

        cvLogPolar(frame,log_polar_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);
        cvLinearPolar(frame,lin_polar_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);

#if 0
        cvLogPolar(log_polar_img,recovered_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_WARP_INVERSE_MAP+CV_INTER_LINEAR);
#else
        cvLinearPolar(lin_polar_img,recovered_img,cvPoint2D32f(frame->width >> 1,frame->height >> 1),70, CV_WARP_INVERSE_MAP+CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS);
#endif

        cvShowImage("Log-Polar", log_polar_img );
        cvShowImage("Linear-Polar", lin_polar_img );
        cvShowImage("Recovered image", recovered_img );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("Linear-Polar");
    cvDestroyWindow("Log-Polar");
    cvDestroyWindow("Recovered image");

    return 0;
}

#ifdef _EiC
main(1,"laplace.c");
#endif
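The new sample above is written against the legacy C API (CvCapture, IplImage, cvLogPolar). A minimal sketch of the same loop with the C++ interface, assuming the cv::logPolar/cv::linearPolar wrappers shipped in OpenCV 3.x (later superseded by cv::warpPolar):

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::VideoCapture cap(0);               // default camera, as in the C version
    if( !cap.isOpened() )
        return -1;

    cv::Mat frame, log_polar_img, lin_polar_img, recovered_img;
    for(;;)
    {
        if( !cap.read(frame) )
            break;
        cv::Point2f center(frame.cols*0.5f, frame.rows*0.5f);

        cv::logPolar(frame, log_polar_img, center, 70,
                     cv::INTER_LINEAR + cv::WARP_FILL_OUTLIERS);
        cv::linearPolar(frame, lin_polar_img, center, 70,
                        cv::INTER_LINEAR + cv::WARP_FILL_OUTLIERS);
        // undo the linear-polar mapping to recover (approximately) the input
        cv::linearPolar(lin_polar_img, recovered_img, center, 70,
                        cv::WARP_INVERSE_MAP + cv::INTER_LINEAR + cv::WARP_FILL_OUTLIERS);

        cv::imshow("Log-Polar", log_polar_img);
        cv::imshow("Linear-Polar", lin_polar_img);
        cv::imshow("Recovered image", recovered_img);
        if( cv::waitKey(10) >= 0 )
            break;
    }
    return 0;
}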
BIN
samples/cpp/puzzle.png
Normal file
(added image: 631 KiB)
@@ -1,178 +0,0 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <cstdio>
#include <iostream>
#include <ctime>

using namespace cv;
using namespace std;

static
void cvtDepth2Cloud( const Mat& depth, Mat& cloud, const Mat& cameraMatrix )
{
    const float inv_fx = 1.f/cameraMatrix.at<float>(0,0);
    const float inv_fy = 1.f/cameraMatrix.at<float>(1,1);
    const float ox = cameraMatrix.at<float>(0,2);
    const float oy = cameraMatrix.at<float>(1,2);
    cloud.create( depth.size(), CV_32FC3 );
    for( int y = 0; y < cloud.rows; y++ )
    {
        Point3f* cloud_ptr = (Point3f*)cloud.ptr(y);
        const float* depth_ptr = (const float*) depth.ptr(y);
        for( int x = 0; x < cloud.cols; x++ )
        {
            float z = depth_ptr[x];
            cloud_ptr[x].x = (x - ox) * z * inv_fx;
            cloud_ptr[x].y = (y - oy) * z * inv_fy;
            cloud_ptr[x].z = z;
        }
    }
}
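cvtDepth2Cloud above is a plain pinhole back-projection: X = (x - cx)*z/fx, Y = (y - cy)*z/fy, Z = z. As a worked check with the intrinsics hard-coded in main() below (fx = fy = 525, cx = 319.5, cy = 239.5), a pixel 100 columns to the right of the principal point at z = 1 m lands at X = 100/525, roughly 0.19 m. A single-pixel helper (illustrative, not part of the sample):

static Point3f backProjectPixel( float x, float y, float z, const Mat& cameraMatrix )
{
    const float fx = cameraMatrix.at<float>(0,0), fy = cameraMatrix.at<float>(1,1);
    const float cx = cameraMatrix.at<float>(0,2), cy = cameraMatrix.at<float>(1,2);
    return Point3f( (x - cx) * z / fx, (y - cy) * z / fy, z );
}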

template<class ImageElemType>
static void warpImage( const Mat& image, const Mat& depth,
                       const Mat& Rt, const Mat& cameraMatrix, const Mat& distCoeff,
                       Mat& warpedImage )
{
    const Rect rect = Rect(0, 0, image.cols, image.rows);

    vector<Point2f> points2d;
    Mat cloud, transformedCloud;

    cvtDepth2Cloud( depth, cloud, cameraMatrix );
    perspectiveTransform( cloud, transformedCloud, Rt );
    projectPoints( transformedCloud.reshape(3,1), Mat::eye(3,3,CV_64FC1), Mat::zeros(3,1,CV_64FC1), cameraMatrix, distCoeff, points2d );

    Mat pointsPositions( points2d );
    pointsPositions = pointsPositions.reshape( 2, image.rows );

    warpedImage.create( image.size(), image.type() );
    warpedImage = Scalar::all(0);

    // forward-warp with a z-buffer: keep only the nearest point per target pixel
    Mat zBuffer( image.size(), CV_32FC1, FLT_MAX );
    for( int y = 0; y < image.rows; y++ )
    {
        for( int x = 0; x < image.cols; x++ )
        {
            const Point3f p3d = transformedCloud.at<Point3f>(y,x);
            const Point p2d = pointsPositions.at<Point2f>(y,x);
            if( !cvIsNaN(cloud.at<Point3f>(y,x).z) && cloud.at<Point3f>(y,x).z > 0 &&
                rect.contains(p2d) && zBuffer.at<float>(p2d) > p3d.z )
            {
                warpedImage.at<ImageElemType>(p2d) = image.at<ImageElemType>(y,x);
                zBuffer.at<float>(p2d) = p3d.z;
            }
        }
    }
}
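warpImage is templated on the pixel type, so the same z-buffered forward warp serves both gray and color images; for the 8-bit BGR inputs of this sample it is instantiated in main() below as:

    warpImage<Point3_<uchar> >( colorImage0, depthFlt0, Rt, cameraMatrix, distCoeff, warpedImage0 );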

int main(int argc, char** argv)
{
    float vals[] = {525., 0., 3.1950000000000000e+02,
                    0., 525., 2.3950000000000000e+02,
                    0., 0., 1.};

    const Mat cameraMatrix = Mat(3,3,CV_32FC1,vals);
    const Mat distCoeff(1,5,CV_32FC1,Scalar(0));

    if( argc != 5 && argc != 6 )
    {
        cout << "Format: image0 depth0 image1 depth1 [transformationType]" << endl;
        cout << "Depth files must be 16U images storing depth in mm." << endl;
        cout << "Transformation types:" << endl;
        cout << "   -rbm - rigid body motion (default)" << endl;
        cout << "   -r   - rotation only" << endl;
        cout << "   -t   - translation only" << endl;
        return -1;
    }

    Mat colorImage0 = imread( argv[1] );
    Mat depth0 = imread( argv[2], -1 );

    Mat colorImage1 = imread( argv[3] );
    Mat depth1 = imread( argv[4], -1 );

    if( colorImage0.empty() || depth0.empty() || colorImage1.empty() || depth1.empty() )
    {
        cout << "Data (rgb or depth images) is empty.";
        return -1;
    }

    int transformationType = RIGID_BODY_MOTION;
    if( argc == 6 )
    {
        string ttype = argv[5];
        if( ttype == "-rbm" )
        {
            transformationType = RIGID_BODY_MOTION;
        }
        else if ( ttype == "-r")
        {
            transformationType = ROTATION;
        }
        else if ( ttype == "-t")
        {
            transformationType = TRANSLATION;
        }
        else
        {
            cout << "Unsupported transformation type." << endl;
            return -1;
        }
    }

    Mat grayImage0, grayImage1, depthFlt0, depthFlt1/*in meters*/;
    cvtColor( colorImage0, grayImage0, COLOR_BGR2GRAY );
    cvtColor( colorImage1, grayImage1, COLOR_BGR2GRAY );
    depth0.convertTo( depthFlt0, CV_32FC1, 1./1000 );
    depth1.convertTo( depthFlt1, CV_32FC1, 1./1000 );

    TickMeter tm;
    Mat Rt;

    vector<int> iterCounts(4);
    iterCounts[0] = 7;
    iterCounts[1] = 7;
    iterCounts[2] = 7;
    iterCounts[3] = 10;

    vector<float> minGradMagnitudes(4);
    minGradMagnitudes[0] = 12;
    minGradMagnitudes[1] = 5;
    minGradMagnitudes[2] = 3;
    minGradMagnitudes[3] = 1;

    const float minDepth = 0.f; //in meters
    const float maxDepth = 4.f; //in meters
    const float maxDepthDiff = 0.07f; //in meters

    tm.start();
    bool isFound = cv::RGBDOdometry( Rt, Mat(),
                                     grayImage0, depthFlt0, Mat(),
                                     grayImage1, depthFlt1, Mat(),
                                     cameraMatrix, minDepth, maxDepth, maxDepthDiff,
                                     iterCounts, minGradMagnitudes, transformationType );
    tm.stop();

    cout << "Rt = " << Rt << endl;
    cout << "Time = " << tm.getTimeSec() << " sec." << endl;

    if( !isFound )
    {
        cout << "Rigid body motion can't be estimated for the given RGBD data." << endl;
        return -1;
    }

    Mat warpedImage0;
    warpImage<Point3_<uchar> >( colorImage0, depthFlt0, Rt, cameraMatrix, distCoeff, warpedImage0 );

    imshow( "image0", colorImage0 );
    imshow( "warped_image0", warpedImage0 );
    imshow( "image1", colorImage1 );
    waitKey();

    return 0;
}
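The deleted sample depended on cv::RGBDOdometry from the old contrib module, which was dropped from the main repository; comparable functionality lives in the opencv_contrib rgbd module. A rough sketch of the replacement call, with the caveat that the exact rgbd class and parameter names vary across contrib versions (treat this as an assumption, not a fixed API):

#include "opencv2/rgbd.hpp"   // opencv_contrib rgbd module (availability assumed)
using namespace cv;

// gray0/gray1: CV_8UC1; depth0/depth1: CV_32FC1 in meters, as prepared above.
static bool estimateRt( const Mat& gray0, const Mat& depth0,
                        const Mat& gray1, const Mat& depth1,
                        const Mat& cameraMatrix, Mat& Rt )
{
    Ptr<rgbd::Odometry> odom = makePtr<rgbd::RgbdOdometry>( cameraMatrix );
    return odom->compute( gray0, depth0, Mat(), gray1, depth1, Mat(), Rt );
}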
(two binary image diffs: before 102 KiB, before 100 KiB)