Merge with master and fix conflicts

Author: mshabunin
Date: 2014-08-13 18:24:11 +04:00
778 changed files with 40820 additions and 452611 deletions


@@ -4,6 +4,7 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
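Most of the include changes in this commit follow the same OpenCV 3.0 module split: image file I/O (imread, imwrite) moved out of highgui into the new imgcodecs module, video capture and writing moved into the new videoio module, and highgui keeps only the window and UI functions. A minimal sketch of the resulting include pattern (the image name is illustrative):

    #include "opencv2/imgcodecs.hpp"   // imread, imwrite
    #include "opencv2/videoio.hpp"     // VideoCapture, VideoWriter
    #include "opencv2/highgui.hpp"     // namedWindow, imshow, waitKey

    int main()
    {
        cv::Mat img = cv::imread("left01.jpg");  // now declared in imgcodecs
        if (img.empty())
            return -1;
        cv::imshow("image", img);                // still in highgui
        cv::waitKey();
        return 0;
    }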


@@ -4,8 +4,9 @@
# ----------------------------------------------------------------------------
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
-opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree
-opencv_features2d opencv_calib3d opencv_stitching opencv_videostab opencv_shape)
+opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
+opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
+opencv_stitching opencv_videostab opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
@@ -54,14 +55,14 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
set(the_target "${sample_kind}_${name}")
add_executable(${the_target} ${srcs})
-target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
+ocv_target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
if("${srcs}" MATCHES "gpu/")
-target_link_libraries(${the_target} opencv_cudaarithm opencv_cudafilters)
+ocv_target_link_libraries(${the_target} opencv_cudaarithm opencv_cudafilters)
endif()
if(HAVE_opencv_ocl)
-target_link_libraries(${the_target} opencv_ocl)
+ocv_target_link_libraries(${the_target} opencv_ocl)
endif()
endif()
set_target_properties(${the_target} PROPERTIES

samples/cpp/H1to3p.xml: new executable file (11 lines)

@@ -0,0 +1,11 @@
<?xml version="1.0"?>
<opencv_storage>
<H13 type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
7.6285898e-01 -2.9922929e-01 2.2567123e+02
3.3443473e-01 1.0143901e+00 -7.6999973e+01
3.4663091e-04 -1.4364524e-05 1.0000000e+00 </data></H13>
</opencv_storage>
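The matrix stored in this new sample file can be read back with cv::FileStorage; a short sketch of the expected usage (assuming the file sits in the working directory):

    #include "opencv2/core.hpp"
    #include <iostream>

    int main()
    {
        cv::FileStorage fs("H1to3p.xml", cv::FileStorage::READ);
        cv::Mat H13;
        fs["H13"] >> H13;   // node name from the XML above; yields a 3x3 CV_64F matrix
        std::cout << H13 << std::endl;
        return 0;
    }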

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,80 +0,0 @@
/*
* FGBGTest.cpp
*
* Created on: May 7, 2012
* Author: Andrew B. Godbehere
*/
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
using namespace cv;
static void help()
{
std::cout <<
"\nA program demonstrating the use and capabilities of a particular BackgroundSubtraction\n"
"algorithm described in A. Godbehere, A. Matsukawa, K. Goldberg, \n"
"\"Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive\n"
"Audio Art Installation\", American Control Conference, 2012, used in an interactive\n"
"installation at the Contemporary Jewish Museum in San Francisco, CA from March 31 through\n"
"July 31, 2011.\n"
"Call:\n"
"./BackgroundSubtractorGMG_sample\n"
"Using OpenCV version " << CV_VERSION << "\n"<<std::endl;
}
int main(int argc, char** argv)
{
help();
initModule_video();
setUseOptimized(true);
setNumThreads(8);
Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
if (!fgbg)
{
std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
return -1;
}
VideoCapture cap;
if (argc > 1)
cap.open(argv[1]);
else
cap.open(0);
if (!cap.isOpened())
{
std::cerr << "Cannot read video. Try moving video file to sample directory." << std::endl;
return -1;
}
Mat frame, fgmask, segm;
namedWindow("FG Segmentation", WINDOW_NORMAL);
for (;;)
{
cap >> frame;
if (frame.empty())
break;
fgbg->apply(frame, fgmask);
frame.convertTo(segm, CV_8U, 0.5);
add(frame, Scalar(100, 100, 0), segm, fgmask);
imshow("FG Segmentation", segm);
int c = waitKey(30);
if (c == 'q' || c == 'Q' || (c & 255) == 27)
break;
}
return 0;
}
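This sample is deleted because createBackgroundSubtractorGMG is no longer part of the main video module. In the 3.x reorganization the GMG algorithm is expected in the opencv_contrib bgsegm module; the destination is an assumption, not stated by the diff:

    // assumes opencv_contrib is available; bgsegm is the presumed new home of GMG
    #include "opencv2/bgsegm.hpp"
    #include "opencv2/video.hpp"

    cv::Ptr<cv::BackgroundSubtractor> makeGMG()
    {
        // same parameters as the removed sample: 20 initialization frames,
        // decision threshold 0.7
        return cv::bgsegm::createBackgroundSubtractorGMG(20, 0.7);
    }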


@@ -2,6 +2,7 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
+#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>
@@ -20,6 +21,8 @@ static void help()
const char* keys =
{
"{c camera | | use camera or not}"
"{m method |mog2 | method (knn or mog2) }"
"{s smooth | | smooth the mask }"
"{fn file_name|tree.avi | movie file }"
};
@@ -30,7 +33,9 @@ int main(int argc, const char** argv)
CommandLineParser parser(argc, argv, keys);
bool useCamera = parser.has("camera");
bool smoothMask = parser.has("smooth");
string file = parser.get<string>("file_name");
string method = parser.get<string>("method");
VideoCapture cap;
bool update_bg_model = true;
@@ -52,24 +57,31 @@ int main(int argc, const char** argv)
namedWindow("foreground image", WINDOW_NORMAL);
namedWindow("mean background image", WINDOW_NORMAL);
-Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
+Ptr<BackgroundSubtractor> bg_model = method == "knn" ?
+    createBackgroundSubtractorKNN().dynamicCast<BackgroundSubtractor>() :
+    createBackgroundSubtractorMOG2().dynamicCast<BackgroundSubtractor>();
-Mat img, fgmask, fgimg;
+Mat img0, img, fgmask, fgimg;
for(;;)
{
-cap >> img;
+cap >> img0;
-if( img.empty() )
+if( img0.empty() )
break;
-//cvtColor(_img, img, COLOR_BGR2GRAY);
+resize(img0, img, Size(640, 640*img0.rows/img0.cols), 0, 0, INTER_LINEAR);
if( fgimg.empty() )
fgimg.create(img.size(), img.type());
//update the model
bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
if( smoothMask )
{
GaussianBlur(fgmask, fgmask, Size(11, 11), 3.5, 3.5);
threshold(fgmask, fgmask, 10, 255, THRESH_BINARY);
}
fgimg = Scalar::all(0);
img.copyTo(fgimg, fgmask);


@@ -2,6 +2,8 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <cctype>


@@ -1,6 +1,7 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>


@@ -23,6 +23,7 @@
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>


@@ -33,6 +33,7 @@
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>


@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>


@@ -10,6 +10,7 @@
*/
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>


@@ -2,6 +2,7 @@
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
+#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/objdetect/objdetect.hpp>


@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>


@@ -1,304 +0,0 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void help(char** argv)
{
cout << "\nThis program demonstrates keypoint finding and matching between 2 images using the features2d framework.\n"
<< " In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
<< "\n"
<< "Case1: second image is obtained from the first (given) image using a randomly generated homography matrix\n"
<< argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image] [evaluate(0 or 1)]\n"
<< "Example of case1:\n"
<< "./descriptor_extractor_matcher SURF SURF FlannBased NoneFilter cola.jpg 0\n"
<< "\n"
<< "Case2: both images are given. If ransacReprojThreshold>=0 then the homography matrix is calculated\n"
<< argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image1] [image2] [ransacReprojThreshold]\n"
<< "\n"
<< "Matches are filtered using the homography matrix in case1 and case2 (if ransacReprojThreshold>=0)\n"
<< "Example of case2:\n"
<< "./descriptor_extractor_matcher SURF SURF BruteForce CrossCheckFilter cola1.jpg cola2.jpg 3\n"
<< "\n"
<< "Possible detectorType values: see in documentation on createFeatureDetector().\n"
<< "Possible descriptorType values: see in documentation on createDescriptorExtractor().\n"
<< "Possible matcherType values: see in documentation on createDescriptorMatcher().\n"
<< "Possible matcherFilterType values: NoneFilter, CrossCheckFilter." << endl;
}
#define DRAW_RICH_KEYPOINTS_MODE 0
#define DRAW_OUTLIERS_MODE 0
const string winName = "correspondences";
enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };
static int getMatcherFilterType( const string& str )
{
if( str == "NoneFilter" )
return NONE_FILTER;
if( str == "CrossCheckFilter" )
return CROSS_CHECK_FILTER;
CV_Error(Error::StsBadArg, "Invalid filter name");
return -1;
}
static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& matches12 )
{
descriptorMatcher->match( descriptors1, descriptors2, matches12 );
}
static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& filteredMatches12, int knn=1 )
{
filteredMatches12.clear();
vector<vector<DMatch> > matches12, matches21;
descriptorMatcher->knnMatch( descriptors1, descriptors2, matches12, knn );
descriptorMatcher->knnMatch( descriptors2, descriptors1, matches21, knn );
for( size_t m = 0; m < matches12.size(); m++ )
{
bool findCrossCheck = false;
for( size_t fk = 0; fk < matches12[m].size(); fk++ )
{
DMatch forward = matches12[m][fk];
for( size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++ )
{
DMatch backward = matches21[forward.trainIdx][bk];
if( backward.trainIdx == forward.queryIdx )
{
filteredMatches12.push_back(forward);
findCrossCheck = true;
break;
}
}
if( findCrossCheck ) break;
}
}
}
static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
H.create(3, 3, CV_32FC1);
H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
warpPerspective( src, dst, H, src.size() );
}
static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
vector<KeyPoint>& keypoints1, const Mat& descriptors1,
Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
double ransacReprojThreshold, RNG& rng )
{
CV_Assert( !img1.empty() );
Mat H12;
if( isWarpPerspective )
warpPerspectiveRand(img1, img2, H12, rng );
else
CV_Assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );
cout << endl << "< Extracting keypoints from second image..." << endl;
vector<KeyPoint> keypoints2;
detector->detect( img2, keypoints2 );
cout << keypoints2.size() << " points" << endl << ">" << endl;
if( !H12.empty() && eval )
{
cout << "< Evaluate feature detector..." << endl;
float repeatability;
int correspCount;
evaluateFeatureDetector( img1, img2, H12, &keypoints1, &keypoints2, repeatability, correspCount );
cout << "repeatability = " << repeatability << endl;
cout << "correspCount = " << correspCount << endl;
cout << ">" << endl;
}
cout << "< Computing descriptors for keypoints from second image..." << endl;
Mat descriptors2;
descriptorExtractor->compute( img2, keypoints2, descriptors2 );
cout << ">" << endl;
cout << "< Matching descriptors..." << endl;
vector<DMatch> filteredMatches;
switch( matcherFilter )
{
case CROSS_CHECK_FILTER :
crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
break;
default :
simpleMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches );
}
cout << ">" << endl;
if( !H12.empty() && eval )
{
cout << "< Evaluate descriptor matcher..." << endl;
vector<Point2f> curve;
Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
Point2f firstPoint = *curve.begin();
Point2f lastPoint = *curve.rbegin();
int prevPointIndex = -1;
cout << "1-precision = " << firstPoint.x << "; recall = " << firstPoint.y << endl;
for( float l_p = 0; l_p <= 1 + FLT_EPSILON; l_p+=0.05f )
{
int nearest = getNearestPoint( curve, l_p );
if( nearest >= 0 )
{
Point2f curPoint = curve[nearest];
if( curPoint.x > firstPoint.x && curPoint.x < lastPoint.x && nearest != prevPointIndex )
{
cout << "1-precision = " << curPoint.x << "; recall = " << curPoint.y << endl;
prevPointIndex = nearest;
}
}
}
cout << "1-precision = " << lastPoint.x << "; recall = " << lastPoint.y << endl;
cout << ">" << endl;
}
vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
for( size_t i = 0; i < filteredMatches.size(); i++ )
{
queryIdxs[i] = filteredMatches[i].queryIdx;
trainIdxs[i] = filteredMatches[i].trainIdx;
}
if( !isWarpPerspective && ransacReprojThreshold >= 0 )
{
cout << "< Computing homography (RANSAC)..." << endl;
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
H12 = findHomography( Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold );
cout << ">" << endl;
}
Mat drawImg;
if( !H12.empty() ) // filter outliers
{
vector<char> matchesMask( filteredMatches.size(), 0 );
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
double maxInlierDist = ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;
for( size_t i1 = 0; i1 < points1.size(); i1++ )
{
if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
}
// draw inliers
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
);
#if DRAW_OUTLIERS_MODE
// draw outliers
for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
matchesMask[i1] = !matchesMask[i1];
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif
cout << "Number of inliers: " << countNonZero(matchesMask) << endl;
}
else
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );
imshow( winName, drawImg );
}
int main(int argc, char** argv)
{
if( argc != 7 && argc != 8 )
{
help(argv);
return -1;
}
cv::initModule_nonfree();
bool isWarpPerspective = argc == 7;
double ransacReprojThreshold = -1;
if( !isWarpPerspective )
ransacReprojThreshold = atof(argv[7]);
cout << "< Creating detector, descriptor extractor and descriptor matcher ..." << endl;
Ptr<FeatureDetector> detector = FeatureDetector::create( argv[1] );
Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( argv[2] );
Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( argv[3] );
int matcherFilterType = getMatcherFilterType( argv[4] );
bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
cout << ">" << endl;
if( !detector || !descriptorExtractor || !descriptorMatcher )
{
cout << "Cannot create detector, descriptor extractor or descriptor matcher of the given types" << endl;
return -1;
}
cout << "< Reading the images..." << endl;
Mat img1 = imread( argv[5] ), img2;
if( !isWarpPerspective )
img2 = imread( argv[6] );
cout << ">" << endl;
if( img1.empty() || (!isWarpPerspective && img2.empty()) )
{
cout << "Cannot read images" << endl;
return -1;
}
cout << endl << "< Extracting keypoints from first image..." << endl;
vector<KeyPoint> keypoints1;
detector->detect( img1, keypoints1 );
cout << keypoints1.size() << " points" << endl << ">" << endl;
cout << "< Computing descriptors for keypoints from first image..." << endl;
Mat descriptors1;
descriptorExtractor->compute( img1, keypoints1, descriptors1 );
cout << ">" << endl;
namedWindow(winName, 1);
RNG rng = theRNG();
doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
ransacReprojThreshold, rng );
for(;;)
{
char c = (char)waitKey(0);
if( c == '\x1b' ) // esc
{
cout << "Exiting ..." << endl;
break;
}
else if( isWarpPerspective )
{
doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
detector, descriptorExtractor, descriptorMatcher, matcherFilterType, eval,
ransacReprojThreshold, rng );
}
}
return 0;
}
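A note on the removed crossCheckMatching helper: with knn=1 it keeps exactly the mutual best matches, which is what BFMatcher already does when constructed with crossCheck=true, so the hand-rolled two-way loop can be replaced by a sketch like this (float descriptors assumed, hence NORM_L2):

    #include "opencv2/features2d.hpp"
    #include <vector>

    std::vector<cv::DMatch> mutualBestMatches(const cv::Mat& descriptors1,
                                              const cv::Mat& descriptors2)
    {
        cv::BFMatcher matcher(cv::NORM_L2, /*crossCheck=*/true);
        std::vector<cv::DMatch> matches;   // only mutual best matches survive
        matcher.match(descriptors1, descriptors2, matches);
        return matches;
    }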


@@ -1,6 +1,7 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>


@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>


@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>


@@ -2,6 +2,7 @@
#include "opencv2/ml.hpp"
using namespace cv;
+using namespace cv::ml;
int main( int /*argc*/, char** /*argv*/ )
{
@@ -34,8 +35,9 @@ int main( int /*argc*/, char** /*argv*/ )
samples = samples.reshape(1, 0);
// cluster the data
-EM em_model(N, EM::COV_MAT_SPHERICAL, TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1));
-em_model.train( samples, noArray(), labels, noArray() );
+Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
+    EM::Params(N, EM::COV_MAT_SPHERICAL,
+    TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));
// classify every image pixel
for( i = 0; i < img.rows; i++ )
@@ -44,7 +46,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
sample.at<float>(0) = (float)j;
sample.at<float>(1) = (float)i;
-int response = cvRound(em_model.predict( sample )[1]);
+int response = cvRound(em_model->predict2( sample, noArray() )[1]);
Scalar c = colors[response];
circle( img, Point(j, i), 1, c*0.75, FILLED );
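For readers of the hunk above: predict2 returns a Vec2d whose first element is the likelihood logarithm of the sample and whose second element is the index of the most probable mixture component, which is why the new code indexes the result with [1]. Wrapped up as a small helper:

    #include "opencv2/ml.hpp"

    int predictLabel(const cv::Ptr<cv::ml::EM>& em_model, const cv::Mat& sample)
    {
        cv::Vec2d result = em_model->predict2(sample, cv::noArray());
        return cvRound(result[1]);   // index of the most probable component
    }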


@@ -1,8 +1,11 @@
#include "opencv2/objdetect.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
+#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <cctype>


@@ -1,5 +1,6 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
@@ -36,7 +37,8 @@ int main(int, char**)
if( !cap.isOpened() )
return -1;
-Mat prevgray, gray, flow, cflow, frame;
+Mat flow, cflow, frame;
+UMat gray, prevgray, uflow;
namedWindow("flow", 1);
for(;;)
@@ -44,10 +46,11 @@ int main(int, char**)
cap >> frame;
cvtColor(frame, gray, COLOR_BGR2GRAY);
-if( prevgray.data )
+if( !prevgray.empty() )
{
-calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3, 15, 3, 5, 1.2, 0);
cvtColor(prevgray, cflow, COLOR_GRAY2BGR);
+uflow.copyTo(flow);
drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
imshow("flow", cflow);
}
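The Mat-to-UMat switch above enables the transparent OpenCL path (T-API): while the images stay in UMat, cvtColor and calcOpticalFlowFarneback may run on an OpenCL device, and the flow field is downloaded only once, via copyTo, for the CPU-side drawing. The core pattern, sketched:

    #include "opencv2/video/tracking.hpp"

    void flowStep(const cv::UMat& prevgray, const cv::UMat& gray, cv::Mat& flow)
    {
        cv::UMat uflow;
        // may execute through OpenCL when a device is available
        cv::calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3, 15, 3, 5, 1.2, 0);
        uflow.copyTo(flow);   // download once for per-element access
    }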


@@ -1,4 +1,6 @@
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>


@@ -15,6 +15,7 @@
*
********************************************************************************/
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;


@@ -1,3 +1,4 @@
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

BIN samples/cpp/graf1.png: new executable binary file, not shown (929 KiB)

BIN samples/cpp/graf3.png: new executable binary file, not shown (953 KiB)


@@ -1,3 +1,4 @@
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"


@@ -1,3 +1,4 @@
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"


@@ -22,6 +22,7 @@ static void help()
#ifdef DEMO_MIXED_API_USE
# include <opencv2/highgui/highgui_c.h>
+# include <opencv2/imgcodecs/imgcodecs_c.h>
#endif
int main( int argc, char** argv )


@@ -13,6 +13,7 @@
* Authors: G. Evangelidis, INRIA, Grenoble, France
* M. Asbach, Fraunhofer IAIS, St. Augustin, Germany
*/
+#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <opencv2/imgproc.hpp>


@@ -1,4 +1,5 @@
#include <opencv2/core/core.hpp>
+#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>


@@ -2,6 +2,7 @@
*/
#include "opencv2/core/core.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <string>
#include <iostream>


@@ -1,3 +1,4 @@
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"


@@ -1,6 +1,7 @@
// testOpenCVCam.cpp : Defines the entry point for the console application.
//
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>


@@ -1,3 +1,4 @@
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

File diff suppressed because it is too large.


@@ -1,704 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc/imgproc_c.h> // cvFindContours
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>
// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst);
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst);
void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T);
cv::Mat displayQuantized(const cv::Mat& quantized);
// Copy of cv_mouse from cv_utilities
class Mouse
{
public:
static void start(const std::string& a_img_name)
{
cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
}
static int event(void)
{
int l_event = m_event;
m_event = -1;
return l_event;
}
static int x(void)
{
return m_x;
}
static int y(void)
{
return m_y;
}
private:
static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
{
m_event = a_event;
m_x = a_x;
m_y = a_y;
}
static int m_event;
static int m_x;
static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;
static void help()
{
printf("Usage: openni_demo [templates.yml]\n\n"
"Place your object on a planar, featureless surface. With the mouse,\n"
"frame it in the 'color' window and right click to learn a first template.\n"
"Then press 'l' to enter online learning mode, and move the camera around.\n"
"When the match score falls between 90-95%% the demo will add a new template.\n\n"
"Keys:\n"
"\t h -- This help page\n"
"\t l -- Toggle online learning\n"
"\t m -- Toggle printing match result\n"
"\t t -- Toggle printing timings\n"
"\t w -- Write learned templates to disk\n"
"\t [ ] -- Adjust matching threshold: '[' down, ']' up\n"
"\t q -- Quit\n\n");
}
// Adapted from cv_timer in cv_utilities
class Timer
{
public:
Timer() : start_(0), time_(0) {}
void start()
{
start_ = cv::getTickCount();
}
void stop()
{
CV_Assert(start_ != 0);
int64 end = cv::getTickCount();
time_ += end - start_;
start_ = 0;
}
double time()
{
double ret = time_ / cv::getTickFrequency();
time_ = 0;
return ret;
}
private:
int64 start_, time_;
};
// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
cv::FileStorage fs(filename, cv::FileStorage::READ);
detector->read(fs.root());
cv::FileNode fn = fs["classes"];
for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
detector->readClass(*i);
return detector;
}
static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
detector->write(fs);
std::vector<cv::String> ids = detector->classIds();
fs << "classes" << "[";
for (int i = 0; i < (int)ids.size(); ++i)
{
fs << "{";
detector->writeClass(ids[i], fs);
fs << "}"; // current class
}
fs << "]"; // classes
}
int main(int argc, char * argv[])
{
// Various settings and flags
bool show_match_result = true;
bool show_timings = false;
bool learn_online = false;
int num_classes = 0;
int matching_threshold = 80;
/// @todo Keys for changing these?
cv::Size roi_size(200, 200);
int learning_lower_bound = 90;
int learning_upper_bound = 95;
// Timers
Timer extract_timer;
Timer match_timer;
// Initialize HighGUI
help();
cv::namedWindow("color");
cv::namedWindow("normals");
Mouse::start("color");
// Initialize LINEMOD data structures
cv::Ptr<cv::linemod::Detector> detector;
std::string filename;
if (argc == 1)
{
filename = "linemod_templates.yml";
detector = cv::linemod::getDefaultLINEMOD();
}
else
{
detector = readLinemod(argv[1]);
std::vector<cv::String> ids = detector->classIds();
num_classes = detector->numClasses();
printf("Loaded %s with %d classes and %d templates\n",
argv[1], num_classes, detector->numTemplates());
if (!ids.empty())
{
printf("Class ids:\n");
std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
}
}
int num_modalities = (int)detector->getModalities().size();
// Open Kinect sensor
cv::VideoCapture capture( cv::CAP_OPENNI );
if (!capture.isOpened())
{
printf("Could not open OpenNI-capable sensor\n");
return -1;
}
capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
//printf("Focal length = %f\n", focal_length);
// Main loop
cv::Mat color, depth;
for(;;)
{
// Capture next color/depth pair
capture.grab();
capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);
std::vector<cv::Mat> sources;
sources.push_back(color);
sources.push_back(depth);
cv::Mat display = color.clone();
if (!learn_online)
{
cv::Point mouse(Mouse::x(), Mouse::y());
int event = Mouse::event();
// Compute ROI centered on current mouse location
cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
cv::Point pt1 = mouse - roi_offset; // top left
cv::Point pt2 = mouse + roi_offset; // bottom right
if (event == cv::EVENT_RBUTTONDOWN)
{
// Compute object mask by subtracting the plane within the ROI
std::vector<CvPoint> chain(4);
chain[0] = pt1;
chain[1] = cv::Point(pt2.x, pt1.y);
chain[2] = pt2;
chain[3] = cv::Point(pt1.x, pt2.y);
cv::Mat mask;
subtractPlane(depth, mask, chain, focal_length);
cv::imshow("mask", mask);
// Extract template
std::string class_id = cv::format("class%d", num_classes);
cv::Rect bb;
extract_timer.start();
int template_id = detector->addTemplate(sources, class_id, mask, &bb);
extract_timer.stop();
if (template_id != -1)
{
printf("*** Added template (id %d) for new object class %d***\n",
template_id, num_classes);
//printf("Extracted at (%d, %d) size %dx%d\n", bb.x, bb.y, bb.width, bb.height);
}
++num_classes;
}
// Draw ROI for display
cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
}
// Perform matching
std::vector<cv::linemod::Match> matches;
std::vector<cv::String> class_ids;
std::vector<cv::Mat> quantized_images;
match_timer.start();
detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
match_timer.stop();
int classes_visited = 0;
std::set<std::string> visited;
for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
{
cv::linemod::Match m = matches[i];
if (visited.insert(m.class_id).second)
{
++classes_visited;
if (show_match_result)
{
printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
}
// Draw matching template
const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));
if (learn_online == true)
{
/// @todo Online learning possibly broken by new gradient feature extraction,
/// which assumes an accurate object outline.
// Compute masks based on convex hull of matched template
cv::Mat color_mask, depth_mask;
std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
cv::Point(m.x, m.y), color.size(),
color_mask, display);
subtractPlane(depth, depth_mask, chain, focal_length);
cv::imshow("mask", depth_mask);
// If pretty sure (but not TOO sure), add new template
if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
{
extract_timer.start();
int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
extract_timer.stop();
if (template_id != -1)
{
printf("*** Added template (id %d) for existing object class %s***\n",
template_id, m.class_id.c_str());
}
}
}
}
}
if (show_match_result && matches.empty())
printf("No matches found...\n");
if (show_timings)
{
printf("Training: %.2fs\n", extract_timer.time());
printf("Matching: %.2fs\n", match_timer.time());
}
if (show_match_result || show_timings)
printf("------------------------------------------------------------\n");
cv::imshow("color", display);
cv::imshow("normals", quantized_images[1]);
cv::FileStorage fs;
char key = (char)cv::waitKey(10);
if( key == 'q' )
break;
switch (key)
{
case 'h':
help();
break;
case 'm':
// toggle printing match result
show_match_result = !show_match_result;
printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
break;
case 't':
// toggle printing timings
show_timings = !show_timings;
printf("Show timings %s\n", show_timings ? "ON" : "OFF");
break;
case 'l':
// toggle online learning
learn_online = !learn_online;
printf("Online learning %s\n", learn_online ? "ON" : "OFF");
break;
case '[':
// decrement threshold
matching_threshold = std::max(matching_threshold - 1, -100);
printf("New threshold: %d\n", matching_threshold);
break;
case ']':
// increment threshold
matching_threshold = std::min(matching_threshold + 1, +100);
printf("New threshold: %d\n", matching_threshold);
break;
case 'w':
// write model to disk
writeLinemod(detector, filename);
printf("Wrote detector and templates to %s\n", filename.c_str());
break;
default:
;
}
}
return 0;
}
static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
real.resize(proj.size());
double f_inv = 1.0 / f;
for (int i = 0; i < (int)proj.size(); ++i)
{
double Z = proj[i].z;
real[i].x = (proj[i].x - 320.) * (f_inv * Z);
real[i].y = (proj[i].y - 240.) * (f_inv * Z);
real[i].z = Z;
}
}
static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
const int l_num_cost_pts = 200;
float l_thres = 4;
IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
cvSet(lp_mask, cvRealScalar(0));
std::vector<CvPoint> l_chain_vector;
float l_chain_length = 0;
float * lp_seg_length = new float[a_chain.size()];
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
l_chain_length += lp_seg_length[l_i];
}
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
if (lp_seg_length[l_i] > 0)
{
int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
float l_cur_len = lp_seg_length[l_i] / l_cur_num;
for (int l_j = 0; l_j < l_cur_num; ++l_j)
{
float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);
CvPoint l_pts;
l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);
l_chain_vector.push_back(l_pts);
}
}
}
std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
//CV_IMAGE_ELEM(lp_mask,unsigned char,(int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X)=255;
}
//cv_show_image(lp_mask,"hallo2");
reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);
CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
CvMat * lp_w = cvCreateMat(4, 1, CV_32F);
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
}
cvSVD(lp_pts, lp_w, 0, lp_v);
float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
CV_MAT_ELEM(*lp_v, float, 1, 3),
CV_MAT_ELEM(*lp_v, float, 2, 3),
CV_MAT_ELEM(*lp_v, float, 3, 3)};
float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);
l_n[0] /= l_norm;
l_n[1] /= l_norm;
l_n[2] /= l_norm;
l_n[3] /= l_norm;
float l_max_dist = 0;
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);
if (fabs(l_dist) > l_max_dist)
l_max_dist = l_dist;
}
//std::cerr << "plane: " << l_n[0] << ";" << l_n[1] << ";" << l_n[2] << ";" << l_n[3] << " maxdist: " << l_max_dist << " end" << std::endl;
int l_minx = ap_depth->width;
int l_miny = ap_depth->height;
int l_maxx = 0;
int l_maxy = 0;
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
l_minx = std::min(l_minx, a_chain[l_i].x);
l_miny = std::min(l_miny, a_chain[l_i].y);
l_maxx = std::max(l_maxx, a_chain[l_i].x);
l_maxy = std::max(l_maxy, a_chain[l_i].y);
}
int l_w = l_maxx - l_minx + 1;
int l_h = l_maxy - l_miny + 1;
int l_nn = (int)a_chain.size();
CvPoint * lp_chain = new CvPoint[l_nn];
for (int l_i = 0; l_i < l_nn; ++l_i)
lp_chain[l_i] = a_chain[l_i];
cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));
delete[] lp_chain;
//cv_show_image(lp_mask,"hallo1");
std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);
int l_ind = 0;
for (int l_r = 0; l_r < l_h; ++l_r)
{
for (int l_c = 0; l_c < l_w; ++l_c)
{
lp_dst_3Dpts[l_ind].x = l_c + l_minx;
lp_dst_3Dpts[l_ind].y = l_r + l_miny;
lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
++l_ind;
}
}
reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);
l_ind = 0;
for (int l_r = 0; l_r < l_h; ++l_r)
{
for (int l_c = 0; l_c < l_w; ++l_c)
{
float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);
++l_ind;
if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)
{
if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
{
for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
{
int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
}
}
else
{
for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
{
int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
}
}
}
}
}
cvReleaseImage(&lp_mask);
cvReleaseMat(&lp_pts);
cvReleaseMat(&lp_w);
cvReleaseMat(&lp_v);
}
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
{
mask = cv::Mat::zeros(depth.size(), CV_8U);
std::vector<IplImage*> tmp;
IplImage mask_ipl = mask;
tmp.push_back(&mask_ipl);
IplImage depth_ipl = depth;
filterPlane(&depth_ipl, tmp, chain, f);
}
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst)
{
templateConvexHull(templates, num_modalities, offset, size, mask);
const int OFFSET = 30;
cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);
CvMemStorage * lp_storage = cvCreateMemStorage(0);
CvTreeNodeIterator l_iterator;
CvSeqReader l_reader;
CvSeq * lp_contour = 0;
cv::Mat mask_copy = mask.clone();
IplImage mask_copy_ipl = mask_copy;
cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
std::vector<CvPoint> l_pts1; // to use as input to cv_primesensor::filter_plane
cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)
{
CvPoint l_pt0;
cvStartReadSeq(lp_contour, &l_reader, 0);
CV_READ_SEQ_ELEM(l_pt0, l_reader);
l_pts1.push_back(l_pt0);
for (int i = 0; i < lp_contour->total; ++i)
{
CvPoint l_pt1;
CV_READ_SEQ_ELEM(l_pt1, l_reader);
/// @todo Really need dst at all? Can just as well do this outside
cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);
l_pt0 = l_pt1;
l_pts1.push_back(l_pt0);
}
}
cvReleaseMemStorage(&lp_storage);
return l_pts1;
}
// Adapted from cv_show_angles
cv::Mat displayQuantized(const cv::Mat& quantized)
{
cv::Mat color(quantized.size(), CV_8UC3);
for (int r = 0; r < quantized.rows; ++r)
{
const uchar* quant_r = quantized.ptr(r);
cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);
for (int c = 0; c < quantized.cols; ++c)
{
cv::Vec3b& bgr = color_r[c];
switch (quant_r[c])
{
case 0: bgr[0]= 0; bgr[1]= 0; bgr[2]= 0; break;
case 1: bgr[0]= 55; bgr[1]= 55; bgr[2]= 55; break;
case 2: bgr[0]= 80; bgr[1]= 80; bgr[2]= 80; break;
case 4: bgr[0]=105; bgr[1]=105; bgr[2]=105; break;
case 8: bgr[0]=130; bgr[1]=130; bgr[2]=130; break;
case 16: bgr[0]=155; bgr[1]=155; bgr[2]=155; break;
case 32: bgr[0]=180; bgr[1]=180; bgr[2]=180; break;
case 64: bgr[0]=205; bgr[1]=205; bgr[2]=205; break;
case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230; break;
case 255: bgr[0]= 0; bgr[1]= 0; bgr[2]=255; break;
default: bgr[0]= 0; bgr[1]=255; bgr[2]= 0; break;
}
}
}
return color;
}
// Adapted from cv_line_template::convex_hull
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst)
{
std::vector<cv::Point> points;
for (int m = 0; m < num_modalities; ++m)
{
for (int i = 0; i < (int)templates[m].features.size(); ++i)
{
cv::linemod::Feature f = templates[m].features[i];
points.push_back(cv::Point(f.x, f.y) + offset);
}
}
std::vector<cv::Point> hull;
cv::convexHull(points, hull);
dst = cv::Mat::zeros(size, CV_8U);
const int hull_count = (int)hull.size();
const cv::Point* hull_pts = &hull[0];
cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}
void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
CV_RGB(0, 255, 0),
CV_RGB(255, 255, 0),
CV_RGB(255, 140, 0),
CV_RGB(255, 0, 0) };
for (int m = 0; m < num_modalities; ++m)
{
// NOTE: Original demo recalculated max response for each feature in the TxT
// box around it and chose the display color based on that response. Here
// the display color just depends on the modality.
cv::Scalar color = COLORS[m];
for (int i = 0; i < (int)templates[m].features.size(); ++i)
{
cv::linemod::Feature f = templates[m].features[i];
cv::Point pt(f.x + offset.x, f.y + offset.y);
cv::circle(dst, pt, T / 2, color);
}
}
}
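This LINE-MOD demo leaves the main tree with this commit. The cv::linemod classes it uses are expected to live on in opencv_contrib's rgbd module; both the destination and the header path below are assumptions, not stated by the diff:

    // presumed opencv_contrib location of the LINE-MOD API used above
    #include "opencv2/rgbd/linemod.hpp"

    cv::Ptr<cv::linemod::Detector> makeDetector()
    {
        return cv::linemod::getDefaultLINEMOD();
    }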


@@ -1,5 +1,6 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>


@@ -4,6 +4,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;


@@ -1,4 +1,5 @@
#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>


@@ -1,203 +0,0 @@
#include "opencv2/video/tracking_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <time.h>
#include <stdio.h>
#include <ctype.h>
static void help(void)
{
printf(
"\nThis program demonstrates the use of motion templates -- basically using the gradients\n"
"of thresholded layers of decaying frame differencing. New movements are stamped on top with the floating system\n"
"time code, and motions too old are thresholded away. This is the 'motion history file'. The program reads from the camera of your choice or from\n"
"a file. Gradients of the motion history are used to detect the direction of motion, etc.\n"
"Usage :\n"
"./motempl [camera number 0-n or file name, default is camera 0]\n"
);
}
// various tracking parameters (in seconds)
const double MHI_DURATION = 1;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
const int N = 4;
// ring image buffer
IplImage **buf = 0;
int last = 0;
// temporary images
IplImage *mhi = 0; // MHI
IplImage *orient = 0; // orientation
IplImage *mask = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage
// parameters:
// img - input video frame
// dst - resultant motion picture
// args - optional parameters
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
CvSize size = cvSize(img->width,img->height); // get current frame size
int i, idx1 = last, idx2;
IplImage* silh;
CvSeq* seq;
CvRect comp_rect;
double count;
double angle;
CvPoint center;
double magnitude;
CvScalar color;
// allocate images at the beginning or
// reallocate them if the frame size is changed
if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
if( buf == 0 ) {
buf = (IplImage**)malloc(N*sizeof(buf[0]));
memset( buf, 0, N*sizeof(buf[0]));
}
for( i = 0; i < N; i++ ) {
cvReleaseImage( &buf[i] );
buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
cvZero( buf[i] );
}
cvReleaseImage( &mhi );
cvReleaseImage( &orient );
cvReleaseImage( &segmask );
cvReleaseImage( &mask );
mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
cvZero( mhi ); // clear MHI at the beginning
orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
}
cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
idx2 = (last + 1) % N; // index of (last - (N-1))th frame
last = idx2;
silh = buf[idx2];
cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI
// convert MHI to blue 8u image
cvCvtScale( mhi, mask, 255./MHI_DURATION,
(MHI_DURATION - timestamp)*255./MHI_DURATION );
cvZero( dst );
cvMerge( mask, 0, 0, 0, dst );
// calculate motion gradient orientation and valid orientation mask
cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
if( !storage )
storage = cvCreateMemStorage(0);
else
cvClearMemStorage(storage);
// segment motion: get sequence of motion components
// segmask is marked motion components map. It is not used further
seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );
// iterate through the motion components,
// One more iteration (i == -1) corresponds to the whole image (global motion)
for( i = -1; i < seq->total; i++ ) {
if( i < 0 ) { // case of the whole image
comp_rect = cvRect( 0, 0, size.width, size.height );
color = CV_RGB(255,255,255);
magnitude = 100;
}
else { // i-th motion component
comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
continue;
color = CV_RGB(255,0,0);
magnitude = 30;
}
// select component ROI
cvSetImageROI( silh, comp_rect );
cvSetImageROI( mhi, comp_rect );
cvSetImageROI( orient, comp_rect );
cvSetImageROI( mask, comp_rect );
// calculate orientation
angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
angle = 360.0 - angle; // adjust for images with top-left origin
count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
cvResetImageROI( mhi );
cvResetImageROI( orient );
cvResetImageROI( mask );
cvResetImageROI( silh );
// check for the case of little motion
if( count < comp_rect.width*comp_rect.height * 0.05 )
continue;
// draw a clock with arrow indicating the direction
center = cvPoint( (comp_rect.x + comp_rect.width/2),
(comp_rect.y + comp_rect.height/2) );
cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
}
}
int main(int argc, char** argv)
{
IplImage* motion = 0;
CvCapture* capture = 0;
help();
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
else if( argc == 2 )
capture = cvCaptureFromFile( argv[1] );
if( capture )
{
cvNamedWindow( "Motion", 1 );
for(;;)
{
IplImage* image = cvQueryFrame( capture );
if( !image )
break;
if( !motion )
{
motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
cvZero( motion );
motion->origin = image->origin;
}
update_mhi( image, motion, 30 );
cvShowImage( "Motion", motion );
if( cvWaitKey(10) >= 0 )
break;
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Motion" );
}
return 0;
}
#ifdef _EiC
main(1,"motempl.c");
#endif
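The C motion-template API this sample was built on (cvUpdateMotionHistory and friends) is dropped here. Its C++ counterparts are expected under cv::motempl in opencv_contrib's optflow module; that destination is an assumption, the diff does not name one:

    // presumed opencv_contrib counterpart of the removed cvUpdateMotionHistory call
    #include "opencv2/optflow/motempl.hpp"

    void updateMHI(const cv::Mat& silh, cv::Mat& mhi, double timestamp)
    {
        const double MHI_DURATION = 1;   // same constant as the removed sample
        cv::motempl::updateMotionHistory(silh, mhi, timestamp, MHI_DURATION);
    }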


@@ -1,322 +0,0 @@
#include "opencv2/core/core_c.h"
#include "opencv2/ml/ml.hpp"
#include <stdio.h>
static void help()
{
printf("\nThis program demonstrates the use of OpenCV's decision tree function for learning and predicting data\n"
"Usage :\n"
"./mushroom <path to agaricus-lepiota.data>\n"
"\n"
"The sample demonstrates how to build a decision tree for classifying mushrooms.\n"
"It uses the sample base agaricus-lepiota.data from UCI Repository, here is the link:\n"
"\n"
"Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
"UCI Repository of machine learning databases\n"
"[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
"Irvine, CA: University of California, Department of Information and Computer Science.\n"
"\n"
"// loads the mushroom database, which is a text file, containing\n"
"// one training sample per row, all the input variables and the output variable are categorical,\n"
"// the values are encoded by characters.\n\n");
}
static int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
{
const int M = 1024;
FILE* f = fopen( filename, "rt" );
CvMemStorage* storage;
CvSeq* seq;
char buf[M+2], *ptr;
float* el_ptr;
CvSeqReader reader;
int i, j, var_count = 0;
if( !f )
return 0;
// read the first line and determine the number of variables
if( !fgets( buf, M, f ))
{
fclose(f);
return 0;
}
for( ptr = buf; *ptr != '\0'; ptr++ )
var_count += *ptr == ',';
assert( ptr - buf == (var_count+1)*2 );
// create temporary memory storage to store the whole database
el_ptr = new float[var_count+1];
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );
for(;;)
{
for( i = 0; i <= var_count; i++ )
{
int c = buf[i*2];
el_ptr[i] = c == '?' ? -1.f : (float)c;
}
if( i != var_count+1 )
break;
cvSeqPush( seq, el_ptr );
if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
break;
}
fclose(f);
// allocate the output matrices and copy the base there
*data = cvCreateMat( seq->total, var_count, CV_32F );
*missing = cvCreateMat( seq->total, var_count, CV_8U );
*responses = cvCreateMat( seq->total, 1, CV_32F );
cvStartReadSeq( seq, &reader );
for( i = 0; i < seq->total; i++ )
{
const float* sdata = (float*)reader.ptr + 1;
float* ddata = data[0]->data.fl + var_count*i;
float* dr = responses[0]->data.fl + i;
uchar* dm = missing[0]->data.ptr + var_count*i;
for( j = 0; j < var_count; j++ )
{
ddata[j] = sdata[j];
dm[j] = sdata[j] < 0;
}
*dr = sdata[-1];
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
cvReleaseMemStorage( &storage );
delete [] el_ptr;
return 1;
}
static CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
const CvMat* responses, float p_weight )
{
CvDTree* dtree;
CvMat* var_type;
int i, hr1 = 0, hr2 = 0, p_total = 0;
float priors[] = { 1, p_weight };
var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical
dtree = new CvDTree;
dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
CvDTreeParams( 8, // max depth
10, // min sample count
0, // regression accuracy: N/A here
true, // compute surrogate split, as we have missing data
15, // max number of categories (use sub-optimal algorithm for larger numbers)
10, // the number of cross-validation folds
true, // use 1SE rule => smaller tree
true, // throw away the pruned tree branches
priors // the array of priors, the bigger p_weight, the more attention
// to the poisonous mushrooms
// (a mushroom will be judged to be poisonous with a bigger chance)
));
// compute hit-rate on the training database, demonstrates predict usage.
for( i = 0; i < data->rows; i++ )
{
CvMat sample, mask;
cvGetRow( data, &sample, i );
cvGetRow( missing, &mask, i );
double r = dtree->predict( &sample, &mask )->value;
int d = fabs(r - responses->data.fl[i]) >= FLT_EPSILON;
if( d )
{
if( r != 'p' )
hr1++;
else
hr2++;
}
p_total += responses->data.fl[i] == 'p';
}
printf( "Results on the training database:\n"
"\tPoisonous mushrooms mis-predicted: %d (%g%%)\n"
"\tFalse-alarms: %d (%g%%)\n", hr1, (double)hr1*100/p_total,
hr2, (double)hr2*100/(data->rows - p_total) );
cvReleaseMat( &var_type );
return dtree;
}
static const char* var_desc[] =
{
"cap shape (bell=b,conical=c,convex=x,flat=f)",
"cap surface (fibrous=f,grooves=g,scaly=y,smooth=s)",
"cap color (brown=n,buff=b,cinnamon=c,gray=g,green=r,\n\tpink=p,purple=u,red=e,white=w,yellow=y)",
"bruises? (bruises=t,no=f)",
"odor (almond=a,anise=l,creosote=c,fishy=y,foul=f,\n\tmusty=m,none=n,pungent=p,spicy=s)",
"gill attachment (attached=a,descending=d,free=f,notched=n)",
"gill spacing (close=c,crowded=w,distant=d)",
"gill size (broad=b,narrow=n)",
"gill color (black=k,brown=n,buff=b,chocolate=h,gray=g,\n\tgreen=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y)",
"stalk shape (enlarging=e,tapering=t)",
"stalk root (bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r)",
"stalk surface above ring (fibrous=f,scaly=y,silky=k,smooth=s)",
"stalk surface below ring (fibrous=f,scaly=y,silky=k,smooth=s)",
"stalk color above ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
"stalk color below ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
"veil type (partial=p,universal=u)",
"veil color (brown=n,orange=o,white=w,yellow=y)",
"ring number (none=n,one=o,two=t)",
"ring type (cobwebby=c,evanescent=e,flaring=f,large=l,\n\tnone=n,pendant=p,sheathing=s,zone=z)",
"spore print color (black=k,brown=n,buff=b,chocolate=h,green=r,\n\torange=o,purple=u,white=w,yellow=y)",
"population (abundant=a,clustered=c,numerous=n,\n\tscattered=s,several=v,solitary=y)",
"habitat (grasses=g,leaves=l,meadows=m,paths=p\n\turban=u,waste=w,woods=d)",
0
};
static void print_variable_importance( CvDTree* dtree )
{
const CvMat* var_importance = dtree->get_var_importance();
int i;
char input[1000];
if( !var_importance )
{
printf( "Error: Variable importance cannot be retrieved\n" );
return;
}
printf( "Print variable importance information? (y/n) " );
int values_read = scanf( "%1s", input );
CV_Assert(values_read == 1);
if( input[0] != 'y' && input[0] != 'Y' )
return;
for( i = 0; i < var_importance->cols*var_importance->rows; i++ )
{
double val = var_importance->data.db[i];
char buf[100];
int len = (int)(strchr( var_desc[i], '(' ) - var_desc[i] - 1);
strncpy( buf, var_desc[i], len );
buf[len] = '\0';
printf( "%s", buf );
printf( ": %g%%\n", val*100. );
}
}
static void interactive_classification( CvDTree* dtree )
{
char input[1000];
const CvDTreeNode* root;
CvDTreeTrainData* data;
if( !dtree )
return;
root = dtree->get_root();
data = dtree->get_data();
for(;;)
{
const CvDTreeNode* node;
printf( "Start/Proceed with interactive mushroom classification (y/n): " );
int values_read = scanf( "%1s", input );
CV_Assert(values_read == 1);
if( input[0] != 'y' && input[0] != 'Y' )
break;
printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );
// custom version of predict
node = root;
for(;;)
{
CvDTreeSplit* split = node->split;
int dir = 0;
if( !node->left || node->Tn <= dtree->get_pruned_tree_idx() || !node->split )
break;
for( ; split != 0; )
{
int vi = split->var_idx, j;
int count = data->cat_count->data.i[vi];
const int* map = data->cat_map->data.i + data->cat_ofs->data.i[vi];
printf( "%s: ", var_desc[vi] );
values_read = scanf( "%1s", input );
CV_Assert(values_read == 1);
if( input[0] == '?' )
{
split = split->next;
continue;
}
// convert the input character to the normalized value of the variable
for( j = 0; j < count; j++ )
if( map[j] == input[0] )
break;
if( j < count )
{
dir = (split->subset[j>>5] & (1 << (j&31))) ? -1 : 1;
if( split->inversed )
dir = -dir;
break;
}
else
printf( "Error: unrecognized value\n" );
}
if( !dir )
{
printf( "Impossible to classify the sample\n");
node = 0;
break;
}
node = dir < 0 ? node->left : node->right;
}
if( node )
printf( "Prediction result: the mushroom is %s\n",
node->class_idx == 0 ? "EDIBLE" : "POISONOUS" );
printf( "\n-----------------------------\n" );
}
}
int main( int argc, char** argv )
{
CvMat *data = 0, *missing = 0, *responses = 0;
CvDTree* dtree;
const char* base_path = argc >= 2 ? argv[1] : "agaricus-lepiota.data";
help();
if( !mushroom_read_database( base_path, &data, &missing, &responses ) )
{
printf( "\nUnable to load the training database\n\n");
help();
return -1;
}
dtree = mushroom_create_dtree( data, missing, responses,
10 // poisonous mushrooms will have 10x higher weight in the decision tree
);
cvReleaseMat( &data );
cvReleaseMat( &missing );
cvReleaseMat( &responses );
print_variable_importance( dtree );
interactive_classification( dtree );
delete dtree;
return 0;
}
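The CvDTree API used throughout this removed sample maps onto cv::ml::DTrees in 3.x. A hedged sketch of the equivalent training call, with setters mirroring the CvDTreeParams fields commented above (samples, responses, and priors are assumed to be prepared as in the old code):

    #include "opencv2/ml.hpp"
    using namespace cv::ml;

    Ptr<DTrees> trainTree(const cv::Mat& samples, const cv::Mat& responses,
                          const cv::Mat& priors)
    {
        Ptr<DTrees> dtree = DTrees::create();
        dtree->setMaxDepth(8);             // max depth
        dtree->setMinSampleCount(10);      // min sample count
        dtree->setUseSurrogates(true);     // we have missing data
        dtree->setMaxCategories(15);       // max number of categories
        dtree->setCVFolds(10);             // cross-validation folds
        dtree->setUse1SERule(true);        // smaller tree
        dtree->setTruncatePrunedTree(true);
        dtree->setPriors(priors);
        dtree->train(TrainData::create(samples, ROW_SAMPLE, responses));
        return dtree;
    }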


@@ -17,6 +17,7 @@
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>


@@ -1,3 +1,4 @@
+#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

View File

@@ -43,6 +43,7 @@
#include <sstream>
#include <opencv2/core/core.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

View File

@@ -1,4 +1,5 @@
#include "opencv2/core/core.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

View File

@@ -12,6 +12,7 @@
using namespace std;
using namespace cv;
using namespace cv::ml;
const Scalar WHITE_COLOR = Scalar(255,255,255);
const string winName = "points";
@@ -22,18 +23,20 @@ RNG rng;
vector<Point> trainedPoints;
vector<int> trainedPointsMarkers;
vector<Scalar> classColors;
const int MAX_CLASSES = 2;
vector<Vec3b> classColors(MAX_CLASSES);
int currentClass = 0;
vector<int> classCounters(MAX_CLASSES);
#define _NBC_ 0 // normal Bayesian classifier
#define _KNN_ 0 // k nearest neighbors classifier
#define _SVM_ 0 // support vector machine
#define _NBC_ 1 // normal Bayesian classifier
#define _KNN_ 1 // k nearest neighbors classifier
#define _SVM_ 1 // support vector machine
#define _DT_ 1 // decision tree
#define _BT_ 0 // AdaBoost
#define _BT_ 1 // AdaBoost
#define _GBT_ 0 // gradient boosted trees
#define _RF_ 0 // random forest
#define _ERT_ 0 // extremely randomized trees
#define _ANN_ 0 // artificial neural networks
#define _EM_ 0 // expectation-maximization
#define _RF_ 1 // random forest
#define _ANN_ 1 // artificial neural networks
#define _EM_ 1 // expectation-maximization
static void on_mouse( int event, int x, int y, int /*flags*/, void* )
{
@@ -44,76 +47,43 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
if( event == EVENT_LBUTTONUP )
{
if( classColors.empty() )
return;
trainedPoints.push_back( Point(x,y) );
trainedPointsMarkers.push_back( (int)(classColors.size()-1) );
trainedPointsMarkers.push_back( currentClass );
classCounters[currentClass]++;
updateFlag = true;
}
else if( event == EVENT_RBUTTONUP )
{
#if _BT_
if( classColors.size() < 2 )
{
#endif
classColors.push_back( Scalar((uchar)rng(256), (uchar)rng(256), (uchar)rng(256)) );
updateFlag = true;
#if _BT_
}
else
cout << "New class can not be added, because CvBoost can only be used for 2-class classification" << endl;
#endif
}
//draw
if( updateFlag )
{
img = Scalar::all(0);
// put the text
stringstream text;
text << "current class " << classColors.size()-1;
putText( img, text.str(), Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total classes " << classColors.size();
putText( img, text.str(), Point(10,50), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total points " << trainedPoints.size();
putText(img, text.str(), Point(10,75), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
// draw points
for( size_t i = 0; i < trainedPoints.size(); i++ )
circle( img, trainedPoints[i], 5, classColors[trainedPointsMarkers[i]], -1 );
{
Vec3b c = classColors[trainedPointsMarkers[i]];
circle( img, trainedPoints[i], 5, Scalar(c), -1 );
}
imshow( winName, img );
}
}
static void prepare_train_data( Mat& samples, Mat& classes )
static Mat prepare_train_samples(const vector<Point>& pts)
{
Mat( trainedPoints ).copyTo( samples );
Mat( trainedPointsMarkers ).copyTo( classes );
// reshape trainData and change its type
samples = samples.reshape( 1, samples.rows );
samples.convertTo( samples, CV_32FC1 );
Mat samples;
Mat(pts).reshape(1, (int)pts.size()).convertTo(samples, CV_32F);
return samples;
}
#if _NBC_
static void find_decision_boundary_NBC()
static Ptr<TrainData> prepare_train_data()
{
img.copyTo( imgDst );
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
CvNormalBayesClassifier normalBayesClassifier( trainSamples, trainClasses );
Mat samples = prepare_train_samples(trainedPoints);
return TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers));
}
static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
{
Mat testSample( 1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
@@ -122,378 +92,171 @@ static void find_decision_boundary_NBC()
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)normalBayesClassifier.predict( testSample );
circle( imgDst, Point(x,y), 1, classColors[response] );
int response = (int)model->predict( testSample );
dst.at<Vec3b>(y, x) = classColors[response];
}
}
}
#if _NBC_
static void find_decision_boundary_NBC()
{
// learn classifier
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data(), NormalBayesClassifier::Params());
predict_and_paint(normalBayesClassifier, imgDst);
}
#endif
#if _KNN_
static void find_decision_boundary_KNN( int K )
{
img.copyTo( imgDst );
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
#if defined HAVE_OPENCV_OCL && _OCL_KNN_
cv::ocl::KNearestNeighbour knnClassifier;
Mat temp, result;
knnClassifier.train(trainSamples, trainClasses, temp, false, K);
cv::ocl::oclMat testSample_ocl, result_ocl;
#else
CvKNearest knnClassifier( trainSamples, trainClasses, Mat(), false, K );
#endif
Mat testSample( 1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
#if defined HAVE_OPENCV_OCL && _OCL_KNN_
testSample_ocl.upload(testSample);
knnClassifier.find_nearest(testSample_ocl, K, result_ocl);
result_ocl.download(result);
int response = saturate_cast<int>(result.at<float>(0));
circle(imgDst, Point(x, y), 1, classColors[response]);
#else
int response = (int)knnClassifier.find_nearest( testSample, K );
circle( imgDst, Point(x,y), 1, classColors[response] );
#endif
}
}
Ptr<KNearest> knn = StatModel::train<KNearest>(prepare_train_data(), KNearest::Params(K, true));
predict_and_paint(knn, imgDst);
}
#endif
#if _SVM_
static void find_decision_boundary_SVM( CvSVMParams params )
static void find_decision_boundary_SVM( SVM::Params params )
{
img.copyTo( imgDst );
Ptr<SVM> svm = StatModel::train<SVM>(prepare_train_data(), params);
predict_and_paint(svm, imgDst);
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
#if defined HAVE_OPENCV_OCL && _OCL_SVM_
cv::ocl::CvSVM_OCL svmClassifier(trainSamples, trainClasses, Mat(), Mat(), params);
#else
CvSVM svmClassifier( trainSamples, trainClasses, Mat(), Mat(), params );
#endif
Mat testSample( 1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
Mat sv = svm->getSupportVectors();
for( int i = 0; i < sv.rows; i++ )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)svmClassifier.predict( testSample );
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
const float* supportVector = sv.ptr<float>(i);
circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, Scalar(255,255,255), -1 );
}
for( int i = 0; i < svmClassifier.get_support_vector_count(); i++ )
{
const float* supportVector = svmClassifier.get_support_vector(i);
circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, CV_RGB(255,255,255), -1 );
}
}
#endif
#if _DT_
static void find_decision_boundary_DT()
{
img.copyTo( imgDst );
DTrees::Params params;
params.maxDepth = 8;
params.minSampleCount = 2;
params.useSurrogates = false;
params.CVFolds = 0; // the number of cross-validation folds
params.use1SERule = false;
params.truncatePrunedTree = false;
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
Ptr<DTrees> dtree = StatModel::train<DTrees>(prepare_train_data(), params);
// learn classifier
CvDTree dtree;
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
CvDTreeParams params;
params.max_depth = 8;
params.min_sample_count = 2;
params.use_surrogates = false;
params.cv_folds = 0; // the number of cross-validation folds
params.use_1se_rule = false;
params.truncate_pruned_tree = false;
dtree.train( trainSamples, CV_ROW_SAMPLE, trainClasses,
Mat(), Mat(), var_types, Mat(), params );
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)dtree.predict( testSample )->value;
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
predict_and_paint(dtree, imgDst);
}
#endif
#if _BT_
void find_decision_boundary_BT()
static void find_decision_boundary_BT()
{
img.copyTo( imgDst );
Boost::Params params( Boost::DISCRETE, // boost_type
100, // weak_count
0.95, // weight_trim_rate
2, // max_depth
false, //use_surrogates
Mat() // priors
);
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
CvBoost boost;
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
CvBoostParams params( CvBoost::DISCRETE, // boost_type
100, // weak_count
0.95, // weight_trim_rate
2, // max_depth
false, //use_surrogates
0 // priors
);
boost.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)boost.predict( testSample );
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
Ptr<Boost> boost = StatModel::train<Boost>(prepare_train_data(), params);
predict_and_paint(boost, imgDst);
}
#endif
#if _GBT_
void find_decision_boundary_GBT()
static void find_decision_boundary_GBT()
{
img.copyTo( imgDst );
GBTrees::Params params( GBTrees::DEVIANCE_LOSS, // loss_function_type
100, // weak_count
0.1f, // shrinkage
1.0f, // subsample_portion
2, // max_depth
false // use_surrogates )
);
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
CvGBTrees gbtrees;
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
CvGBTreesParams params( CvGBTrees::DEVIANCE_LOSS, // loss_function_type
100, // weak_count
0.1f, // shrinkage
1.0f, // subsample_portion
2, // max_depth
false // use_surrogates )
);
gbtrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)gbtrees.predict( testSample );
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
Ptr<GBTrees> gbtrees = StatModel::train<GBTrees>(prepare_train_data(), params);
predict_and_paint(gbtrees, imgDst);
}
#endif
#if _RF_
void find_decision_boundary_RF()
static void find_decision_boundary_RF()
{
img.copyTo( imgDst );
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
CvRTrees rtrees;
CvRTParams params( 4, // max_depth,
RTrees::Params params( 4, // max_depth,
2, // min_sample_count,
0.f, // regression_accuracy,
false, // use_surrogates,
16, // max_categories,
0, // priors,
Mat(), // priors,
false, // calc_var_importance,
1, // nactive_vars,
5, // max_num_of_trees_in_the_forest,
0, // forest_accuracy,
CV_TERMCRIT_ITER // termcrit_type
TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest,
);
rtrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), Mat(), Mat(), params );
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)rtrees.predict( testSample );
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
Ptr<RTrees> rtrees = StatModel::train<RTrees>(prepare_train_data(), params);
predict_and_paint(rtrees, imgDst);
}
#endif
#if _ERT_
void find_decision_boundary_ERT()
{
img.copyTo( imgDst );
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// learn classifier
CvERTrees ertrees;
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
CvRTParams params( 4, // max_depth,
2, // min_sample_count,
0.f, // regression_accuracy,
false, // use_surrogates,
16, // max_categories,
0, // priors,
false, // calc_var_importance,
1, // nactive_vars,
5, // max_num_of_trees_in_the_forest,
0, // forest_accuracy,
CV_TERMCRIT_ITER // termcrit_type
);
ertrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)ertrees.predict( testSample );
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
}
#endif
#if _ANN_
void find_decision_boundary_ANN( const Mat& layer_sizes )
static void find_decision_boundary_ANN( const Mat& layer_sizes )
{
img.copyTo( imgDst );
ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON),
ANN_MLP::Params::BACKPROP, 0.001);
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
// prepare trainClasses
trainClasses.create( trainedPoints.size(), classColors.size(), CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
{
for( int k = 0; k < trainClasses.cols; k++ )
{
if( k == trainedPointsMarkers[i] )
trainClasses.at<float>(i,k) = 1;
else
trainClasses.at<float>(i,k) = 0;
}
trainClasses.at<float>(i, trainedPointsMarkers[i]) = 1.f;
}
Mat weights( 1, trainedPoints.size(), CV_32FC1, Scalar::all(1) );
Mat samples = prepare_train_samples(trainedPoints);
Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);
// learn classifier
CvANN_MLP ann( layer_sizes, CvANN_MLP::SIGMOID_SYM, 1, 1 );
ann.train( trainSamples, trainClasses, weights );
Mat testSample( 1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
Mat outputs( 1, classColors.size(), CV_32FC1, testSample.data );
ann.predict( testSample, outputs );
Point maxLoc;
minMaxLoc( outputs, 0, 0, 0, &maxLoc );
circle( imgDst, Point(x,y), 2, classColors[maxLoc.x], 1 );
}
}
Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params);
predict_and_paint(ann, imgDst);
}
#endif
#if _EM_
void find_decision_boundary_EM()
static void find_decision_boundary_EM()
{
img.copyTo( imgDst );
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
Mat samples = prepare_train_samples(trainedPoints);
vector<cv::EM> em_models(classColors.size());
int i, j, nmodels = (int)classColors.size();
vector<Ptr<EM> > em_models(nmodels);
Mat modelSamples;
CV_Assert((int)trainClasses.total() == trainSamples.rows);
CV_Assert((int)trainClasses.type() == CV_32SC1);
for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
for( i = 0; i < nmodels; i++ )
{
const int componentCount = 3;
em_models[modelIndex] = EM(componentCount, cv::EM::COV_MAT_DIAGONAL);
Mat modelSamples;
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
modelSamples.release();
for( j = 0; j < samples.rows; j++ )
{
if(trainClasses.at<int>(sampleIndex) == (int)modelIndex)
modelSamples.push_back(trainSamples.row(sampleIndex));
if( trainedPointsMarkers[j] == i )
modelSamples.push_back(samples.row(j));
}
// learn models
if(!modelSamples.empty())
em_models[modelIndex].train(modelSamples);
if( !modelSamples.empty() )
{
em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(),
EM::Params(componentCount, EM::COV_MAT_DIAGONAL));
}
}
// classify coordinate-plane points using the Bayes classifier, i.e.
// y(x) = argmax_{i=1..modelsCount} likelihoods_i(x)
Mat testSample(1, 2, CV_32FC1 );
Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX));
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
@@ -501,17 +264,14 @@ void find_decision_boundary_EM()
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
Mat logLikelihoods(1, em_models.size(), CV_64FC1, Scalar(-DBL_MAX));
for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
for( i = 0; i < nmodels; i++ )
{
if(em_models[modelIndex].isTrained())
logLikelihoods.at<double>(modelIndex) = em_models[modelIndex].predict(testSample)[0];
if( !em_models[i].empty() )
logLikelihoods.at<double>(i) = em_models[i]->predict2(testSample, noArray())[0];
}
Point maxLoc;
minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
int response = maxLoc.x;
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
imgDst.at<Vec3b>(y, x) = classColors[maxLoc.x];
}
}
}
@@ -520,7 +280,7 @@ void find_decision_boundary_EM()
int main()
{
cout << "Use:" << endl
<< " right mouse button - to add new class;" << endl
<< " key '0' .. '1' - switch to class #n" << endl
<< " left mouse button - to add new point;" << endl
<< " key 'r' - to run the ML model;" << endl
<< " key 'i' - to init (clear) the data." << endl << endl;
@@ -532,6 +292,9 @@ int main()
imshow( "points", img );
setMouseCallback( "points", on_mouse );
classColors[0] = Vec3b(0, 255, 0);
classColors[1] = Vec3b(0, 0, 255);
for(;;)
{
uchar key = (uchar)waitKey();
@@ -542,98 +305,94 @@ int main()
{
img = Scalar::all(0);
classColors.clear();
trainedPoints.clear();
trainedPointsMarkers.clear();
classCounters.assign(MAX_CLASSES, 0);
imshow( winName, img );
}
if( key == '0' || key == '1' )
{
currentClass = key - '0';
}
if( key == 'r' ) // run
{
double minVal = 0;
minMaxLoc(classCounters, &minVal, 0, 0, 0);
if( minVal == 0 )
{
printf("each class should have at least 1 point\n");
continue;
}
img.copyTo( imgDst );
#if _NBC_
find_decision_boundary_NBC();
namedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
int K = 3;
find_decision_boundary_KNN( K );
namedWindow( "kNN", WINDOW_AUTOSIZE );
imshow( "kNN", imgDst );
K = 15;
find_decision_boundary_KNN( K );
namedWindow( "kNN2", WINDOW_AUTOSIZE );
imshow( "kNN2", imgDst );
#endif
#if _SVM_
//(1)-(2)separable and not sets
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::POLY; //CvSVM::LINEAR;
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::POLY; //CvSVM::LINEAR;
params.degree = 0.5;
params.gamma = 1;
params.coef0 = 1;
params.C = 1;
params.nu = 0.5;
params.p = 0;
params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 1000, 0.01);
params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);
find_decision_boundary_SVM( params );
namedWindow( "classificationSVM1", WINDOW_AUTOSIZE );
imshow( "classificationSVM1", imgDst );
params.C = 10;
find_decision_boundary_SVM( params );
namedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
imshow( "classificationSVM2", imgDst );
#endif
#if _DT_
find_decision_boundary_DT();
namedWindow( "DT", WINDOW_AUTOSIZE );
imshow( "DT", imgDst );
#endif
#if _BT_
find_decision_boundary_BT();
namedWindow( "BT", WINDOW_AUTOSIZE );
imshow( "BT", imgDst);
#endif
#if _GBT_
find_decision_boundary_GBT();
namedWindow( "GBT", WINDOW_AUTOSIZE );
imshow( "GBT", imgDst);
#endif
#if _RF_
find_decision_boundary_RF();
namedWindow( "RF", WINDOW_AUTOSIZE );
imshow( "RF", imgDst);
#endif
#if _ERT_
find_decision_boundary_ERT();
namedWindow( "ERT", WINDOW_AUTOSIZE );
imshow( "ERT", imgDst);
#endif
#if _ANN_
Mat layer_sizes1( 1, 3, CV_32SC1 );
layer_sizes1.at<int>(0) = 2;
layer_sizes1.at<int>(1) = 5;
layer_sizes1.at<int>(2) = classColors.size();
layer_sizes1.at<int>(2) = (int)classColors.size();
find_decision_boundary_ANN( layer_sizes1 );
namedWindow( "ANN", WINDOW_AUTOSIZE );
imshow( "ANN", imgDst );
#endif
#if _EM_
find_decision_boundary_EM();
namedWindow( "EM", WINDOW_AUTOSIZE );
imshow( "EM", imgDst );
#endif
}
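Every learner in the rewritten file now follows the same two-step pattern: wrap the points in a TrainData, train through the templated StatModel::train, then call predict. A minimal self-contained sketch of that pattern (the sample values are invented):

#include "opencv2/ml/ml.hpp"
#include <cstdio>
using namespace cv;
using namespace cv::ml;

int main()
{
    // four 2-D points, two per class
    Mat samples = (Mat_<float>(4, 2) << 10, 10, 20, 20, 200, 200, 210, 190);
    Mat labels = (Mat_<int>(4, 1) << 0, 0, 1, 1);
    Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, labels);
    // any StatModel subclass trains the same way; KNearest shown here
    Ptr<KNearest> knn = StatModel::train<KNearest>(tdata, KNearest::Params(3, true));
    Mat query = (Mat_<float>(1, 2) << 15, 15);
    printf("predicted class: %d\n", (int)knn->predict(query)); // expect 0
    return 0;
}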

View File

@@ -1,4 +1,5 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <ctype.h>

View File

@@ -1,4 +1,5 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/videoio/videoio.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/video/background_segm.hpp"
#include <stdio.h>
@@ -87,8 +88,8 @@ int main(int argc, char** argv)
namedWindow("video", 1);
namedWindow("segmented", 1);
Ptr<BackgroundSubtractorMOG> bgsubtractor=createBackgroundSubtractorMOG();
bgsubtractor->setNoiseSigma(10);
Ptr<BackgroundSubtractorMOG2> bgsubtractor=createBackgroundSubtractorMOG2();
bgsubtractor->setVarThreshold(10);
for(;;)
{

View File

@@ -12,6 +12,8 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <ctype.h>

View File

@@ -3,6 +3,7 @@
*/
#include "opencv2/shape.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <opencv2/core/utility.hpp>

View File

@@ -1,74 +0,0 @@
/*
 * shape_transformation.cpp -- Shape transformation demo for shape matching
*/
#include "opencv2/shape.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
static void help()
{
printf("\nThis program demonstrates how to use common interface for shape transformers\n"
"Call\n"
"shape_transformation [image1] [image2]\n");
}
int main(int argc, char** argv)
{
help();
if(argc < 3)
{
printf("Not enough parameters\n");
return -1;
}
Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
// detecting keypoints
SurfFeatureDetector detector(5000);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
BFMatcher matcher(extractor.defaultNorm());
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
// extract points
vector<Point2f> pts1, pts2;
for (size_t ii=0; ii<keypoints1.size(); ii++)
pts1.push_back( keypoints1[ii].pt );
for (size_t ii=0; ii<keypoints2.size(); ii++)
pts2.push_back( keypoints2[ii].pt );
// Apply TPS
Ptr<ThinPlateSplineShapeTransformer> mytps = createThinPlateSplineShapeTransformer(25000); //TPS with a relaxed constraint
mytps->estimateTransformation(pts1, pts2, matches);
mytps->warpImage(img2, img2);
imshow("Tranformed", img2);
waitKey(0);
return 0;
}

View File

@@ -1,220 +0,0 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <cstdio>
#include <iostream>
using namespace cv;
using namespace std;
#define APP_NAME "simpleflow_demo : "
static void help()
{
// print a welcome message, and the OpenCV version
printf("This is a demo of SimpleFlow optical flow algorithm,\n"
"Using OpenCV version %s\n\n", CV_VERSION);
printf("Usage: simpleflow_demo frame1 frame2 output_flow"
"\nApplication will write estimated flow "
"\nbetween 'frame1' and 'frame2' in binary format"
"\ninto file 'output_flow'"
"\nThen one can use code from http://vision.middlebury.edu/flow/data/"
"\nto convert flow in binary file to image\n");
}
// binary file format for flow data specified here:
// http://vision.middlebury.edu/flow/data/
static void writeOpticalFlowToFile(const Mat& flow, FILE* file) {
int cols = flow.cols;
int rows = flow.rows;
fprintf(file, "PIEH");
if (fwrite(&cols, sizeof(int), 1, file) != 1 ||
fwrite(&rows, sizeof(int), 1, file) != 1) {
printf(APP_NAME "writeOpticalFlowToFile : problem writing header\n");
exit(1);
}
for (int i= 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
Vec2f flow_at_point = flow.at<Vec2f>(i, j);
if (fwrite(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
fwrite(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
printf(APP_NAME "writeOpticalFlowToFile : problem writing data\n");
exit(1);
}
}
}
}
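For reference, the resulting file layout is exactly what the loop above produces: the 4-byte tag "PIEH", two 32-bit ints (cols, then rows), followed by rows*cols (u, v) float pairs in row-major order — the same layout readOpticalFlowFromFile below parses back.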
static void run(int argc, char** argv) {
if (argc < 3) {
printf(APP_NAME "Wrong number of command line arguments for mode `run`: %d (expected %d)\n",
argc, 3);
exit(1);
}
Mat frame1 = imread(argv[0]);
Mat frame2 = imread(argv[1]);
if (frame1.empty()) {
printf(APP_NAME "Image #1 : %s cannot be read\n", argv[0]);
exit(1);
}
if (frame2.empty()) {
printf(APP_NAME "Image #2 : %s cannot be read\n", argv[1]);
exit(1);
}
if (frame1.rows != frame2.rows || frame1.cols != frame2.cols) {
printf(APP_NAME "Images should be of equal size\n");
exit(1);
}
if (frame1.type() != CV_8UC3 || frame2.type() != CV_8UC3) {
printf(APP_NAME "Both images should be of type CV_8UC3\n");
exit(1);
}
printf(APP_NAME "Read two images of size [rows = %d, cols = %d]\n",
frame1.rows, frame1.cols);
Mat flow;
float start = (float)getTickCount();
calcOpticalFlowSF(frame1, frame2,
flow,
3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10);
printf(APP_NAME "calcOpticalFlowSF : %lf sec\n", (getTickCount() - start) / getTickFrequency());
FILE* file = fopen(argv[2], "wb");
if (file == NULL) {
printf(APP_NAME "Unable to open file '%s' for writing\n", argv[2]);
exit(1);
}
printf(APP_NAME "Writing to file\n");
writeOpticalFlowToFile(flow, file);
fclose(file);
}
static bool readOpticalFlowFromFile(FILE* file, Mat& flow) {
char header[5] = {0};
if (fread(header, 1, 4, file) != 4 || (string)header != "PIEH") {
return false;
}
int cols, rows;
if (fread(&cols, sizeof(int), 1, file) != 1||
fread(&rows, sizeof(int), 1, file) != 1) {
return false;
}
flow = Mat::zeros(rows, cols, CV_32FC2);
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
Vec2f flow_at_point;
if (fread(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
fread(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
return false;
}
flow.at<Vec2f>(i, j) = flow_at_point;
}
}
return true;
}
static bool isFlowCorrect(float u) {
return !cvIsNaN(u) && (fabs(u) < 1e9);
}
static float calc_rmse(Mat flow1, Mat flow2) {
float sum = 0;
int counter = 0;
const int rows = flow1.rows;
const int cols = flow1.cols;
for (int y = 0; y < rows; ++y) {
for (int x = 0; x < cols; ++x) {
Vec2f flow1_at_point = flow1.at<Vec2f>(y, x);
Vec2f flow2_at_point = flow2.at<Vec2f>(y, x);
float u1 = flow1_at_point[0];
float v1 = flow1_at_point[1];
float u2 = flow2_at_point[0];
float v2 = flow2_at_point[1];
if (isFlowCorrect(u1) && isFlowCorrect(u2) && isFlowCorrect(v1) && isFlowCorrect(v2)) {
sum += (u1-u2)*(u1-u2) + (v1-v2)*(v1-v2);
counter++;
}
}
}
return (float)sqrt(sum / (1e-9 + counter));
}
static void eval(int argc, char** argv) {
if (argc < 2) {
printf(APP_NAME "Wrong number of command line arguments for mode `eval` : %d (expected %d)\n",
argc, 2);
exit(1);
}
Mat flow1, flow2;
FILE* flow_file_1 = fopen(argv[0], "rb");
if (flow_file_1 == NULL) {
printf(APP_NAME "Cannot open file with first flow : %s\n", argv[0]);
exit(1);
}
if (!readOpticalFlowFromFile(flow_file_1, flow1)) {
printf(APP_NAME "Cannot read flow data from file %s\n", argv[0]);
exit(1);
}
fclose(flow_file_1);
FILE* flow_file_2 = fopen(argv[1], "rb");
if (flow_file_2 == NULL) {
printf(APP_NAME "Cannot open file with first flow : %s\n", argv[1]);
exit(1);
}
if (!readOpticalFlowFromFile(flow_file_2, flow2)) {
printf(APP_NAME "Cannot read flow data from file %s\n", argv[1]);
exit(1);
}
fclose(flow_file_2);
float rmse = calc_rmse(flow1, flow2);
printf("%lf\n", rmse);
}
int main(int argc, char** argv) {
if (argc < 2) {
printf(APP_NAME "Mode is not specified\n");
help();
exit(1);
}
string mode = (string)argv[1];
int new_argc = argc - 2;
char** new_argv = &argv[2];
if ("run" == mode) {
run(new_argc, new_argv);
} else if ("eval" == mode) {
eval(new_argc, new_argv);
} else if ("help" == mode)
help();
else {
printf(APP_NAME "Unknown mode : %s\n", argv[1]);
help();
}
return 0;
}
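A hypothetical session (file names assumed):
    simpleflow_demo run frame1.png frame2.png flow.bin
    simpleflow_demo eval flow.bin ground_truth.flo
The first command estimates the flow between the two frames and writes it in the binary format above; the second prints the RMSE between the stored flow and a reference flow field.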

View File

@@ -3,6 +3,7 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <cctype>

View File

@@ -4,6 +4,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

View File

@@ -8,6 +8,7 @@
* that was generated with imagelist_creator.cpp
* easy as CV_PI right?
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <vector>

View File

@@ -11,6 +11,8 @@
* easy as CV_PI right?
*/
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

View File

@@ -23,6 +23,7 @@
************************************************** */
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

View File

@@ -9,6 +9,7 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/utility.hpp"

View File

@@ -42,6 +42,7 @@
#include <iostream>
#include <fstream>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching.hpp"

View File

@@ -46,9 +46,11 @@
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/timelapsers.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
@@ -116,7 +118,9 @@ static void printUsage()
" --blend_strength <float>\n"
" Blending strength from [0,100] range. The default is 5.\n"
" --output <result_img>\n"
" The default is 'result.jpg'.\n";
" The default is 'result.jpg'.\n"
" --timelapse (as_is|crop) (range_width)\n"
" Output warped images separately as frames of a time lapse movie, with 'fixed_' prepended to input file names.\n";
}
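A hypothetical invocation of the new option (image names assumed):
    stitching_detailed img1.jpg img2.jpg img3.jpg --timelapse crop 5
Instead of one blended panorama this writes each warped frame separately as fixed_img1.jpg, fixed_img2.jpg, ..., matching every image only against neighbors within a range width of 5.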
@@ -140,8 +144,12 @@ int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
int timelapse_type = Timelapser::AS_IS;
float blend_strength = 5;
string result_name = "result.jpg";
bool timelapse = false;
int timelapse_range = 5;
static int parseCmdArgs(int argc, char** argv)
{
@@ -304,6 +312,24 @@ static int parseCmdArgs(int argc, char** argv)
}
i++;
}
else if (string(argv[i]) == "--timelapse")
{
timelapse = true;
if (string(argv[i + 1]) == "as_is")
timelapse_type = Timelapser::AS_IS;
else if (string(argv[i + 1]) == "crop")
timelapse_type = Timelapser::CROP;
else
{
cout << "Bad timelapse method\n";
return -1;
}
i++;
timelapse_range = atoi(argv[i + 1]);
i++;
}
else if (string(argv[i]) == "--blend_strength")
{
blend_strength = static_cast<float>(atof(argv[i + 1]));
@@ -432,9 +458,19 @@ int main(int argc, char* argv[])
t = getTickCount();
#endif
vector<MatchesInfo> pairwise_matches;
BestOf2NearestMatcher matcher(try_cuda, match_conf);
matcher(features, pairwise_matches);
matcher.collectGarbage();
if (!timelapse)
{
BestOf2NearestMatcher matcher(try_cuda, match_conf);
matcher(features, pairwise_matches);
matcher.collectGarbage();
}
else
{
BestOf2NearestRangeMatcher matcher(timelapse_range, try_cuda, match_conf);
matcher(features, pairwise_matches);
matcher.collectGarbage();
}
LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
// Check if we should save matches graph
@@ -527,7 +563,7 @@ int main(int argc, char* argv[])
{
vector<Mat> rmats;
for (size_t i = 0; i < cameras.size(); ++i)
rmats.push_back(cameras[i].R);
rmats.push_back(cameras[i].R.clone());
waveCorrect(rmats, wave_correct);
for (size_t i = 0; i < cameras.size(); ++i)
cameras[i].R = rmats[i];
@@ -679,6 +715,7 @@ int main(int argc, char* argv[])
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
Ptr<Blender> blender;
Ptr<Timelapser> timelapser;
//double compose_seam_aspect = 1;
double compose_work_aspect = 1;
@@ -755,7 +792,7 @@ int main(int argc, char* argv[])
resize(dilated_mask, seam_mask, mask_warped.size());
mask_warped = seam_mask & mask_warped;
if (!blender)
if (!blender && !timelapse)
{
blender = Blender::createDefault(blend_type, try_cuda);
Size dst_sz = resultRoi(corners, sizes).size();
@@ -776,17 +813,35 @@ int main(int argc, char* argv[])
}
blender->prepare(corners, sizes);
}
else if (!timelapser)
{
CV_Assert(timelapse);
timelapser = Timelapser::createDefault(timelapse_type);
timelapser->initialize(corners, sizes);
}
// Blend the current image
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
if (timelapse)
{
timelapser->process(img_warped_s, Mat::ones(img_warped_s.size(), CV_8UC1), corners[img_idx]);
imwrite("fixed_" + img_names[img_idx], timelapser->getDst());
}
else
{
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
}
}
if (!timelapse)
{
Mat result, result_mask;
blender->blend(result, result_mask);
LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
imwrite(result_name, result);
}
LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
return 0;

View File

@@ -1,127 +0,0 @@
/*
* textdetection.cpp
*
* A demo program of the Extremal Region Filter algorithm described in
* Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
*
* Created on: Sep 23, 2013
* Author: Lluis Gomez i Bigorda <lgomez AT cvc.uab.es>
*/
#include "opencv2/opencv.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <iostream>
#include <iomanip>
using namespace std;
using namespace cv;
void show_help_and_exit(const char *cmd);
void groups_draw(Mat &src, vector<Rect> &groups);
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions);
int main(int argc, const char * argv[])
{
cout << endl << argv[0] << endl << endl;
cout << "Demo program of the Extremal Region Filter algorithm described in " << endl;
cout << "Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012" << endl << endl;
if (argc < 2) show_help_and_exit(argv[0]);
Mat src = imread(argv[1]);
// Extract channels to be processed individually
vector<Mat> channels;
computeNMChannels(src, channels);
int cn = (int)channels.size();
// Append negative channels to detect ER- (bright regions over dark background)
for (int c = 0; c < cn-1; c++)
channels.push_back(255-channels[c]);
// Create ERFilter objects with the 1st and 2nd stage default classifiers
Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),16,0.00015f,0.13f,0.2f,true,0.1f);
Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
vector<vector<ERStat> > regions(channels.size());
// Apply the default cascade classifier to each independent channel (could be done in parallel)
cout << "Extracting Class Specific Extremal Regions from " << (int)channels.size() << " channels ..." << endl;
cout << " (...) this may take a while (...)" << endl << endl;
for (int c=0; c<(int)channels.size(); c++)
{
er_filter1->run(channels[c], regions[c]);
er_filter2->run(channels[c], regions[c]);
}
// Detect character groups
cout << "Grouping extracted ERs ... ";
vector<Rect> groups;
erGrouping(channels, regions, "trained_classifier_erGrouping.xml", 0.5, groups);
// draw groups
groups_draw(src, groups);
imshow("grouping",src);
cout << "Done!" << endl << endl;
cout << "Press 'e' to show the extracted Extremal Regions, any other key to exit." << endl << endl;
if( waitKey(-1) == 'e' )
er_show(channels,regions);
// memory clean-up
er_filter1.release();
er_filter2.release();
regions.clear();
if (!groups.empty())
{
groups.clear();
}
}
// helper functions
void show_help_and_exit(const char *cmd)
{
cout << " Usage: " << cmd << " <input_image> " << endl;
cout << " Default classifier files (trained_classifierNM*.xml) must be in current directory" << endl << endl;
exit(-1);
}
void groups_draw(Mat &src, vector<Rect> &groups)
{
for (int i=(int)groups.size()-1; i>=0; i--)
{
if (src.type() == CV_8UC3)
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 0, 255, 255 ), 3, 8 );
else
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 255 ), 3, 8 );
}
}
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions)
{
for (int c=0; c<(int)channels.size(); c++)
{
Mat dst = Mat::zeros(channels[0].rows+2,channels[0].cols+2,CV_8UC1);
for (int r=0; r<(int)regions[c].size(); r++)
{
ERStat er = regions[c][r];
if (er.parent != NULL) // skip the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
floodFill(channels[c],dst,Point(er.pixel%channels[c].cols,er.pixel/channels[c].cols),
Scalar(255),0,Scalar(er.level),Scalar(0),flags);
}
}
char buff[10]; char *buff_ptr = buff;
sprintf(buff, "channel %d", c);
imshow(buff_ptr, dst);
}
waitKey(-1);
}

View File

@@ -8,9 +8,10 @@
#include <time.h>
using namespace cv;
using namespace cv::ml;
using namespace std;
void get_svm_detector(const SVM& svm, vector< float > & hog_detector );
void get_svm_detector(const Ptr<SVM>& svm, vector< float > & hog_detector );
void convert_to_ml(const std::vector< cv::Mat > & train_samples, cv::Mat& trainData );
void load_images( const string & prefix, const string & filename, vector< Mat > & img_lst );
void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size );
@@ -20,49 +21,24 @@ void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels
void draw_locations( Mat & img, const vector< Rect > & locations, const Scalar & color );
void test_it( const Size & size );
void get_svm_detector(const SVM& svm, vector< float > & hog_detector )
void get_svm_detector(const Ptr<SVM>& svm, vector< float > & hog_detector )
{
// get the number of variables
const int var_all = svm.get_var_count();
// get the number of support vectors
const int sv_total = svm.get_support_vector_count();
// get the decision function
const CvSVMDecisionFunc* decision_func = svm.get_decision_function();
// get the support vectors
const float** sv = new const float*[ sv_total ];
for( int i = 0 ; i < sv_total ; ++i )
sv[ i ] = svm.get_support_vector(i);
Mat sv = svm->getSupportVectors();
const int sv_total = sv.rows;
// get the decision function
Mat alpha, svidx;
double rho = svm->getDecisionFunction(0, alpha, svidx);
CV_Assert( var_all > 0 &&
sv_total > 0 &&
decision_func != 0 &&
decision_func->alpha != 0 &&
decision_func->sv_count == sv_total );
CV_Assert( alpha.total() == 1 && svidx.total() == 1 && sv_total == 1 );
CV_Assert( (alpha.type() == CV_64F && alpha.at<double>(0) == 1.) ||
(alpha.type() == CV_32F && alpha.at<float>(0) == 1.f) );
CV_Assert( sv.type() == CV_32F );
hog_detector.clear();
float svi = 0.f;
hog_detector.clear(); // clear any previous contents
hog_detector.reserve( var_all + 1 ); // reserve space up front for efficiency
/**
* hog_detector^i = \sum_j support_vector_j^i * \alpha_j
* hog_detector^dim = -\rho
*/
for( int i = 0 ; i < var_all ; ++i )
{
svi = 0.f;
for( int j = 0 ; j < sv_total ; ++j )
{
if( decision_func->sv_index != NULL ) // sometimes the sv_index isn't stored in the YML/XML.
svi += (float)( sv[decision_func->sv_index[j]][i] * decision_func->alpha[ j ] );
else
svi += (float)( sv[j][i] * decision_func->alpha[ j ] );
}
hog_detector.push_back( svi );
}
hog_detector.push_back( (float)-decision_func->rho );
delete[] sv;
hog_detector.resize(sv.cols + 1);
memcpy(&hog_detector[0], sv.data, sv.cols*sizeof(hog_detector[0]));
hog_detector[sv.cols] = (float)-rho;
}
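The simplification works because the sample trains a linear SVM: OpenCV stores its decision function as a single compressed support vector with alpha = 1 (which the asserts verify), so the old weighted sum over support vectors collapses to the vector itself, with -rho appended as the bias term of the HOG detector.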
@@ -263,7 +239,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorVa
int mx = drawX + cellSize/2;
int my = drawY + cellSize/2;
rectangle(visu, Point((int)(drawX*zoomFac), (int)(drawY*zoomFac)), Point((int)((drawX+cellSize)*zoomFac), (int)((drawY+cellSize)*zoomFac)), CV_RGB(100,100,100), 1);
rectangle(visu, Point((int)(drawX*zoomFac), (int)(drawY*zoomFac)), Point((int)((drawX+cellSize)*zoomFac), (int)((drawY+cellSize)*zoomFac)), Scalar(100,100,100), 1);
// draw in each cell all 9 gradient strengths
for (int bin=0; bin<gradientBinSize; bin++)
@@ -288,7 +264,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorVa
float y2 = my + dirVecY * currentGradStrength * maxVecLen * scale;
// draw gradient visualization
line(visu, Point((int)(x1*zoomFac),(int)(y1*zoomFac)), Point((int)(x2*zoomFac),(int)(y2*zoomFac)), CV_RGB(0,255,0), 1);
line(visu, Point((int)(x1*zoomFac),(int)(y1*zoomFac)), Point((int)(x2*zoomFac),(int)(y2*zoomFac)), Scalar(0,255,0), 1);
} // for (all bins)
@@ -337,28 +313,26 @@ void compute_hog( const vector< Mat > & img_lst, vector< Mat > & gradient_lst, c
void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels )
{
SVM svm;
/* Default values to train SVM */
SVMParams params;
SVM::Params params;
params.coef0 = 0.0;
params.degree = 3;
params.term_crit.epsilon = 1e-3;
params.termCrit.epsilon = 1e-3;
params.gamma = 0;
params.kernel_type = SVM::LINEAR;
params.kernelType = SVM::LINEAR;
params.nu = 0.5;
params.p = 0.1; // for EPSILON_SVR, epsilon in loss function?
params.C = 0.01; // From paper, soft classifier
params.svm_type = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
params.svmType = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
Mat train_data;
convert_to_ml( gradient_lst, train_data );
clog << "Start training...";
svm.train( train_data, Mat( labels ), Mat(), Mat(), params );
Ptr<SVM> svm = StatModel::train<SVM>(train_data, ROW_SAMPLE, Mat(labels), params);
clog << "...[done]" << endl;
svm.save( "my_people_detector.yml" );
svm->save( "my_people_detector.yml" );
}
void draw_locations( Mat & img, const vector< Rect > & locations, const Scalar & color )
@@ -380,7 +354,7 @@ void test_it( const Size & size )
Scalar reference( 0, 255, 0 );
Scalar trained( 0, 0, 255 );
Mat img, draw;
SVM svm;
Ptr<SVM> svm;
HOGDescriptor hog;
HOGDescriptor my_hog;
my_hog.winSize = size;
@@ -388,7 +362,7 @@ void test_it( const Size & size )
vector< Rect > locations;
// Load the trained SVM.
svm.load( "my_people_detector.yml" );
svm = StatModel::load<SVM>( "my_people_detector.yml" );
// Set the trained svm to my_hog
vector< float > hog_detector;
get_svm_detector( svm, hog_detector );

Three file diffs suppressed because they are too large.

View File

@@ -1,63 +1,35 @@
#include "opencv2/ml/ml.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
#include <string>
#include <map>
using namespace cv;
using namespace cv::ml;
static void help()
{
printf(
"\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
"CvDTree dtree;\n"
"CvBoost boost;\n"
"CvRTrees rtrees;\n"
"CvERTrees ertrees;\n"
"CvGBTrees gbtrees;\n"
"Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
"\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n"
"Usage:\n\t./tree_engine [-r <response_column>] [-ts type_spec] <csv filename>\n"
"where -r <response_column> specified the 0-based index of the response (0 by default)\n"
"-c specifies that the response is categorical (it's ordered by default) and\n"
"-ts specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n"
"<csv filename> is the name of training data file in comma-separated value format\n\n");
}
static int count_classes(CvMLData& data)
static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data)
{
cv::Mat r = cv::cvarrToMat(data.get_responses());
std::map<int, int> rmap;
int i, n = (int)r.total();
for( i = 0; i < n; i++ )
bool ok = model->train(data);
if( !ok )
{
float val = r.at<float>(i);
int ival = cvRound(val);
if( ival != val )
return -1;
rmap[ival] = 1;
printf("Training failed\n");
}
return (int)rmap.size();
}
static void print_result(float train_err, float test_err, const CvMat* _var_imp)
{
printf( "train error %f\n", train_err );
printf( "test error %f\n\n", test_err );
if (_var_imp)
else
{
cv::Mat var_imp = cv::cvarrToMat(_var_imp), sorted_idx;
cv::sortIdx(var_imp, sorted_idx, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING);
printf( "variable importance:\n" );
int i, n = (int)var_imp.total();
int type = var_imp.type();
CV_Assert(type == CV_32F || type == CV_64F);
for( i = 0; i < n; i++)
{
int k = sorted_idx.at<int>(i);
printf( "%d\t%f\n", k, type == CV_32F ? var_imp.at<float>(k) : var_imp.at<double>(k));
}
printf( "train error: %f\n", model->calcError(data, false, noArray()) );
printf( "test error: %f\n\n", model->calcError(data, true, noArray()) );
}
printf("\n");
}
int main(int argc, char** argv)
@@ -69,14 +41,14 @@ int main(int argc, char** argv)
}
const char* filename = 0;
int response_idx = 0;
bool categorical_response = false;
std::string typespec;
for(int i = 1; i < argc; i++)
{
if(strcmp(argv[i], "-r") == 0)
sscanf(argv[++i], "%d", &response_idx);
else if(strcmp(argv[i], "-c") == 0)
categorical_response = true;
else if(strcmp(argv[i], "-ts") == 0)
typespec = argv[++i];
else if(argv[i][0] != '-' )
filename = argv[i];
else
@@ -88,52 +60,32 @@ int main(int argc, char** argv)
}
printf("\nReading in %s...\n\n",filename);
CvDTree dtree;
CvBoost boost;
CvRTrees rtrees;
CvERTrees ertrees;
CvGBTrees gbtrees;
const double train_test_split_ratio = 0.5;
CvMLData data;
Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx+1, typespec);
CvTrainTestSplit spl( 0.5f );
if ( data.read_csv( filename ) == 0)
if( data.empty() )
{
data.set_response_idx( response_idx );
if(categorical_response)
data.change_var_type( response_idx, CV_VAR_CATEGORICAL );
data.set_train_test_split( &spl );
printf("======DTREE=====\n");
dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data, CV_TEST_ERROR ), dtree.get_var_importance() );
if( categorical_response && count_classes(data) == 2 )
{
printf("======BOOST=====\n");
boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0));
print_result( boost.calc_error( &data, CV_TRAIN_ERROR ), boost.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
}
printf("======RTREES=====\n");
rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR), rtrees.calc_error( &data, CV_TEST_ERROR ), rtrees.get_var_importance() );
printf("======ERTREES=====\n");
ertrees.train( &data, CvRTParams( 18, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data, CV_TEST_ERROR ), ertrees.get_var_importance() );
printf("======GBTREES=====\n");
if (categorical_response)
gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
else
gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR), gbtrees.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
printf("ERROR: File %s can not be read\n", filename);
return 0;
}
else
printf("File can not be read");
data->setTrainTestSplitRatio(train_test_split_ratio);
printf("======DTREE=====\n");
Ptr<DTrees> dtree = DTrees::create(DTrees::Params( 10, 2, 0, false, 16, 0, false, false, Mat() ));
train_and_print_errs(dtree, data);
if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem
{
printf("======BOOST=====\n");
Ptr<Boost> boost = Boost::create(Boost::Params(Boost::GENTLE, 100, 0.95, 2, false, Mat()));
train_and_print_errs(boost, data);
}
printf("======RTREES=====\n");
Ptr<RTrees> rtrees = RTrees::create(RTrees::Params(10, 2, 0, false, 16, Mat(), false, 0, TermCriteria(TermCriteria::MAX_ITER, 100, 0)));
train_and_print_errs(rtrees, data);
return 0;
}
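For example, a hypothetical run (CSV name and column layout assumed):
    ./tree_engine -r 0 -ts cat[0-22] mushroom.csv
This splits the rows 50/50 into train and test sets, then prints both errors for the decision tree, for boosted trees when the response has at most two classes, and for the random forest.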

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

View File

@@ -5,6 +5,7 @@
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
using namespace std;

View File

@@ -2,7 +2,7 @@
#include <string> // for strings
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
#include <opencv2/videoio/videoio.hpp> // Video write
using namespace std;
using namespace cv;

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <math.h>
#include <stdlib.h>

View File

@@ -7,6 +7,7 @@
#include <vector>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -4,6 +4,7 @@
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -4,6 +4,7 @@
* @author Ana Huaman
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

View File

@@ -5,6 +5,7 @@
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>

Some files were not shown because too many files have changed in this diff.