Merge remote-tracking branch 'upstream/master' into svm_hog

Mathieu Barnachon
2013-11-24 13:24:39 +01:00
2649 changed files with 192484 additions and 87403 deletions

View File

@@ -5,7 +5,7 @@
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree opencv_softcascade
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab opencv_bioinspired)
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab opencv_bioinspired opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
@@ -16,17 +16,17 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_include_directories("${OpenCV_SOURCE_DIR}/include")#for opencv.hpp
ocv_include_modules(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
if(HAVE_opencv_gpuoptflow)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuoptflow/include")
if(HAVE_opencv_cudaoptflow)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/cudaoptflow/include")
endif()
if(HAVE_opencv_gpuimgproc)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuimgproc/include")
if(HAVE_opencv_cudaimgproc)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/cudaimgproc/include")
endif()
if(HAVE_opencv_gpuarithm)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuarithm/include")
if(HAVE_opencv_cudaarithm)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/cudaarithm/include")
endif()
if(HAVE_opencv_gpufilters)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpufilters/include")
if(HAVE_opencv_cudafilters)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/cudafilters/include")
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
@@ -41,11 +41,11 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
if("${srcs}" MATCHES "tutorial_code")
set(sample_kind tutorial)
set(sample_KIND TUTORIAL)
set(sample_folder "samples//tutorials")
set(sample_subfolder "tutorials")
else()
set(sample_kind example)
set(sample_KIND EXAMPLE)
set(sample_folder "samples//cpp")
set(sample_subfolder "cpp")
endif()
set(the_target "${sample_kind}_${name}")
@@ -53,7 +53,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
if("${srcs}" MATCHES "gpu/")
target_link_libraries(${the_target} opencv_gpuarithm opencv_gpufilters)
target_link_libraries(${the_target} opencv_cudaarithm opencv_cudafilters)
endif()
set_target_properties(${the_target} PROPERTIES
@@ -61,7 +61,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
PROJECT_LABEL "(${sample_KIND}) ${name}")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "${sample_folder}")
set_target_properties(${the_target} PROPERTIES FOLDER "samples/${sample_subfolder}")
endif()
if(WIN32)
@@ -69,7 +69,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif()
install(TARGETS ${the_target}
RUNTIME DESTINATION "${sample_folder}" COMPONENT main)
RUNTIME DESTINATION "${OPENCV_SAMPLES_BIN_INSTALL_PATH}/${sample_subfolder}" COMPONENT main)
endif()
ENDMACRO()
@@ -79,10 +79,12 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_list_filterout(cpp_samples Qt_sample)
endif()
if(NOT HAVE_opencv_gpuarithm OR NOT HAVE_opencv_gpufilters)
if(NOT HAVE_opencv_cudaarithm OR NOT HAVE_opencv_cudafilters)
ocv_list_filterout(cpp_samples "/gpu/")
endif()
ocv_list_filterout(cpp_samples "viz")
foreach(sample_filename ${cpp_samples})
get_filename_component(sample ${sample_filename} NAME_WE)
OPENCV_DEFINE_CPP_EXAMPLE(${sample} ${sample_filename})
@@ -95,4 +97,3 @@ if (INSTALL_C_EXAMPLES AND NOT WIN32)
DESTINATION share/OpenCV/samples/cpp
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()
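The hunks above track OpenCV 3.0's renaming of the CUDA modules from gpu* to cuda* (gpuoptflow to cudaoptflow, gpuimgproc to cudaimgproc, and so on). The public C++ headers moved the same way, so sample code guards optional CUDA paths behind the per-module HAVE_OPENCV_* definitions. A minimal sketch of that guard, assuming an OpenCV 3.x tree where opencv_modules.hpp defines HAVE_OPENCV_CUDAARITHM when the module is built:

#include <opencv2/core.hpp>
#include <opencv2/opencv_modules.hpp>  // per-module HAVE_OPENCV_* definitions
#ifdef HAVE_OPENCV_CUDAARITHM
#  include <opencv2/cudaarithm.hpp>    // was the gpuarithm header before the rename
#endif

int main()
{
#ifdef HAVE_OPENCV_CUDAARITHM
    cv::cuda::GpuMat a(64, 64, CV_8UC1, cv::Scalar::all(1));
    cv::cuda::GpuMat b(64, 64, CV_8UC1, cv::Scalar::all(2));
    cv::cuda::GpuMat c;
    cv::cuda::add(a, b, c);            // element-wise sum computed on the GPU
#endif
    return 0;
}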

View File

@@ -1,10 +1,10 @@
//============================================================================
// Name : HighDynamicRange_RetinaCompression.cpp
// Name : OpenEXRimages_HDR_Retina_toneMapping.cpp
// Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
// Version : 0.1
// Copyright : Alexandre Benoit, LISTIC Lab, july 2011
// Description : HighDynamicRange compression (tone mapping) with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Description : HighDynamicRange retina tone mapping with the help of the Gipsa/Listic's retina in C++, Ansi-style
//============================================================================
#include <iostream>
@@ -17,10 +17,10 @@
static void help(std::string errorMessage)
{
std::cout<<"Program init error : "<<errorMessage<<std::endl;
std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
std::cout<<"\nProgram call procedure : ./OpenEXRimages_HDR_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
std::cout<<"\t[OpenEXR image to process] : the input HDR image to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
std::cout<<"\nExamples:"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping memorial.exr"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HDR_Retina_toneMapping memorial.exr"<<std::endl;
}
// simple procedure for 1D curve tracing
@@ -71,7 +71,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
{
cv::Mat rgbIntImg;
outputMat.convertTo(rgbIntImg, CV_8UC3);
cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
// get histogram density probability in order to cut values outside the lower/upper edge limits (here 5-95%)... useful for cancelling HDR pixel errors
@@ -302,5 +302,3 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
return 0;
}
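The comment above refers to clipping the histogram tails (keeping roughly the central 5-95% of the density) so that defective HDR pixels do not distort the rescaling. A small hypothetical sketch of that idea with cv::calcHist; the sample's own implementation differs in detail:

#include <opencv2/imgproc.hpp>

// find the gray levels bounding the central [lowCut, highCut] mass of an 8-bit image
static void findHistogramCut(const cv::Mat& gray8u, double lowCut, double highCut,
                             int& lowLevel, int& highLevel)
{
    int histSize = 256;
    float range[] = { 0.f, 256.f };
    const float* ranges[] = { range };
    cv::Mat hist;
    cv::calcHist(&gray8u, 1, 0, cv::Mat(), hist, 1, &histSize, ranges);

    double total = (double)gray8u.total(), acc = 0.0;
    lowLevel = 0; highLevel = 0;
    for (int i = 0; i < histSize; i++)
    {
        acc += hist.at<float>(i) / total;
        if (acc < lowCut)  lowLevel = i + 1;   // first level whose cumulative mass reaches lowCut
        if (acc < highCut) highLevel = i + 1;  // first level whose cumulative mass reaches highCut
    }
}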

View File

@@ -1,10 +1,10 @@
//============================================================================
// Name : OpenEXRimages_HighDynamicRange_Retina_toneMapping_video.cpp
// Name : OpenEXRimages_HDR_Retina_toneMapping_video.cpp
// Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
// Version : 0.2
// Copyright : Alexandre Benoit, LISTIC Lab, december 2011
// Description : HighDynamicRange compression (tone mapping) for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Description : HighDynamicRange retina tone mapping for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Known issues: the input OpenEXR sequences can have badly computed pixels that should be removed
// => a simple method consists of cutting histogram edges (a slider for this on the UI is provided)
// => however, in image sequences, this histogram cut must be done in an elegant way from frame to frame... still not done...
@@ -18,17 +18,21 @@
#include "opencv2/imgproc.hpp" // cvCvtcolor function
#include "opencv2/highgui.hpp" // display
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
static void help(std::string errorMessage)
{
std::cout<<"Program init error : "<<errorMessage<<std::endl;
std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
std::cout<<"\nProgram call procedure : ./OpenEXRimages_HDR_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
std::cout<<"\t[OpenEXR image sequence to process] : std::sprintf style ready prototype filename of the input HDR images to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
std::cout<<"\t\t => WARNING : image index number of digits cannot exceed 10"<<std::endl;
std::cout<<"\t[start frame] : the starting frame tat should be considered"<<std::endl;
std::cout<<"\t[end frame] : the ending frame tat should be considered"<<std::endl;
std::cout<<"\nExamples:"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HDR_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
std::cout<<"\t-Image processing : ./OpenEXRimages_HDR_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
std::cout<<"\t ==> to process images from memorial020d.exr to memorial045d.exr"<<std::endl;
}
@@ -90,7 +94,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con
{
cv::Mat rgbIntImg;
normalisedImage.convertTo(rgbIntImg, CV_8UC3);
cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
// get histogram density probability in order to cut values outside the lower/upper edge limits (here 5-95%)... useful for cancelling HDR pixel errors
@@ -359,5 +363,3 @@ static void loadNewFrame(const std::string filenamePrototype, const int currentF
return 0;
}

View File

@@ -4,15 +4,11 @@
#include <iostream>
#include <vector>
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/calib3d/calib3d_c.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/legacy/compat.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
@@ -116,19 +112,16 @@ static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source, IplImage* grayImage)
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
{
cvCvtColor(source,grayImage,CV_RGB2GRAY);
cvSmooth( grayImage, grayImage,CV_GAUSSIAN,11);
cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);
cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV);//25
cvtColor(source, grayImage, COLOR_RGB2GRAY);
GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
Mat MgrayImage = cv::cvarrToMat(grayImage);
//For debug
//MgrayImage = MgrayImage.clone();//deep copy
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(MgrayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
findContours(grayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));
@@ -189,17 +182,17 @@ static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source,
}
srcImagePoints->at(3) = srcImagePoints_temp.at(index);
Mat Msource = cv::cvarrToMat(source);
Mat Msource = source;
stringstream ss;
for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
{
ss<<i;
circle(Msource,srcImagePoints->at(i),5,CV_RGB(255,0,0));
putText( Msource, ss.str(), srcImagePoints->at(i),CV_FONT_HERSHEY_SIMPLEX,1,CV_RGB(255,0,0));
circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
ss.str("");
//new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source->width/2,source->height/2-srcImagePoints_temp.at(i).y);
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source.cols/2,source.rows/2-srcImagePoints_temp.at(i).y);
}
}
@@ -232,15 +225,14 @@ int main(void)
VideoCapture video("cube4.avi");
CV_Assert(video.isOpened());
Mat frame; video >> frame;
Mat source, grayImage;
IplImage* grayImage = cvCreateImage(frame.size(),8,1);
video >> source;
namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
//For debug
//cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
@@ -259,25 +251,20 @@ int main(void)
while(waitKey(33) != 27)
{
video >> frame;
imshow("original", frame);
video >> source;
imshow("original",source);
IplImage source = frame;
foundCorners(&srcImagePoints, &source, grayImage);
foundCorners(&srcImagePoints, source, grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
imshow("POSIT", frame);
//For debug
//cvShowImage("tempGray",grayImage);
imshow("POSIT",source);
if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
video.set(CAP_PROP_POS_AVI_RATIO, 0);
}
destroyAllWindows();
cvReleaseImage(&grayImage);
video.release();
cvReleasePOSITObject(&positObject);
return 0;
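This file ports the POSIT demo from the legacy C API (IplImage, cvCvtColor, cvSmooth, ...) to the C++ API. A minimal sketch of the function-by-function mapping the hunks apply, assuming OpenCV 3.x headers:

#include <opencv2/imgproc.hpp>

void preprocess(const cv::Mat& src, cv::Mat& gray)
{
    cv::cvtColor(src, gray, cv::COLOR_RGB2GRAY);               // was cvCvtColor(..., CV_RGB2GRAY)
    cv::GaussianBlur(gray, gray, cv::Size(11, 11), 0, 0);      // was cvSmooth(..., CV_GAUSSIAN, 11)
    cv::normalize(gray, gray, 0, 255, cv::NORM_MINMAX);        // was cvNormalize(..., CV_MINMAX)
    cv::threshold(gray, gray, 26, 255, cv::THRESH_BINARY_INV); // was cvThreshold(..., CV_THRESH_BINARY_INV)
}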

View File

@@ -54,10 +54,6 @@ static void help(char** argv)
<< "\n";
}
static void makeDir( const string& dir )
{
#if defined WIN32 || defined _WIN32
@@ -2563,19 +2559,19 @@ int main(int argc, char** argv)
Ptr<FeatureDetector> featureDetector = FeatureDetector::create( ddmParams.detectorType );
Ptr<DescriptorExtractor> descExtractor = DescriptorExtractor::create( ddmParams.descriptorType );
Ptr<BOWImgDescriptorExtractor> bowExtractor;
if( featureDetector.empty() || descExtractor.empty() )
if( !featureDetector || !descExtractor )
{
cout << "featureDetector or descExtractor was not created" << endl;
return -1;
}
{
Ptr<DescriptorMatcher> descMatcher = DescriptorMatcher::create( ddmParams.matcherType );
if( featureDetector.empty() || descExtractor.empty() || descMatcher.empty() )
if( !featureDetector || !descExtractor || !descMatcher )
{
cout << "descMatcher was not created" << endl;
return -1;
}
bowExtractor = new BOWImgDescriptorExtractor( descExtractor, descMatcher );
bowExtractor = makePtr<BOWImgDescriptorExtractor>( descExtractor, descMatcher );
}
// Print configuration to screen
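Hunks like the two above recur throughout this commit: validity checks move from Ptr::empty() to the pointer's bool conversion, and assignments from a raw new become cv::makePtr. A minimal sketch of the before/after pattern, using a hypothetical Widget type as a stand-in:

#include <opencv2/core.hpp>

struct Widget
{
    int x;
    Widget(int x_) : x(x_) {}
};

int main()
{
    // 2.4-era style, as removed by this commit:
    //   cv::Ptr<Widget> w = new Widget(42);
    //   if( w.empty() ) return -1;

    // 3.0 style, as introduced by this commit:
    cv::Ptr<Widget> w = cv::makePtr<Widget>(42);
    if( !w )          // the Ptr is tested through its bool conversion
        return -1;
    return w->x == 42 ? 0 : 1;
}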

View File

@@ -35,7 +35,7 @@ int main(int argc, char** argv)
setNumThreads(8);
Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
if (fgbg.empty())
if (!fgbg)
{
std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
return -1;
@@ -78,4 +78,3 @@ int main(int argc, char** argv)
return 0;
}

View File

@@ -332,4 +332,3 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}

View File

@@ -22,7 +22,7 @@ class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
@@ -51,11 +51,11 @@ int main(int , char** )
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);

View File

@@ -153,7 +153,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
{
cout << "< Evaluate descriptor matcher..." << endl;
vector<Point2f> curve;
Ptr<GenericDescriptorMatcher> gdm = new VectorDescriptorMatcher( descriptorExtractor, descriptorMatcher );
Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
Point2f firstPoint = *curve.begin();
@@ -208,7 +208,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
matchesMask[i1] = 1;
}
// draw inliers
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(0, 0, 255), matchesMask
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
@@ -218,7 +218,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
// draw outliers
for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
matchesMask[i1] = !matchesMask[i1];
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 0, 255), Scalar(255, 0, 0), matchesMask,
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif
@@ -253,7 +253,7 @@ int main(int argc, char** argv)
int mactherFilterType = getMatcherFilterType( argv[4] );
bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
cout << ">" << endl;
if( detector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() )
if( !detector || !descriptorExtractor || !descriptorMatcher )
{
cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
return -1;
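The colour swaps in the drawMatches and circle calls above are easy to misread: cv::Scalar in drawing calls is ordered blue, green, red, so Scalar(255, 0, 0) is blue and Scalar(0, 0, 255) is red, while the legacy CV_RGB macro takes red first. A small sketch of the equivalences, assuming the usual 8-bit BGR image:

#include <opencv2/imgproc.hpp>

void drawColours(cv::Mat& img)   // CV_8UC3, BGR channel order
{
    cv::circle(img, cv::Point(20, 20), 5, cv::Scalar(255, 0, 0));  // blue
    cv::circle(img, cv::Point(40, 20), 5, cv::Scalar(0, 255, 0));  // green
    cv::circle(img, cv::Point(60, 20), 5, cv::Scalar(0, 0, 255));  // red
    // the legacy macro CV_RGB(255, 0, 0) expands to Scalar(0, 0, 255), i.e. red
}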

View File

@@ -67,7 +67,7 @@ class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
@@ -117,11 +117,11 @@ static int test_FaceDetector(int argc, char *argv[])
}
std::string cascadeFrontalfilename=cascadefile;
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params);

View File

@@ -535,7 +535,7 @@ void DetectorQualityEvaluator::readAlgorithm ()
{
defaultDetector = FeatureDetector::create( algName );
specificDetector = FeatureDetector::create( algName );
if( defaultDetector.empty() )
if( !defaultDetector )
{
printf( "Algorithm can not be read\n" );
exit(-1);
@@ -769,14 +769,14 @@ void DescriptorQualityEvaluator::readAlgorithm( )
defaultDescMatcher = GenericDescriptorMatcher::create( algName );
specificDescMatcher = GenericDescriptorMatcher::create( algName );
if( defaultDescMatcher.empty() )
if( !defaultDescMatcher )
{
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( algName );
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( matcherName );
defaultDescMatcher = new VectorDescriptorMatch( extractor, matcher );
specificDescMatcher = new VectorDescriptorMatch( extractor, matcher );
defaultDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
specificDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
if( extractor.empty() || matcher.empty() )
if( !extractor || !matcher )
{
printf("Algorithm can not be read\n");
exit(-1);
@@ -881,8 +881,9 @@ public:
virtual void readAlgorithm( )
{
string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
defaultDescMatcher = new VectorDescriptorMatch( new CalonderDescriptorExtractor<float>( classifierFile ),
new BFMatcher(NORM_L2) );
defaultDescMatcher = makePtr<VectorDescriptorMatch>(
makePtr<CalonderDescriptorExtractor<float> >( classifierFile ),
makePtr<BFMatcher>(int(NORM_L2)));
specificDescMatcher = defaultDescMatcher;
}
};
@@ -922,10 +923,11 @@ void OneWayDescriptorQualityTest::processRunParamsFile ()
readAllDatasetsRunParams();
OneWayDescriptorBase *base = new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
trainPath, trainImagesList);
Ptr<OneWayDescriptorBase> base(
new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
trainPath, trainImagesList));
OneWayDescriptorMatch *match = new OneWayDescriptorMatch ();
Ptr<OneWayDescriptorMatch> match = makePtr<OneWayDescriptorMatch>();
match->initialize( OneWayDescriptorMatch::Params (), base );
defaultDescMatcher = match;
writeAllDatasetsRunParams();
@@ -958,18 +960,18 @@ int main( int argc, char** argv )
Ptr<BaseQualityEvaluator> evals[] =
{
new DetectorQualityEvaluator( "FAST", "quality-detector-fast" ),
new DetectorQualityEvaluator( "GFTT", "quality-detector-gftt" ),
new DetectorQualityEvaluator( "HARRIS", "quality-detector-harris" ),
new DetectorQualityEvaluator( "MSER", "quality-detector-mser" ),
new DetectorQualityEvaluator( "STAR", "quality-detector-star" ),
new DetectorQualityEvaluator( "SIFT", "quality-detector-sift" ),
new DetectorQualityEvaluator( "SURF", "quality-detector-surf" ),
makePtr<DetectorQualityEvaluator>( "FAST", "quality-detector-fast" ),
makePtr<DetectorQualityEvaluator>( "GFTT", "quality-detector-gftt" ),
makePtr<DetectorQualityEvaluator>( "HARRIS", "quality-detector-harris" ),
makePtr<DetectorQualityEvaluator>( "MSER", "quality-detector-mser" ),
makePtr<DetectorQualityEvaluator>( "STAR", "quality-detector-star" ),
makePtr<DetectorQualityEvaluator>( "SIFT", "quality-detector-sift" ),
makePtr<DetectorQualityEvaluator>( "SURF", "quality-detector-surf" ),
new DescriptorQualityEvaluator( "SIFT", "quality-descriptor-sift", "BruteForce" ),
new DescriptorQualityEvaluator( "SURF", "quality-descriptor-surf", "BruteForce" ),
new DescriptorQualityEvaluator( "FERN", "quality-descriptor-fern"),
new CalonderDescriptorQualityEvaluator()
makePtr<DescriptorQualityEvaluator>( "SIFT", "quality-descriptor-sift", "BruteForce" ),
makePtr<DescriptorQualityEvaluator>( "SURF", "quality-descriptor-surf", "BruteForce" ),
makePtr<DescriptorQualityEvaluator>( "FERN", "quality-descriptor-fern"),
makePtr<CalonderDescriptorQualityEvaluator>()
};
for( size_t i = 0; i < sizeof(evals)/sizeof(evals[0]); i++ )

View File

@@ -80,4 +80,3 @@ int main(int argc, const char ** argv)
waitKey();
return 0;
}

View File

@@ -130,7 +130,7 @@ int main( int argc, const char** argv )
// Call to update the view
onTrackbar(0, 0);
int c = waitKey() & 255;
int c = waitKey(0) & 255;
if( c == 27 )
break;

View File

@@ -59,7 +59,7 @@ int main( int /*argc*/, char** /*argv*/ )
params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
params.start_step = CvEM::START_E_STEP;
params.means = em_model.get_means();
params.covs = (const CvMat**)em_model.get_covs();
params.covs = em_model.get_covs();
params.weights = em_model.get_weights();
em_model2.train( samples, Mat(), params, &labels );

View File

@@ -1,120 +0,0 @@
//--------------------------------------------------------------------------------------------------
// A demo program of the Extremal Region Filter algorithm described in
// Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
//--------------------------------------------------------------------------------------------------
#include "opencv2/opencv.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <iostream>
#include <iomanip>
using namespace std;
using namespace cv;
void er_draw(Mat &src, Mat &dst, ERStat& er);
void er_draw(Mat &src, Mat &dst, ERStat& er)
{
if (er.parent != NULL) // deprecate the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
floodFill(src,dst,Point(er.pixel%src.cols,er.pixel/src.cols),Scalar(255),0,Scalar(er.level),Scalar(0),flags);
}
}
int main(int argc, const char * argv[])
{
vector<ERStat> regions;
if (argc < 2) {
cout << "Demo program of the Extremal Region Filter algorithm described in " << endl;
cout << "Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012" << endl << endl;
cout << " Usage: " << argv[0] << " input_image <optional_groundtruth_image>" << endl;
cout << " Default classifier files (trained_classifierNM*.xml) should be in ./" << endl;
return -1;
}
Mat original = imread(argv[1]);
Mat gt;
if (argc > 2)
{
gt = imread(argv[2]);
cvtColor(gt, gt, COLOR_RGB2GRAY);
threshold(gt, gt, 254, 255, THRESH_BINARY);
}
Mat grey(original.size(),CV_8UC1);
cvtColor(original,grey,COLOR_RGB2GRAY);
double t = (double)getTickCount();
// Build ER tree and filter with the 1st stage default classifier
Ptr<ERFilter> er_filter1 = createERFilterNM1();
er_filter1->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t FIRST STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << setw(9) << regions.size()+er_filter1->getNumRejected() << "\t Extremal Regions extracted " << endl;
cout << setw(9) << regions.size() << "\t Extremal Regions selected by the first stage of the sequential classifier." << endl;
cout << "\t \t (saving into out_second_stage.jpg)" << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
er_filter1.release();
// draw regions
Mat mask = Mat::zeros(grey.rows+2,grey.cols+2,CV_8UC1);
for (int r=0; r<(int)regions.size(); r++)
er_draw(grey, mask, regions.at(r));
mask = 255-mask;
imwrite("out_first_stage.jpg", mask);
if (argc > 2)
{
Mat tmp_mask = (255-gt) & (255-mask(Rect(Point(1,1),Size(mask.cols-2,mask.rows-2))));
cout << "Recall for the 1st stage filter = " << (float)countNonZero(tmp_mask) / countNonZero(255-gt) << endl;
}
t = (double)getTickCount();
// Default second stage classifier
Ptr<ERFilter> er_filter2 = createERFilterNM2();
er_filter2->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t SECOND STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << setw(9) << regions.size() << "\t Extremal Regions selected by the second stage of the sequential classifier." << endl;
cout << "\t \t (saving into out_second_stage.jpg)" << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
er_filter2.release();
// draw regions
mask = mask*0;
for (int r=0; r<(int)regions.size(); r++)
er_draw(grey, mask, regions.at(r));
mask = 255-mask;
imwrite("out_second_stage.jpg", mask);
if (argc > 2)
{
Mat tmp_mask = (255-gt) & (255-mask(Rect(Point(1,1),Size(mask.cols-2,mask.rows-2))));
cout << "Recall for the 2nd stage filter = " << (float)countNonZero(tmp_mask) / countNonZero(255-gt) << endl;
}
regions.clear();
}

View File

@@ -131,11 +131,11 @@ int main(int argc, char * argv[]) {
//generate test data
cout << "Extracting Test Data from images" << endl <<
endl;
Ptr<FeatureDetector> detector =
Ptr<FeatureDetector> detector(
new DynamicAdaptedFeatureDetector(
AdjusterAdapter::create("STAR"), 130, 150, 5);
Ptr<DescriptorExtractor> extractor =
new SurfDescriptorExtractor(1000, 4, 2, false, true);
AdjusterAdapter::create("STAR"), 130, 150, 5));
Ptr<DescriptorExtractor> extractor(
new SurfDescriptorExtractor(1000, 4, 2, false, true));
Ptr<DescriptorMatcher> matcher =
DescriptorMatcher::create("FlannBased");
@@ -183,8 +183,8 @@ int main(int argc, char * argv[]) {
endl;
Ptr<of2::FabMap> fabmap;
fabmap = new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
of2::FabMap::CHOW_LIU);
fabmap.reset(new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
of2::FabMap::CHOW_LIU));
fabmap->addTraining(trainData);
vector<of2::IMatch> matches;

View File

@@ -33,7 +33,7 @@ int main(int argc, char** argv)
std::string params_filename = std::string(argv[4]);
Ptr<GenericDescriptorMatcher> descriptorMatcher = GenericDescriptorMatcher::create(alg_name, params_filename);
if( descriptorMatcher.empty() )
if( !descriptorMatcher )
{
printf ("Cannot create descriptor\n");
return 0;
@@ -80,7 +80,7 @@ Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, cons
for (size_t i = 0; i < features1.size(); i++)
{
circle(img_corr, features1[i].pt, 3, Scalar(255, 0, 0));
circle(img_corr, features1[i].pt, 3, Scalar(0, 0, 255));
}
for (size_t i = 0; i < features2.size(); i++)

View File

@@ -296,15 +296,15 @@ int main( int argc, char** argv )
help();
const string winName = "image";
namedWindow( winName.c_str(), WINDOW_AUTOSIZE );
setMouseCallback( winName.c_str(), on_mouse, 0 );
namedWindow( winName, WINDOW_AUTOSIZE );
setMouseCallback( winName, on_mouse, 0 );
gcapp.setImageAndWinName( image, winName );
gcapp.showImage();
for(;;)
{
int c = waitKey();
int c = waitKey(0);
switch( (char) c )
{
case '\x1b':
@@ -331,6 +331,6 @@ int main( int argc, char** argv )
}
exit_main:
destroyWindow( winName.c_str() );
destroyWindow( winName );
return 0;
}

View File

@@ -61,4 +61,3 @@ int main(int argc, char** argv)
return 0;
}

View File

@@ -18,6 +18,10 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/hybridtracker.hpp"
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
using namespace cv;
using namespace std;

View File

@@ -31,8 +31,8 @@ int main( int argc, char** argv )
help();
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#if DEMO_MIXED_API_USE
Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-conting pointer class
if(iplimg.empty())
Ptr<IplImage> iplimg(cvLoadImage(imagename)); // Ptr<T> is safe ref-counting pointer class
if(!iplimg)
{
fprintf(stderr, "Can not load image %s\n", imagename);
return -1;

View File

@@ -10,14 +10,14 @@ static void help(char** argv)
{
cout << "\nThis sample shows you how to read a sequence of images using the VideoCapture interface.\n"
<< "Usage: " << argv[0] << " <image_mask> (example mask: example_%%02d.jpg)\n"
<< "Image mask defines the name variation for the input images that have to be read as a sequence. \n"
<< "Using the mask example_%%02d.jpg will read in images labeled as 'example_00.jpg', 'example_01.jpg', etc."
<< "Image mask defines the name variation for the input images that have to be read as a sequence. \n"
<< "Using the mask example_%%02d.jpg will read in images labeled as 'example_00.jpg', 'example_01.jpg', etc."
<< endl;
}
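VideoCapture expands the printf-style mask by itself; when a sample instead builds such numbered filenames by hand (as the retina HDR demos above do with sprintf), cv::format is the C++ equivalent. A tiny hypothetical sketch:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // prints example_00.jpg, example_01.jpg, example_02.jpg
    for (int i = 0; i < 3; i++)
        std::cout << cv::format("example_%02d.jpg", i).c_str() << std::endl;
    return 0;
}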
int main(int argc, char** argv)
{
if(argc != 2)
if(argc != 2)
{
help(argv);
return 1;
@@ -25,28 +25,28 @@ int main(int argc, char** argv)
string first_file = argv[1];
VideoCapture sequence(first_file);
if (!sequence.isOpened())
{
cerr << "Failed to open the image sequence!\n" << endl;
return 1;
}
Mat image;
namedWindow("Image sequence | press ESC to close", 1);
for(;;)
{
// Read in image from sequence
sequence >> image;
// If no image was retrieved -> end of sequence
if(image.empty())
{
cout << "End of Sequence" << endl;
break;
}
imshow("Image sequence | press ESC to close", image);
if(waitKey(500) == 27)

View File

@@ -88,6 +88,8 @@ static void readDirectory( const string& directoryName, vector<String>& filename
else
filenames.push_back( string(dent->d_name) );
}
closedir( dir );
}
#endif

View File

@@ -114,7 +114,7 @@ private:
// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
cv::FileStorage fs(filename, cv::FileStorage::READ);
detector->read(fs.root());

View File

@@ -30,9 +30,9 @@ int main(int argc, char** argv)
// Create and LSD detector with standard or no refinement.
#if 1
Ptr<LineSegmentDetector> ls = createLineSegmentDetectorPtr(LSD_REFINE_STD);
Ptr<LineSegmentDetector> ls = createLineSegmentDetector(LSD_REFINE_STD);
#else
Ptr<LineSegmentDetector> ls = createLineSegmentDetectorPtr(LSD_REFINE_NONE);
Ptr<LineSegmentDetector> ls = createLineSegmentDetector(LSD_REFINE_NONE);
#endif
double start = double(getTickCount());
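createLineSegmentDetectorPtr was renamed createLineSegmentDetector; it returns a cv::Ptr either way. A minimal usage sketch, assuming an 8-bit grayscale input:

#include <opencv2/imgproc.hpp>
#include <vector>

void detectAndDraw(cv::Mat& gray)    // 8-bit, single channel
{
    cv::Ptr<cv::LineSegmentDetector> ls =
        cv::createLineSegmentDetector(cv::LSD_REFINE_STD);
    std::vector<cv::Vec4f> lines;    // each entry holds x1, y1, x2, y2
    ls->detect(gray, lines);
    ls->drawSegments(gray, lines);   // overlays the segments on the input
}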

View File

@@ -84,7 +84,7 @@ static bool createDetectorDescriptorMatcher( const string& detectorType, const s
descriptorMatcher = DescriptorMatcher::create( matcherType );
cout << ">" << endl;
bool isCreated = !( featureDetector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() );
bool isCreated = featureDetector && descriptorExtractor && descriptorMatcher;
if( !isCreated )
cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;

View File

@@ -8,12 +8,13 @@ using namespace std;
static void help()
{
cout << "This program demonstrates finding the minimum enclosing box or circle of a set\n"
"of points using functions: minAreaRect() minEnclosingCircle().\n"
"Random points are generated and then enclosed.\n"
"Call:\n"
"./minarea\n"
"Using OpenCV v" << CV_VERSION << "\n" << endl;
cout << "This program demonstrates finding the minimum enclosing box, triangle or circle of a set\n"
<< "of points using functions: minAreaRect() minEnclosingTriangle() minEnclosingCircle().\n"
<< "Random points are generated and then enclosed.\n\n"
<< "Press ESC, 'q' or 'Q' to exit and any other key to regenerate the set of points.\n\n"
<< "Call:\n"
<< "./minarea\n"
<< "Using OpenCV v" << CV_VERSION << "\n" << endl;
}
int main( int /*argc*/, char** /*argv*/ )
@@ -27,6 +28,8 @@ int main( int /*argc*/, char** /*argv*/ )
{
int i, count = rng.uniform(1, 101);
vector<Point> points;
// Generate a random set of points
for( i = 0; i < count; i++ )
{
Point pt;
@@ -36,23 +39,38 @@ int main( int /*argc*/, char** /*argv*/ )
points.push_back(pt);
}
// Find the minimum area enclosing bounding box
RotatedRect box = minAreaRect(Mat(points));
// Find the minimum area enclosing triangle
vector<Point2f> triangle;
minEnclosingTriangle(points, triangle);
// Find the minimum area enclosing circle
Point2f center, vtx[4];
float radius = 0;
minEnclosingCircle(Mat(points), center, radius);
box.points(vtx);
img = Scalar::all(0);
// Draw the points
for( i = 0; i < count; i++ )
circle( img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA );
// Draw the bounding box
for( i = 0; i < 4; i++ )
line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, LINE_AA);
// Draw the triangle
for( i = 0; i < 3; i++ )
line(img, triangle[i], triangle[(i+1)%3], Scalar(255, 255, 0), 1, LINE_AA);
// Draw the circle
circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, LINE_AA);
imshow( "rect & circle", img );
imshow( "Rectangle, triangle & circle", img );
char key = (char)waitKey();
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'

View File

@@ -77,7 +77,7 @@ int main( int argc, char** argv )
OpenClose(open_close_pos, 0);
ErodeDilate(erode_dilate_pos, 0);
c = waitKey();
c = waitKey(0);
if( (char)c == 27 )
break;

View File

@@ -43,7 +43,3 @@ int main(int, char* [])
return 0;
}

View File

@@ -7,7 +7,7 @@
using namespace std;
using namespace cv;
const Scalar WHITE_COLOR = CV_RGB(255,255,255);
const Scalar WHITE_COLOR = Scalar(255,255,255);
const string winName = "points";
const int testStep = 5;
@@ -69,15 +69,15 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
// put the text
stringstream text;
text << "current class " << classColors.size()-1;
putText( img, text.str(), Point(10,25), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText( img, text.str(), Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total classes " << classColors.size();
putText( img, text.str(), Point(10,50), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText( img, text.str(), Point(10,50), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total points " << trainedPoints.size();
putText(img, text.str(), cvPoint(10,75), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText(img, text.str(), Point(10,75), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
// draw points
for( size_t i = 0; i < trainedPoints.size(); i++ )
@@ -178,7 +178,7 @@ static void find_decision_boundary_SVM( CvSVMParams params )
for( int i = 0; i < svmClassifier.get_support_vector_count(); i++ )
{
const float* supportVector = svmClassifier.get_support_vector(i);
circle( imgDst, Point(supportVector[0],supportVector[1]), 5, CV_RGB(255,255,255), -1 );
circle( imgDst, Point(supportVector[0],supportVector[1]), 5, Scalar(255,255,255), -1 );
}
}
@@ -526,7 +526,7 @@ int main()
{
#if _NBC_
find_decision_boundary_NBC();
cvNamedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
namedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
@@ -560,7 +560,7 @@ int main()
params.C = 10;
find_decision_boundary_SVM( params );
cvNamedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
namedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
imshow( "classificationSVM2", imgDst );
#endif

View File

@@ -156,4 +156,3 @@ int main(int argc, char* argv[]) {
return 0;
}

Binary file changed (not shown; was 83 KiB).

New binary files (not shown):
samples/cpp/scenetext01.jpg (95 KiB)
samples/cpp/scenetext02.jpg (93 KiB)
samples/cpp/scenetext03.jpg (59 KiB)
samples/cpp/scenetext04.jpg (97 KiB)
samples/cpp/scenetext05.jpg (111 KiB)
samples/cpp/scenetext06.jpg (69 KiB)

Binary file changed (not shown; was 3.1 KiB).

View File

@@ -96,8 +96,6 @@ int main(int argc, char** argv)
if( !tmp_frame.data )
break;
bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
//CvMat _bgmask = bgmask;
//cvSegmentFGMask(&_bgmask);
refineSegments(tmp_frame, bgmask, out_frame);
imshow("video", tmp_frame);
imshow("segmented", out_frame);

View File

@@ -0,0 +1,111 @@
/*
* shape_example.cpp -- Shape context demo for shape matching
*/
#include "opencv2/shape.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
static void help()
{
printf("\n"
"This program demonstrates a method for shape comparisson based on Shape Context\n"
"You should run the program providing a number between 1 and 20 for selecting an image in the folder shape_sample.\n"
"Call\n"
"./shape_example [number between 1 and 20]\n\n");
}
static vector<Point> simpleContour( const Mat& currentQuery, int n=300 )
{
vector<vector<Point> > _contoursQuery;
vector <Point> contoursQuery;
findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE);
for (size_t border=0; border<_contoursQuery.size(); border++)
{
for (size_t p=0; p<_contoursQuery[border].size(); p++)
{
contoursQuery.push_back( _contoursQuery[border][p] );
}
}
// In case actual number of points is less than n
int dummy=0;
for (int add=(int)contoursQuery.size()-1; add<n; add++)
{
contoursQuery.push_back(contoursQuery[dummy++]); //adding dummy values
}
// Uniform sampling
random_shuffle(contoursQuery.begin(), contoursQuery.end());
vector<Point> cont;
for (int i=0; i<n; i++)
{
cont.push_back(contoursQuery[i]);
}
return cont;
}
int main(int argc, char** argv)
{
help();
string path = "./shape_sample/";
int indexQuery = 1;
if( argc < 2 )
{
std::cout<<"Using first image as query."<<std::endl;
}
else
{
sscanf( argv[1], "%i", &indexQuery );
}
cv::Ptr <cv::ShapeContextDistanceExtractor> mysc = cv::createShapeContextDistanceExtractor();
Size sz2Sh(300,300);
stringstream queryName;
queryName<<path<<indexQuery<<".png";
Mat query=imread(queryName.str(), IMREAD_GRAYSCALE);
Mat queryToShow;
resize(query, queryToShow, sz2Sh);
imshow("QUERY", queryToShow);
moveWindow("TEST", 0,0);
vector<Point> contQuery = simpleContour(query);
int bestMatch = 0;
float bestDis=FLT_MAX;
for ( int ii=1; ii<=20; ii++ )
{
if (ii==indexQuery) continue;
waitKey(30);
stringstream iiname;
iiname<<path<<ii<<".png";
cout<<"name: "<<iiname.str()<<endl;
Mat iiIm=imread(iiname.str(), 0);
Mat iiToShow;
resize(iiIm, iiToShow, sz2Sh);
imshow("TEST", iiToShow);
moveWindow("TEST", sz2Sh.width+50,0);
vector<Point> contii = simpleContour(iiIm);
float dis = mysc->computeDistance( contQuery, contii );
if ( dis<bestDis )
{
bestMatch = ii;
bestDis = dis;
}
std::cout<<" distance between "<<queryName.str()<<" and "<<iiname.str()<<" is: "<<dis<<std::endl;
}
destroyWindow("TEST");
stringstream bestname;
bestname<<path<<bestMatch<<".png";
Mat iiIm=imread(bestname.str(), 0);
Mat bestToShow;
resize(iiIm, bestToShow, sz2Sh);
imshow("BEST MATCH", bestToShow);
moveWindow("BEST MATCH", sz2Sh.width+50,0);
return 0;
}

20 new binary files (not shown; sizes between 437 B and 3.2 KiB), presumably the shape_sample/1.png through 20.png silhouettes used by the shape-matching demo above.

View File

@@ -0,0 +1,74 @@
/*
* shape_transformation.cpp -- Shape transformation demo for shape matching
*/
#include "opencv2/shape.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
static void help()
{
printf("\nThis program demonstrates how to use common interface for shape transformers\n"
"Call\n"
"shape_transformation [image1] [image2]\n");
}
int main(int argc, char** argv)
{
help();
if(argc < 3)
{
printf("Not enough parameters\n");
return -1;
}
Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
// detecting keypoints
SurfFeatureDetector detector(5000);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
BFMatcher matcher(NORM_L2);
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
// extract points
vector<Point2f> pts1, pts2;
for (size_t ii=0; ii<keypoints1.size(); ii++)
pts1.push_back( keypoints1[ii].pt );
for (size_t ii=0; ii<keypoints2.size(); ii++)
pts2.push_back( keypoints2[ii].pt );
// Apply TPS
Ptr<ThinPlateSplineShapeTransformer> mytps = createThinPlateSplineShapeTransformer(25000); //TPS with a relaxed constraint
mytps->estimateTransformation(pts1, pts2, matches);
mytps->warpImage(img2, img2);
imshow("Tranformed", img2);
waitKey(0);
return 0;
}
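estimateTransformation above takes the two point sets plus the DMatch list pairing them; once estimated, the transformer can warp sparse points (applyTransformation) as well as whole images (warpImage). A minimal sketch with hand-made correspondences on hypothetical points:

#include <opencv2/shape.hpp>
#include <vector>

int main()
{
    // four made-up correspondences: src[i] maps to dst[i]
    std::vector<cv::Point2f> src, dst;
    src.push_back(cv::Point2f(0, 0));     dst.push_back(cv::Point2f(10, 5));
    src.push_back(cv::Point2f(100, 0));   dst.push_back(cv::Point2f(110, 0));
    src.push_back(cv::Point2f(0, 100));   dst.push_back(cv::Point2f(0, 110));
    src.push_back(cv::Point2f(100, 100)); dst.push_back(cv::Point2f(105, 105));
    std::vector<cv::DMatch> matches;
    for (int i = 0; i < 4; i++)
        matches.push_back(cv::DMatch(i, i, 0.f));

    cv::Ptr<cv::ThinPlateSplineShapeTransformer> tps =
        cv::createThinPlateSplineShapeTransformer(0); // 0: exact interpolation, no relaxation
    tps->estimateTransformation(src, dst, matches);

    std::vector<cv::Point2f> warped;
    tps->applyTransformation(src, warped);            // warped should land near dst
    return 0;
}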

View File

@@ -41,15 +41,15 @@ namespace {
cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
for (;;) {
capture >> frame;
if (frame.empty())
break;
imshow(window_name, frame);
char key = (char)waitKey(30); //delay N millis, usually long enough to display and capture input
switch (key) {
case 'q':
case 'Q':

View File

@@ -401,4 +401,3 @@ int main(int argc, char** argv)
StereoCalib(imagelist, boardSize, true, showRectified);
return 0;
}

View File

@@ -134,5 +134,3 @@ int parseCmdArgs(int argc, char** argv)
}
return 0;
}

View File

@@ -357,15 +357,15 @@ int main(int argc, char* argv[])
if (features_type == "surf")
{
#ifdef HAVE_OPENCV_NONFREE
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
finder = new SurfFeaturesFinderGpu();
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
finder = makePtr<SurfFeaturesFinderGpu>();
else
#endif
finder = new SurfFeaturesFinder();
finder = makePtr<SurfFeaturesFinder>();
}
else if (features_type == "orb")
{
finder = new OrbFeaturesFinder();
finder = makePtr<OrbFeaturesFinder>();
}
else
{
@@ -469,7 +469,11 @@ int main(int argc, char* argv[])
HomographyBasedEstimator estimator;
vector<CameraParams> cameras;
estimator(features, pairwise_matches, cameras);
if (!estimator(features, pairwise_matches, cameras))
{
cout << "Homography estimation failed.\n";
return -1;
}
for (size_t i = 0; i < cameras.size(); ++i)
{
@@ -480,8 +484,8 @@ int main(int argc, char* argv[])
}
Ptr<detail::BundleAdjusterBase> adjuster;
if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
else
{
cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
@@ -495,7 +499,11 @@ int main(int argc, char* argv[])
if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
adjuster->setRefinementMask(refine_mask);
(*adjuster)(features, pairwise_matches, cameras);
if (!(*adjuster)(features, pairwise_matches, cameras))
{
cout << "Camera parameters adjusting failed.\n";
return -1;
}
// Find median focal length
@@ -544,34 +552,52 @@ int main(int argc, char* argv[])
// Warp images and their masks
Ptr<WarperCreator> warper_creator;
#ifdef HAVE_OPENCV_GPUWARPING
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
#ifdef HAVE_OPENCV_CUDAWARPING
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarperGpu>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarperGpu>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarperGpu>();
}
else
#endif
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarper>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarper>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarper>();
else if (warp_type == "fisheye")
warper_creator = makePtr<cv::FisheyeWarper>();
else if (warp_type == "stereographic")
warper_creator = makePtr<cv::StereographicWarper>();
else if (warp_type == "compressedPlaneA2B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlaneA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
else if (warp_type == "compressedPlanePortraitA2B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlanePortraitA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "paniniA2B1")
warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
else if (warp_type == "paniniA1.5B1")
warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
else if (warp_type == "paniniPortraitA2B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "paniniPortraitA1.5B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "mercator")
warper_creator = makePtr<cv::MercatorWarper>();
else if (warp_type == "transverseMercator")
warper_creator = makePtr<cv::TransverseMercatorWarper>();
}
if (warper_creator.empty())
if (!warper_creator)
{
cout << "Can't create the following warper '" << warp_type << "'\n";
return 1;
@@ -604,32 +630,32 @@ int main(int argc, char* argv[])
Ptr<SeamFinder> seam_finder;
if (seam_find_type == "no")
seam_finder = new detail::NoSeamFinder();
seam_finder = makePtr<detail::NoSeamFinder>();
else if (seam_find_type == "voronoi")
seam_finder = new detail::VoronoiSeamFinder();
seam_finder = makePtr<detail::VoronoiSeamFinder>();
else if (seam_find_type == "gc_color")
{
#ifdef HAVE_OPENCV_GPU
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
#ifdef HAVE_OPENCV_CUDA
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
else
#endif
seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
}
else if (seam_find_type == "gc_colorgrad")
{
#ifdef HAVE_OPENCV_GPU
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
#ifdef HAVE_OPENCV_CUDA
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
else
#endif
seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
}
else if (seam_find_type == "dp_color")
seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
else if (seam_find_type == "dp_colorgrad")
seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
if (seam_finder.empty())
seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
if (!seam_finder)
{
cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
return 1;
@@ -727,7 +753,7 @@ int main(int argc, char* argv[])
resize(dilated_mask, seam_mask, mask_warped.size());
mask_warped = seam_mask & mask_warped;
if (blender.empty())
if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
@@ -736,13 +762,13 @@ int main(int argc, char* argv[])
blender = Blender::createDefault(Blender::NO, try_gpu);
else if (blend_type == Blender::MULTI_BAND)
{
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
LOGLN("Multi-band blender, number of bands: " << mb->numBands());
}
else if (blend_type == Blender::FEATHER)
{
FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
fb->setSharpness(1.f/blend_width);
LOGLN("Feather blender, sharpness: " << fb->sharpness());
}
@@ -763,5 +789,3 @@ int main(int argc, char* argv[])
LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
return 0;
}

View File

@@ -0,0 +1,127 @@
/*
* textdetection.cpp
*
* A demo program of the Extremal Region Filter algorithm described in
* Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
*
* Created on: Sep 23, 2013
* Author: Lluis Gomez i Bigorda <lgomez AT cvc.uab.es>
*/
#include "opencv2/opencv.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <iostream>
#include <iomanip>
using namespace std;
using namespace cv;
void show_help_and_exit(const char *cmd);
void groups_draw(Mat &src, vector<Rect> &groups);
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions);
int main(int argc, const char * argv[])
{
cout << endl << argv[0] << endl << endl;
cout << "Demo program of the Extremal Region Filter algorithm described in " << endl;
cout << "Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012" << endl << endl;
if (argc < 2) show_help_and_exit(argv[0]);
Mat src = imread(argv[1]);
// Extract channels to be processed individually
vector<Mat> channels;
computeNMChannels(src, channels);
int cn = (int)channels.size();
// Append negative channels to detect ER- (bright regions over dark background)
for (int c = 0; c < cn-1; c++)
channels.push_back(255-channels[c]);
// Create ERFilter objects with the 1st and 2nd stage default classifiers
Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),16,0.00015f,0.13f,0.2f,true,0.1f);
Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
vector<vector<ERStat> > regions(channels.size());
// Apply the default cascade classifier to each independent channel (could be done in parallel)
cout << "Extracting Class Specific Extremal Regions from " << (int)channels.size() << " channels ..." << endl;
cout << " (...) this may take a while (...)" << endl << endl;
for (int c=0; c<(int)channels.size(); c++)
{
er_filter1->run(channels[c], regions[c]);
er_filter2->run(channels[c], regions[c]);
}
// Detect character groups
cout << "Grouping extracted ERs ... ";
vector<Rect> groups;
erGrouping(channels, regions, "trained_classifier_erGrouping.xml", 0.5, groups);
// draw groups
groups_draw(src, groups);
imshow("grouping",src);
cout << "Done!" << endl << endl;
cout << "Press 'e' to show the extracted Extremal Regions, any other key to exit." << endl << endl;
if( waitKey(-1) == 'e' ) // 'e' == 101
er_show(channels,regions);
// memory clean-up
er_filter1.release();
er_filter2.release();
regions.clear();
groups.clear(); // clearing an empty vector is a no-op, so no guard is needed
}
// helper functions
void show_help_and_exit(const char *cmd)
{
cout << " Usage: " << cmd << " <input_image> " << endl;
cout << " Default classifier files (trained_classifierNM*.xml) must be in current directory" << endl << endl;
exit(-1);
}
void groups_draw(Mat &src, vector<Rect> &groups)
{
for (int i=(int)groups.size()-1; i>=0; i--)
{
if (src.type() == CV_8UC3)
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 0, 255, 255 ), 3, 8 );
else
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 255 ), 3, 8 );
}
}
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions)
{
for (int c=0; c<(int)channels.size(); c++)
{
Mat dst = Mat::zeros(channels[0].rows+2,channels[0].cols+2,CV_8UC1);
for (int r=0; r<(int)regions[c].size(); r++)
{
ERStat er = regions[c][r];
if (er.parent != NULL) // skip the root region, which has no parent
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
floodFill(channels[c],dst,Point(er.pixel%channels[c].cols,er.pixel/channels[c].cols),
Scalar(255),0,Scalar(er.level),Scalar(0),flags);
}
}
char buff[32]; // large enough for "channel %d" with any int
sprintf(buff, "channel %d", c);
imshow(buff, dst);
}
waitKey(-1);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -24,17 +24,14 @@ Mat image;
*/
static void on_trackbar( int, void* )
{
Mat new_image = Mat::zeros( image.size(), image.type() );
Mat new_image = Mat::zeros( image.size(), image.type() );
for( int y = 0; y < image.rows; y++ )
{ for( int x = 0; x < image.cols; x++ )
{ for( int c = 0; c < 3; c++ )
{
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
}
}
}
imshow("New Image", new_image);
for( int y = 0; y < image.rows; y++ )
for( int x = 0; x < image.cols; x++ )
for( int c = 0; c < 3; c++ )
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
imshow("New Image", new_image);
}
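The cleaned-up triple loop computes new = saturate_cast(alpha*old + beta) per channel; Mat::convertTo performs the same scaled, saturating transform in one call. A minimal equivalent, assuming the tutorial's globals image, alpha and beta:

Mat new_image;
image.convertTo(new_image, -1, alpha, beta); // rtype -1 keeps the source type
imshow("New Image", new_image);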

View File

@@ -22,24 +22,25 @@ int main( int argc, char** argv )
Mat src_test2, hsv_test2;
Mat hsv_half_down;
/// Load three images with different environment settings
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
/// Load three images with different environment settings
if( argc < 4 )
{
printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 );
src_test2 = imread( argv[3], 1 );
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 );
src_test2 = imread( argv[3], 1 );
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 30 bins for hue and 32 for saturation
/// Using 50 bins for hue and 60 for saturation
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
@@ -74,14 +75,15 @@ int main( int argc, char** argv )
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
{
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( "Done \n" );

View File

@@ -46,13 +46,13 @@ int main( int, char** argv )
/// Create Trackbar to select kernel type
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
&morph_elem, max_elem,
Morphology_Operations );
/// Create Trackbar to choose kernel size
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
&morph_size, max_kernel_size,
Morphology_Operations );
/// Default start
Morphology_Operations( 0, 0 );
@@ -76,5 +76,3 @@ void Morphology_Operations( int, void* )
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
}
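The hunk shows only the tail of Morphology_Operations; the element it applies is built with getStructuringElement from the two trackbar values. A sketch of the omitted construction, using the tutorial's globals (the tutorial derives operation as morph_operator + 2, since MORPH_OPEN is 2):

int operation = morph_operator + 2;
Mat element = getStructuringElement(morph_elem,
                                    Size(2*morph_size + 1, 2*morph_size + 1),
                                    Point(morph_size, morph_size));
morphologyEx(src, dst, operation, element);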

View File

@@ -66,10 +66,3 @@ int main( void )
return 0;
}

View File

@@ -44,12 +44,12 @@ int main( int, char** argv )
/// Create Trackbar to choose type of Threshold
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_value,
window_name, &threshold_value,
max_value, Threshold_Demo );
window_name, &threshold_value,
max_value, Threshold_Demo );
/// Call the function to initialize
Threshold_Demo( 0, 0 );
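The realigned trackbars above drive Threshold_Demo, whose core is a single library call; a sketch using the tutorial's globals (threshold_type 0..4 selects Binary, Binary Inverted, Truncate, To Zero, To Zero Inverted):

threshold(src_gray, dst, threshold_value, max_BINARY_value, threshold_type);
imshow(window_name, dst);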

View File

@@ -61,5 +61,3 @@ int main( int, char** argv )
return 0;
}

View File

@@ -15,7 +15,6 @@ using namespace cv;
Mat src, dst;
int top, bottom, left, right;
int borderType;
Scalar value;
const char* window_name = "copyMakeBorder Demo";
RNG rng(12345);
@@ -64,7 +63,7 @@ int main( int, char** argv )
else if( (char)c == 'r' )
{ borderType = BORDER_REPLICATE; }
value = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
Scalar value( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
imshow( window_name, dst );
@@ -72,5 +71,3 @@ int main( int, char** argv )
return 0;
}
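The hunk above turns value from a global into a Scalar rebuilt each iteration, so every keypress gets a fresh random border color before the core call. For orientation, that call with the tutorial's 5% border sizing (names assumed from the tutorial):

int b = (int)(0.05 * src.rows);
copyMakeBorder(src, dst, b, b, b, b, borderType, value);
imshow(window_name, dst);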

View File

@@ -62,7 +62,7 @@ void thresh_callback(int, void* )
findContours( threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
vector<vector<Point> >hull( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ convexHull( Mat(contours[i]), hull[i], false ); }

View File

@@ -92,4 +92,3 @@ void thresh_callback(int, void* )
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}

View File

@@ -79,5 +79,3 @@ int main( void )
waitKey(0);
return(0);
}

View File

@@ -120,4 +120,3 @@ void myHarris_function( int, void* )
}
imshow( myHarris_window, myHarris_copy );
}

View File

@@ -102,4 +102,3 @@ void goodFeaturesToTrack_Demo( int, void* )
for( size_t i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}

View File

@@ -90,4 +90,3 @@ void goodFeaturesToTrack_Demo( int, void* )
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
}

View File

@@ -9,6 +9,10 @@
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
using namespace cv;
using namespace std;
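One caveat about the guard added above: MSVC only honors _CRT_SECURE_NO_WARNINGS if it is defined before the first CRT header is pulled in, so defining it after the OpenCV includes may silence nothing. A sketch of the safer ordering:

#ifndef _CRT_SECURE_NO_WARNINGS
#  define _CRT_SECURE_NO_WARNINGS // must precede any CRT include to take effect
#endif
#include <cstdio>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>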

View File

@@ -168,5 +168,3 @@ void MyLine( Mat img, Point start, Point end )
thickness,
lineType );
}

View File

@@ -75,4 +75,4 @@ int main(int argc, char ** argv)
waitKey();
return 0;
}
}

View File

@@ -151,4 +151,4 @@ int main(int ac, char** av)
<< "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;
return 0;
}
}

View File

@@ -214,4 +214,4 @@ Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
}
return I;
}
}

View File

@@ -32,8 +32,8 @@ int main( int argc, char** argv )
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#ifdef DEMO_MIXED_API_USE
Ptr<IplImage> IplI = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
if(IplI.empty())
Ptr<IplImage> IplI(cvLoadImage(imagename)); // Ptr<T> is a safe ref-counting pointer class
if(!IplI)
{
cerr << "Can not load image " << imagename << endl;
return -1;

View File

@@ -43,7 +43,7 @@ int main( int argc, char* argv[])
cout << "Hand written function times passed in seconds: " << t << endl;
imshow("Output", J);
waitKey();
waitKey(0);
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
@@ -55,7 +55,7 @@ int main( int argc, char* argv[])
imshow("Output", K);
waitKey();
waitKey(0);
return 0;
}
void Sharpen(const Mat& myImage,Mat& Result)
@@ -84,4 +84,4 @@ void Sharpen(const Mat& myImage,Mat& Result)
Result.row(Result.rows-1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols-1).setTo(Scalar(0));
}
}
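Context for the waitKey(0) changes above: this tutorial contrasts the hand-written Sharpen with the equivalent built-in filter. The library path, using the tutorial's Mats I (input) and K (output) and the kernel shown in the hunk:

Mat kern = (Mat_<char>(3,3) <<  0, -1,  0,
                               -1,  5, -1,
                                0, -1,  0);
filter2D(I, K, I.depth(), kern); // same result as the hand-written Sharpen loop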

View File

@@ -82,4 +82,4 @@ int main(int,char**)
cout << "A vector of 2D Points = " << vPoints << endl << endl;
return 0;
}
}

View File

@@ -6,9 +6,9 @@
#include <opencv2/imgproc.hpp>// Image processing methods for the CPU
#include <opencv2/highgui.hpp>// Read images
// GPU structures and methods
#include <opencv2/gpuarithm.hpp>
#include <opencv2/gpufilters.hpp>
// CUDA structures and methods
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudafilters.hpp>
using namespace std;
using namespace cv;
@@ -16,41 +16,41 @@ using namespace cv;
double getPSNR(const Mat& I1, const Mat& I2); // CPU versions
Scalar getMSSIM( const Mat& I1, const Mat& I2);
double getPSNR_GPU(const Mat& I1, const Mat& I2); // Basic GPU versions
Scalar getMSSIM_GPU( const Mat& I1, const Mat& I2);
double getPSNR_CUDA(const Mat& I1, const Mat& I2); // Basic CUDA versions
Scalar getMSSIM_CUDA( const Mat& I1, const Mat& I2);
struct BufferPSNR // Optimized GPU versions
{ // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once reuse later.
gpu::GpuMat gI1, gI2, gs, t1,t2;
struct BufferPSNR // Optimized CUDA versions
{ // Data allocations are very expensive on CUDA. Use a buffer to avoid them: allocate once, reuse later.
cuda::GpuMat gI1, gI2, gs, t1,t2;
gpu::GpuMat buf;
cuda::GpuMat buf;
};
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);
double getPSNR_CUDA_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);
struct BufferMSSIM // Optimized GPU versions
{ // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once reuse later.
gpu::GpuMat gI1, gI2, gs, t1,t2;
struct BufferMSSIM // Optimized CUDA versions
{ // Data allocations are very expensive on CUDA. Use a buffer to avoid them: allocate once, reuse later.
cuda::GpuMat gI1, gI2, gs, t1,t2;
gpu::GpuMat I1_2, I2_2, I1_I2;
vector<gpu::GpuMat> vI1, vI2;
cuda::GpuMat I1_2, I2_2, I1_I2;
vector<cuda::GpuMat> vI1, vI2;
gpu::GpuMat mu1, mu2;
gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
cuda::GpuMat mu1, mu2;
cuda::GpuMat mu1_2, mu2_2, mu1_mu2;
gpu::GpuMat sigma1_2, sigma2_2, sigma12;
gpu::GpuMat t3;
cuda::GpuMat sigma1_2, sigma2_2, sigma12;
cuda::GpuMat t3;
gpu::GpuMat ssim_map;
cuda::GpuMat ssim_map;
gpu::GpuMat buf;
cuda::GpuMat buf;
};
Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b);
Scalar getMSSIM_CUDA_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b);
static void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to port your CPU code to GPU or write that from scratch." << endl
<< "This program shows how to port your CPU code to CUDA or write that from scratch." << endl
<< "You can see the performance improvement for the similarity check methods (PSNR and SSIM)." << endl
<< "Usage:" << endl
<< "./gpu-basics-similarity referenceImage comparedImage numberOfTimesToRunTest(like 10)." << endl
@@ -90,33 +90,33 @@ int main(int, char *argv[])
cout << "Time of PSNR CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
//------------------------------- PSNR GPU ----------------------------------------------------
//------------------------------- PSNR CUDA ----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
result = getPSNR_GPU(I1,I2);
result = getPSNR_CUDA(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of PSNR GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
cout << "Time of PSNR CUDA (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
//------------------------------- PSNR GPU Optimized--------------------------------------------
//------------------------------- PSNR CUDA Optimized--------------------------------------------
time = (double)getTickCount(); // Initial call
result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
result = getPSNR_CUDA_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
cout << "Initial call GPU optimized: " << time <<" milliseconds."
cout << "Initial call CUDA optimized: " << time <<" milliseconds."
<< " With result of: " << result << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
result = getPSNR_CUDA_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of PSNR GPU OPTIMIZED ( / " << TIMES << " runs): " << time
cout << "Time of PSNR CUDA OPTIMIZED ( / " << TIMES << " runs): " << time
<< " milliseconds." << " With result of: " << result << endl << endl;
@@ -133,34 +133,34 @@ int main(int, char *argv[])
cout << "Time of MSSIM CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
//------------------------------- SSIM GPU -----------------------------------------------------
//------------------------------- SSIM CUDA -----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
x = getMSSIM_GPU(I1,I2);
x = getMSSIM_CUDA(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of MSSIM GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
cout << "Time of MSSIM CUDA (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
//------------------------------- SSIM GPU Optimized--------------------------------------------
//------------------------------- SSIM CUDA Optimized--------------------------------------------
time = (double)getTickCount();
x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
x = getMSSIM_CUDA_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
cout << "Time of MSSIM GPU Initial Call " << time << " milliseconds."
cout << "Time of MSSIM CUDA Initial Call " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
x = getMSSIM_CUDA_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
cout << "Time of MSSIM GPU OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
cout << "Time of MSSIM CUDA OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl << endl;
return 0;
}
@@ -189,7 +189,7 @@ double getPSNR(const Mat& I1, const Mat& I2)
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
double getPSNR_CUDA_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
{
b.gI1.upload(I1);
b.gI2.upload(I2);
@@ -197,10 +197,10 @@ double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
b.gI1.convertTo(b.t1, CV_32F);
b.gI2.convertTo(b.t2, CV_32F);
gpu::absdiff(b.t1.reshape(1), b.t2.reshape(1), b.gs);
gpu::multiply(b.gs, b.gs, b.gs);
cuda::absdiff(b.t1.reshape(1), b.t2.reshape(1), b.gs);
cuda::multiply(b.gs, b.gs, b.gs);
double sse = gpu::sum(b.gs, b.buf)[0];
double sse = cuda::sum(b.gs, b.buf)[0];
if( sse <= 1e-10) // for small values return zero
return 0;
@@ -212,9 +212,9 @@ double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
}
}
double getPSNR_GPU(const Mat& I1, const Mat& I2)
double getPSNR_CUDA(const Mat& I1, const Mat& I2)
{
gpu::GpuMat gI1, gI2, gs, t1,t2;
cuda::GpuMat gI1, gI2, gs, t1,t2;
gI1.upload(I1);
gI2.upload(I2);
@@ -222,10 +222,10 @@ double getPSNR_GPU(const Mat& I1, const Mat& I2)
gI1.convertTo(t1, CV_32F);
gI2.convertTo(t2, CV_32F);
gpu::absdiff(t1.reshape(1), t2.reshape(1), gs);
gpu::multiply(gs, gs, gs);
cuda::absdiff(t1.reshape(1), t2.reshape(1), gs);
cuda::multiply(gs, gs, gs);
Scalar s = gpu::sum(gs);
Scalar s = cuda::sum(gs);
double sse = s.val[0] + s.val[1] + s.val[2];
if( sse <= 1e-10) // for small values return zero
@@ -291,11 +291,11 @@ Scalar getMSSIM( const Mat& i1, const Mat& i2)
return mssim;
}
Scalar getMSSIM_GPU( const Mat& i1, const Mat& i2)
Scalar getMSSIM_CUDA( const Mat& i1, const Mat& i2)
{
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
gpu::GpuMat gI1, gI2, gs1, tmp1,tmp2;
cuda::GpuMat gI1, gI2, gs1, tmp1,tmp2;
gI1.upload(i1);
gI2.upload(i2);
@@ -303,64 +303,64 @@ Scalar getMSSIM_GPU( const Mat& i1, const Mat& i2)
gI1.convertTo(tmp1, CV_MAKE_TYPE(CV_32F, gI1.channels()));
gI2.convertTo(tmp2, CV_MAKE_TYPE(CV_32F, gI2.channels()));
vector<gpu::GpuMat> vI1, vI2;
gpu::split(tmp1, vI1);
gpu::split(tmp2, vI2);
vector<cuda::GpuMat> vI1, vI2;
cuda::split(tmp1, vI1);
cuda::split(tmp2, vI2);
Scalar mssim;
Ptr<gpu::Filter> gauss = gpu::createGaussianFilter(vI2[0].type(), -1, Size(11, 11), 1.5);
Ptr<cuda::Filter> gauss = cuda::createGaussianFilter(vI2[0].type(), -1, Size(11, 11), 1.5);
for( int i = 0; i < gI1.channels(); ++i )
{
gpu::GpuMat I2_2, I1_2, I1_I2;
cuda::GpuMat I2_2, I1_2, I1_I2;
gpu::multiply(vI2[i], vI2[i], I2_2); // I2^2
gpu::multiply(vI1[i], vI1[i], I1_2); // I1^2
gpu::multiply(vI1[i], vI2[i], I1_I2); // I1 * I2
cuda::multiply(vI2[i], vI2[i], I2_2); // I2^2
cuda::multiply(vI1[i], vI1[i], I1_2); // I1^2
cuda::multiply(vI1[i], vI2[i], I1_I2); // I1 * I2
/*************************** END INITS **********************************/
gpu::GpuMat mu1, mu2; // PRELIMINARY COMPUTING
cuda::GpuMat mu1, mu2; // PRELIMINARY COMPUTING
gauss->apply(vI1[i], mu1);
gauss->apply(vI2[i], mu2);
gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
gpu::multiply(mu1, mu1, mu1_2);
gpu::multiply(mu2, mu2, mu2_2);
gpu::multiply(mu1, mu2, mu1_mu2);
cuda::GpuMat mu1_2, mu2_2, mu1_mu2;
cuda::multiply(mu1, mu1, mu1_2);
cuda::multiply(mu2, mu2, mu2_2);
cuda::multiply(mu1, mu2, mu1_mu2);
gpu::GpuMat sigma1_2, sigma2_2, sigma12;
cuda::GpuMat sigma1_2, sigma2_2, sigma12;
gauss->apply(I1_2, sigma1_2);
gpu::subtract(sigma1_2, mu1_2, sigma1_2); // sigma1_2 -= mu1_2;
cuda::subtract(sigma1_2, mu1_2, sigma1_2); // sigma1_2 -= mu1_2;
gauss->apply(I2_2, sigma2_2);
gpu::subtract(sigma2_2, mu2_2, sigma2_2); // sigma2_2 -= mu2_2;
cuda::subtract(sigma2_2, mu2_2, sigma2_2); // sigma2_2 -= mu2_2;
gauss->apply(I1_I2, sigma12);
gpu::subtract(sigma12, mu1_mu2, sigma12); // sigma12 -= mu1_mu2;
cuda::subtract(sigma12, mu1_mu2, sigma12); // sigma12 -= mu1_mu2;
///////////////////////////////// FORMULA ////////////////////////////////
gpu::GpuMat t1, t2, t3;
cuda::GpuMat t1, t2, t3;
mu1_mu2.convertTo(t1, -1, 2, C1); // t1 = 2 * mu1_mu2 + C1;
sigma12.convertTo(t2, -1, 2, C2); // t2 = 2 * sigma12 + C2;
gpu::multiply(t1, t2, t3); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
cuda::multiply(t1, t2, t3); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
gpu::addWeighted(mu1_2, 1.0, mu2_2, 1.0, C1, t1); // t1 = mu1_2 + mu2_2 + C1;
gpu::addWeighted(sigma1_2, 1.0, sigma2_2, 1.0, C2, t2); // t2 = sigma1_2 + sigma2_2 + C2;
gpu::multiply(t1, t2, t1); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
cuda::addWeighted(mu1_2, 1.0, mu2_2, 1.0, C1, t1); // t1 = mu1_2 + mu2_2 + C1;
cuda::addWeighted(sigma1_2, 1.0, sigma2_2, 1.0, C2, t2); // t2 = sigma1_2 + sigma2_2 + C2;
cuda::multiply(t1, t2, t1); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
gpu::GpuMat ssim_map;
gpu::divide(t3, t1, ssim_map); // ssim_map = t3./t1;
cuda::GpuMat ssim_map;
cuda::divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar s = gpu::sum(ssim_map);
Scalar s = cuda::sum(ssim_map);
mssim.val[i] = s.val[0] / (ssim_map.rows * ssim_map.cols);
}
return mssim;
}
Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
Scalar getMSSIM_CUDA_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
{
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
@@ -368,66 +368,65 @@ Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
b.gI1.upload(i1);
b.gI2.upload(i2);
gpu::Stream stream;
cuda::Stream stream;
b.gI1.convertTo(b.t1, CV_32F, stream);
b.gI2.convertTo(b.t2, CV_32F, stream);
gpu::split(b.t1, b.vI1, stream);
gpu::split(b.t2, b.vI2, stream);
cuda::split(b.t1, b.vI1, stream);
cuda::split(b.t2, b.vI2, stream);
Scalar mssim;
Ptr<gpu::Filter> gauss = gpu::createGaussianFilter(b.vI1[0].type(), -1, Size(11, 11), 1.5);
Ptr<cuda::Filter> gauss = cuda::createGaussianFilter(b.vI1[0].type(), -1, Size(11, 11), 1.5);
for( int i = 0; i < b.gI1.channels(); ++i )
{
gpu::multiply(b.vI2[i], b.vI2[i], b.I2_2, 1, -1, stream); // I2^2
gpu::multiply(b.vI1[i], b.vI1[i], b.I1_2, 1, -1, stream); // I1^2
gpu::multiply(b.vI1[i], b.vI2[i], b.I1_I2, 1, -1, stream); // I1 * I2
cuda::multiply(b.vI2[i], b.vI2[i], b.I2_2, 1, -1, stream); // I2^2
cuda::multiply(b.vI1[i], b.vI1[i], b.I1_2, 1, -1, stream); // I1^2
cuda::multiply(b.vI1[i], b.vI2[i], b.I1_I2, 1, -1, stream); // I1 * I2
gauss->apply(b.vI1[i], b.mu1, stream);
gauss->apply(b.vI2[i], b.mu2, stream);
gpu::multiply(b.mu1, b.mu1, b.mu1_2, 1, -1, stream);
gpu::multiply(b.mu2, b.mu2, b.mu2_2, 1, -1, stream);
gpu::multiply(b.mu1, b.mu2, b.mu1_mu2, 1, -1, stream);
cuda::multiply(b.mu1, b.mu1, b.mu1_2, 1, -1, stream);
cuda::multiply(b.mu2, b.mu2, b.mu2_2, 1, -1, stream);
cuda::multiply(b.mu1, b.mu2, b.mu1_mu2, 1, -1, stream);
gauss->apply(b.I1_2, b.sigma1_2, stream);
gpu::subtract(b.sigma1_2, b.mu1_2, b.sigma1_2, gpu::GpuMat(), -1, stream);
cuda::subtract(b.sigma1_2, b.mu1_2, b.sigma1_2, cuda::GpuMat(), -1, stream);
//b.sigma1_2 -= b.mu1_2; - This would result in an extra data transfer operation
gauss->apply(b.I2_2, b.sigma2_2, stream);
gpu::subtract(b.sigma2_2, b.mu2_2, b.sigma2_2, gpu::GpuMat(), -1, stream);
cuda::subtract(b.sigma2_2, b.mu2_2, b.sigma2_2, cuda::GpuMat(), -1, stream);
//b.sigma2_2 -= b.mu2_2;
gauss->apply(b.I1_I2, b.sigma12, stream);
gpu::subtract(b.sigma12, b.mu1_mu2, b.sigma12, gpu::GpuMat(), -1, stream);
cuda::subtract(b.sigma12, b.mu1_mu2, b.sigma12, cuda::GpuMat(), -1, stream);
//b.sigma12 -= b.mu1_mu2;
//here too it would be an extra data transfer due to call of operator*(Scalar, Mat)
gpu::multiply(b.mu1_mu2, 2, b.t1, 1, -1, stream); //b.t1 = 2 * b.mu1_mu2 + C1;
gpu::add(b.t1, C1, b.t1, gpu::GpuMat(), -1, stream);
gpu::multiply(b.sigma12, 2, b.t2, 1, -1, stream); //b.t2 = 2 * b.sigma12 + C2;
gpu::add(b.t2, C2, b.t2, gpu::GpuMat(), -12, stream);
cuda::multiply(b.mu1_mu2, 2, b.t1, 1, -1, stream); //b.t1 = 2 * b.mu1_mu2 + C1;
cuda::add(b.t1, C1, b.t1, cuda::GpuMat(), -1, stream);
cuda::multiply(b.sigma12, 2, b.t2, 1, -1, stream); //b.t2 = 2 * b.sigma12 + C2;
cuda::add(b.t2, C2, b.t2, cuda::GpuMat(), -1, stream); // dtype -1 keeps the source depth (the -12 was a typo)
gpu::multiply(b.t1, b.t2, b.t3, 1, -1, stream); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
cuda::multiply(b.t1, b.t2, b.t3, 1, -1, stream); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
gpu::add(b.mu1_2, b.mu2_2, b.t1, gpu::GpuMat(), -1, stream);
gpu::add(b.t1, C1, b.t1, gpu::GpuMat(), -1, stream);
cuda::add(b.mu1_2, b.mu2_2, b.t1, cuda::GpuMat(), -1, stream);
cuda::add(b.t1, C1, b.t1, cuda::GpuMat(), -1, stream);
gpu::add(b.sigma1_2, b.sigma2_2, b.t2, gpu::GpuMat(), -1, stream);
gpu::add(b.t2, C2, b.t2, gpu::GpuMat(), -1, stream);
cuda::add(b.sigma1_2, b.sigma2_2, b.t2, cuda::GpuMat(), -1, stream);
cuda::add(b.t2, C2, b.t2, cuda::GpuMat(), -1, stream);
gpu::multiply(b.t1, b.t2, b.t1, 1, -1, stream); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
gpu::divide(b.t3, b.t1, b.ssim_map, 1, -1, stream); // ssim_map = t3./t1;
cuda::multiply(b.t1, b.t2, b.t1, 1, -1, stream); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
cuda::divide(b.t3, b.t1, b.ssim_map, 1, -1, stream); // ssim_map = t3./t1;
stream.waitForCompletion();
Scalar s = gpu::sum(b.ssim_map, b.buf);
Scalar s = cuda::sum(b.ssim_map, b.buf);
mssim.val[i] = s.val[0] / (b.ssim_map.rows * b.ssim_map.cols);
}
return mssim;
}
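A hedged porting aid for the wholesale gpu -> cuda rename running through this file: when downstream code only uses symbols that were renamed rather than removed, a namespace alias lets it compile against the new headers unchanged:

namespace cv { namespace gpu = ::cv::cuda; } // assumption: only renamed symbols are used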

View File

@@ -27,4 +27,4 @@ int main( int argc, char** argv )
waitKey(0); // Wait for a keystroke in the window
return 0;
}
}

View File

@@ -76,8 +76,8 @@ int main(int argc, char *argv[])
// Windows
namedWindow(WIN_RF, WINDOW_AUTOSIZE );
namedWindow(WIN_UT, WINDOW_AUTOSIZE );
moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
moveWindow(WIN_UT, refS.width, 0); //1500, 2
moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl;
@@ -203,4 +203,4 @@ Scalar getMSSIM( const Mat& i1, const Mat& i2)
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
}

View File

@@ -127,4 +127,4 @@ int main()
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user
waitKey(0);
}
}

View File

@@ -1,14 +1,6 @@
/**
* @file objectDetection.cpp
* @author A. Huaman ( based on the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, showing how to load a cascade classifier and find objects (face + eyes) in a video stream
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -20,79 +12,73 @@ using namespace cv;
void detectAndDisplay( Mat frame );
/** Global variables */
//-- Note, either copy these two files from opencv/data/haarscascades to your current folder, or change these locations
string face_cascade_name = "haarcascade_frontalface_alt.xml";
string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
String window_name = "Capture - Face detection";
/**
* @function main
*/
/** @function main */
int main( void )
{
CvCapture* capture;
Mat frame;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
for(;;)
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
frame = cv::cvarrToMat(cvQueryFrame( capture ));
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 27 ) { break; } // escape
}
}
return 0;
return 0;
}
/**
* @function detectAndDisplay
*/
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
for( size_t i = 0; i < faces.size(); i++ )
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
}
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
//-- Show what you got
imshow( window_name, frame );
}
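A condensed sketch of the CvCapture-to-VideoCapture migration this hunk performs, for readers porting similar code:

VideoCapture cap;
cap.open(-1);                           // -1: first available camera
Mat frame;
while (cap.read(frame))                 // read() returns false on failure or stream end
{
    detectAndDisplay(frame);            // per-frame work
    if ((char)waitKey(10) == 27) break; // 27 = ESC
}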

View File

@@ -3,12 +3,9 @@
* @author A. Huaman ( based on the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, showing how to load a cascade classifier and find objects (face + eyes) in a video stream - Using LBP here
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -20,46 +17,43 @@ using namespace cv;
void detectAndDisplay( Mat frame );
/** Global variables */
string face_cascade_name = "lbpcascade_frontalface.xml";
string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
String face_cascade_name = "lbpcascade_frontalface.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
String window_name = "Capture - Face detection";
/**
* @function main
*/
int main( void )
{
CvCapture* capture;
Mat frame;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascade
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 1. Load the cascade
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
for(;;)
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
frame = cv::cvarrToMat(cvQueryFrame( capture ));
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
//-- bail out if escape was pressed
int c = waitKey(10);
if( (char)c == 27 ) { break; }
}
}
return 0;
return 0;
}
/**
@@ -67,37 +61,37 @@ int main( void )
*/
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
for( size_t i = 0; i < faces.size(); i++ )
for( size_t i = 0; i < faces.size(); i++ )
{
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
if( eyes.size() == 2)
{
//-- Draw the face
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
if( eyes.size() == 2)
{
//-- Draw the face
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
for( size_t j = 0; j < eyes.size(); j++ )
{ //-- Draw the eyes
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 255 ), 3, 8, 0 );
}
}
for( size_t j = 0; j < eyes.size(); j++ )
{ //-- Draw the eyes
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 255 ), 3, 8, 0 );
}
}
}
//-- Show what you got
imshow( window_name, frame );
//-- Show what you got
imshow( window_name, frame );
}

View File

@@ -0,0 +1,53 @@
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <iostream>
#include <fstream>
using namespace cv;
using namespace std;
void loadExposureSeq(String, vector<Mat>&, vector<float>&);
int main(int argc, char**argv)
{
if(argc < 2) {
cout << "Usage: " << argv[0] << " <path to exposure sequence>" << endl;
return -1;
}
vector<Mat> images;
vector<float> times;
loadExposureSeq(argv[1], images, times);
Mat response;
Ptr<CalibrateDebevec> calibrate = createCalibrateDebevec();
calibrate->process(images, response, times);
Mat hdr;
Ptr<MergeDebevec> merge_debevec = createMergeDebevec();
merge_debevec->process(images, hdr, times, response);
Mat ldr;
Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
tonemap->process(hdr, ldr);
Mat fusion;
Ptr<MergeMertens> merge_mertens = createMergeMertens();
merge_mertens->process(images, fusion);
imwrite("fusion.png", fusion * 255);
imwrite("ldr.png", ldr * 255);
imwrite("hdr.hdr", hdr);
return 0;
}
void loadExposureSeq(String path, vector<Mat>& images, vector<float>& times)
{
path = path + std::string("/");
ifstream list_file((path + "list.txt").c_str());
string name;
float val;
while(list_file >> name >> val) {
Mat img = imread(path + name);
images.push_back(img);
times.push_back(1 / val);
}
list_file.close();
}
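For context, loadExposureSeq expects a list.txt in the sequence directory with one image filename and the exposure-time denominator per line (times stores 1/val, i.e. the exposure in seconds). A hypothetical example, with illustrative filenames:

memorial00.png 32
memorial01.png 16
memorial02.png 8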

View File

@@ -0,0 +1,186 @@
/**
* @file bg_sub.cpp
* @brief Background subtraction tutorial sample code
* @author Domenico D. Bloisi
*/
//opencv
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard = 0; //input from keyboard, initialized because it is read before the first assignment
/** Function Headers */
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "--------------------------------------------------------------------------" << endl
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process both videos (-vid) and images (-img)." << endl
<< endl
<< "Usage:" << endl
<< "./bs {-vid <video filename>|-img <image filename>}" << endl
<< "for example: ./bs -vid video.avi" << endl
<< "or: ./bs -img /data/images/1.png" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
/**
* @function main
*/
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
namedWindow("FG Mask MOG 2");
//create Background Subtractor objects
pMOG = createBackgroundSubtractorMOG(); //MOG approach
pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
else if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
/**
* @function processVideo
*/
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
pMOG->apply(frame, fgMaskMOG);
pMOG2->apply(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
//delete capture object
capture.release();
}
/**
* @function processImages
*/
void processImages(char* firstFrameFilename) {
//read the first file of the sequence
frame = imread(firstFrameFilename);
if(!frame.data){
//error in opening the first image
cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(firstFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
pMOG->apply(frame, fgMaskMOG);
pMOG2->apply(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(!frame.data){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
}
}
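An alternative to the manual prefix/number/suffix arithmetic above: VideoCapture can iterate printf-style numbered image sequences directly, which would let processImages share processVideo's loop. A hedged sketch (the pattern is illustrative):

VideoCapture seq("/data/images/%d.png"); // image-sequence pattern
Mat img;
while (seq.read(img))
{
    pMOG->apply(img, fgMaskMOG);
    pMOG2->apply(img, fgMaskMOG2);
}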

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff