OCL module 2 trash
@@ -13,7 +13,6 @@ if(NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_LIST_DIR)
add_subdirectory(c)
add_subdirectory(cpp)
add_subdirectory(gpu)
add_subdirectory(ocl)
add_subdirectory(tapi)

if(WIN32 AND HAVE_DIRECTX)

@@ -1,58 +0,0 @@
SET(OPENCV_OCL_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
    opencv_ml opencv_video opencv_objdetect opencv_features2d
    opencv_calib3d opencv_legacy opencv_contrib opencv_ocl opencv_nonfree)

ocv_check_dependencies(${OPENCV_OCL_SAMPLES_REQUIRED_DEPS})

if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
  set(project "ocl")
  string(TOUPPER "${project}" project_upper)

  project("${project}_samples")

  ocv_include_modules(${OPENCV_OCL_SAMPLES_REQUIRED_DEPS})

  if(HAVE_OPENCL)
    ocv_include_directories(${OPENCL_INCLUDE_DIR})
  endif()

  # ---------------------------------------------
  # Define executable targets
  # ---------------------------------------------
  MACRO(OPENCV_DEFINE_OCL_EXAMPLE name srcs)
    set(the_target "example_${project}_${name}")
    add_executable(${the_target} ${srcs})

    target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_OCL_SAMPLES_REQUIRED_DEPS})

    set_target_properties(${the_target} PROPERTIES
      OUTPUT_NAME "${project}-example-${name}"
      PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}")

    if(ENABLE_SOLUTION_FOLDERS)
      set_target_properties(${the_target} PROPERTIES FOLDER "samples//${project}")
    endif()

    if(WIN32)
      if(MSVC AND NOT BUILD_SHARED_LIBS)
        set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
      endif()
      install(TARGETS ${the_target} RUNTIME DESTINATION "${OPENCV_SAMPLES_BIN_INSTALL_PATH}/${project}" COMPONENT samples)
    endif()
  ENDMACRO()

  file(GLOB all_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)

  foreach(sample_filename ${all_samples})
    get_filename_component(sample ${sample_filename} NAME_WE)
    file(GLOB sample_srcs RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${sample}.*)
    OPENCV_DEFINE_OCL_EXAMPLE(${sample} ${sample_srcs})
  endforeach()
endif()

if(INSTALL_C_EXAMPLES AND NOT WIN32)
  file(GLOB install_list *.c *.cpp *.jpg *.png *.data makefile.* build_all.sh *.dsp *.cmd)
  install(FILES ${install_list}
          DESTINATION share/OpenCV/samples/${project}
          PERMISSIONS OWNER_READ GROUP_READ WORLD_READ COMPONENT samples)
endif()

@@ -1,65 +0,0 @@
// This sample shows the difference between the adaptive bilateral filter and the bilateral filter.
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/ocl.hpp"

using namespace cv;
using namespace std;

int main( int argc, const char** argv )
{
    const char* keys =
        "{ i input | | specify input image }"
        "{ k ksize | 11 | specify kernel size }"
        "{ s sSpace | 3 | specify sigma space }"
        "{ c sColor | 30 | specify max color }"
        "{ h help | false | print help message }";

    CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help"))
    {
        cout << "Usage : adaptive_bilateral_filter [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string src_path = cmd.get<string>("i");
    int ks = cmd.get<int>("k");
    const char * winName[] = {"input", "ABF OpenCL", "BF OpenCL"};

    Mat src = imread(src_path);
    if (src.empty())
    {
        cout << "error read image: " << src_path << endl;
        return EXIT_FAILURE;
    }

    double sigmaSpace = cmd.get<int>("s");

    // Sigma for checking pixel values. It is used as-is in the "normal" bilateral filter,
    // and as an upper clamp in the adaptive case.
    double sigmacolor = cmd.get<int>("c");

    ocl::oclMat dsrc(src), dABFilter, dBFilter;
    Size ksize(ks, ks);

    // ksize is the total width/height of the neighborhood used to calculate local variance.
    // sigmaSpace is not a priori related to ksize/2.
    ocl::adaptiveBilateralFilter(dsrc, dABFilter, ksize, sigmaSpace, sigmacolor);
    ocl::bilateralFilter(dsrc, dBFilter, ks, sigmacolor, sigmaSpace);
    Mat abFilter = dABFilter, bFilter = dBFilter;

    ocl::finish();

    imshow(winName[0], src);
    imshow(winName[1], abFilter);
    imshow(winName[2], bFilter);

    waitKey();

    return EXIT_SUCCESS;
}

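For readers comparing the removed OCL calls against the plain CPU path, here is a minimal sketch using only the stock imgproc API. It assumes nothing beyond cv::bilateralFilter; the adaptive variant above existed only in the ocl module, so it is not reproduced, and the helper name and parameter values are illustrative.

#include "opencv2/imgproc.hpp"

using namespace cv;

// CPU-side reference: plain bilateral filter with the same kernel size /
// sigma parameters that the OCL sample passes to ocl::bilateralFilter.
static Mat cpuBilateral(const Mat& src, int ks, double sigmaColor, double sigmaSpace)
{
    Mat dst;
    // ks is the diameter of the pixel neighborhood; sigmaColor limits how
    // different in value a neighbor may be and still contribute,
    // sigmaSpace weights spatial distance.
    bilateralFilter(src, dst, ks, sigmaColor, sigmaSpace);
    return dst;
}
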
@@ -1,126 +0,0 @@
#include <iostream>
#include <string>

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/ocl.hpp"
#include "opencv2/highgui.hpp"

using namespace std;
using namespace cv;
using namespace cv::ocl;

#define M_MOG  1
#define M_MOG2 2

int main(int argc, const char** argv)
{
    cv::CommandLineParser cmd(argc, argv,
        "{ c camera | false | use camera }"
        "{ f file | 768x576.avi | input video file }"
        "{ m method | mog | method (mog, mog2) }"
        "{ h help | false | print help message }");

    if (cmd.has("help"))
    {
        cout << "Usage : bgfg_segm [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    bool useCamera = cmd.get<bool>("camera");
    string file = cmd.get<string>("file");
    string method = cmd.get<string>("method");

    if (method != "mog" && method != "mog2")
    {
        cerr << "Incorrect method" << endl;
        return EXIT_FAILURE;
    }

    int m = method == "mog" ? M_MOG : M_MOG2;

    VideoCapture cap;
    if (useCamera)
        cap.open(0);
    else
        cap.open(file);

    if (!cap.isOpened())
    {
        cout << "can not open camera or video file" << endl;
        return EXIT_FAILURE;
    }

    Mat frame;
    cap >> frame;

    oclMat d_frame(frame);

    cv::ocl::MOG mog;
    cv::ocl::MOG2 mog2;

    oclMat d_fgmask, d_fgimg, d_bgimg;

    d_fgimg.create(d_frame.size(), d_frame.type());

    Mat fgmask, fgimg, bgimg;

    switch (m)
    {
    case M_MOG:
        mog(d_frame, d_fgmask, 0.01f);
        break;

    case M_MOG2:
        mog2(d_frame, d_fgmask);
        break;
    }

    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;
        d_frame.upload(frame);

        int64 start = cv::getTickCount();

        // update the model
        switch (m)
        {
        case M_MOG:
            mog(d_frame, d_fgmask, 0.01f);
            mog.getBackgroundImage(d_bgimg);
            break;

        case M_MOG2:
            mog2(d_frame, d_fgmask);
            mog2.getBackgroundImage(d_bgimg);
            break;
        }

        double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
        std::cout << "FPS : " << fps << std::endl;

        d_fgimg.setTo(Scalar::all(0));
        d_frame.copyTo(d_fgimg, d_fgmask);

        d_fgmask.download(fgmask);
        d_fgimg.download(fgimg);
        if (!d_bgimg.empty())
            d_bgimg.download(bgimg);

        imshow("image", frame);
        imshow("foreground mask", fgmask);
        imshow("foreground image", fgimg);
        if (!bgimg.empty())
            imshow("mean background image", bgimg);

        if (27 == waitKey(30))
            break;
    }

    return EXIT_SUCCESS;
}

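The same segmentation loop maps onto the CPU video module; a minimal sketch, assuming the cv::createBackgroundSubtractorMOG2 factory from opencv2/video (modern naming) rather than the removed cv::ocl classes, with illustrative history/threshold/learning-rate values.

#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    // detectShadows=true marks shadow pixels with a distinct gray value in fgmask.
    Ptr<BackgroundSubtractorMOG2> mog2 = createBackgroundSubtractorMOG2(500, 16, true);

    Mat frame, fgmask, bgimg;
    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;

        mog2->apply(frame, fgmask, 0.01);   // learning rate comparable to the sample above
        mog2->getBackgroundImage(bgimg);

        imshow("foreground mask", fgmask);
        if (!bgimg.empty()) imshow("mean background image", bgimg);
        if (waitKey(30) == 27) break;
    }
    return 0;
}
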
@@ -1,112 +0,0 @@
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
using namespace cv;
using namespace std;

Ptr<CLAHE> pFilter;
int tilesize;
int cliplimit;

static void TSize_Callback(int pos)
{
    if(pos==0)
        pFilter->setTilesGridSize(Size(1,1));
    else
        pFilter->setTilesGridSize(Size(tilesize,tilesize));
}

static void Clip_Callback(int)
{
    pFilter->setClipLimit(cliplimit);
}

int main(int argc, char** argv)
{
    const char* keys =
        "{ i input | | specify input image }"
        "{ c camera | 0 | specify camera id }"
        "{ s use_cpu | false | use cpu algorithm }"
        "{ o output | clahe_output.jpg | specify output save path }"
        "{ h help | false | print help message }";

    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help"))
    {
        cout << "Usage : clahe [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string infile = cmd.get<string>("i"), outfile = cmd.get<string>("o");
    int camid = cmd.get<int>("c");
    bool use_cpu = cmd.get<bool>("s");
    VideoCapture capture;

    namedWindow("CLAHE");
    createTrackbar("Tile Size", "CLAHE", &tilesize, 32, (TrackbarCallback)TSize_Callback);
    createTrackbar("Clip Limit", "CLAHE", &cliplimit, 20, (TrackbarCallback)Clip_Callback);

    Mat frame, outframe;
    ocl::oclMat d_outframe, d_frame;

    int cur_clip;
    Size cur_tilesize;
    pFilter = use_cpu ? createCLAHE() : ocl::createCLAHE();

    cur_clip = (int)pFilter->getClipLimit();
    cur_tilesize = pFilter->getTilesGridSize();
    setTrackbarPos("Tile Size", "CLAHE", cur_tilesize.width);
    setTrackbarPos("Clip Limit", "CLAHE", cur_clip);

    if(infile != "")
    {
        frame = imread(infile);
        if(frame.empty())
        {
            cout << "error read image: " << infile << endl;
            return EXIT_FAILURE;
        }
    }
    else
        capture.open(camid);

    cout << "\nControls:\n"
         << "\to - save output image\n"
         << "\tESC - exit\n";

    for (;;)
    {
        if(capture.isOpened())
            capture.read(frame);
        else
            frame = imread(infile);
        if(frame.empty())
            continue;

        if(use_cpu)
        {
            cvtColor(frame, frame, COLOR_BGR2GRAY);
            pFilter->apply(frame, outframe);
        }
        else
        {
            ocl::cvtColor(d_frame = frame, d_outframe, COLOR_BGR2GRAY);
            pFilter->apply(d_outframe, d_outframe);
            d_outframe.download(outframe);
        }

        imshow("CLAHE", outframe);

        char key = (char)waitKey(3);
        if(key == 'o')
            imwrite(outfile, outframe);
        else if(key == 27)
            break;
    }
    return EXIT_SUCCESS;
}

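For a single still image the trackbar machinery above is not needed; a minimal sketch of the CPU CLAHE path, assuming only the standard cv::createCLAHE factory from imgproc. The input file name and the clip-limit/tile-size values are illustrative, not taken from the sample.

#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;

int main()
{
    Mat src = imread("input.jpg", IMREAD_GRAYSCALE);  // hypothetical input path
    if (src.empty()) return -1;

    // Clip limit 2.0 with 8x8 tiles is a common starting point for
    // contrast-limited adaptive histogram equalization.
    Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8));

    Mat dst;
    clahe->apply(src, dst);

    imwrite("clahe_output.jpg", dst);
    return 0;
}
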
@@ -1,390 +0,0 @@
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/ocl/ocl.hpp"
|
||||
|
||||
#include "opencv2/highgui/highgui_c.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
|
||||
# include <thread>
|
||||
#endif
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
#define LOOP_NUM 1
|
||||
#define MAX_THREADS 10
|
||||
|
||||
|
||||
///////////////////////////single-threading faces detecting///////////////////////////////
|
||||
|
||||
const static Scalar colors[] = { CV_RGB(0,0,255),
|
||||
CV_RGB(0,128,255),
|
||||
CV_RGB(0,255,255),
|
||||
CV_RGB(0,255,0),
|
||||
CV_RGB(255,128,0),
|
||||
CV_RGB(255,255,0),
|
||||
CV_RGB(255,0,0),
|
||||
CV_RGB(255,0,255)
|
||||
} ;
|
||||
|
||||
|
||||
int64 work_begin[MAX_THREADS] = {0};
|
||||
int64 work_total[MAX_THREADS] = {0};
|
||||
string inputName, outputName, cascadeName;
|
||||
|
||||
static void workBegin(int i = 0)
|
||||
{
|
||||
work_begin[i] = getTickCount();
|
||||
}
|
||||
|
||||
static void workEnd(int i = 0)
|
||||
{
|
||||
work_total[i] += (getTickCount() - work_begin[i]);
|
||||
}
|
||||
|
||||
static double getTotalTime(int i = 0)
|
||||
{
|
||||
return work_total[i] /getTickFrequency() * 1000.;
|
||||
}
|
||||
|
||||
|
||||
static void detect( Mat& img, vector<Rect>& faces,
|
||||
ocl::OclCascadeClassifier& cascade,
|
||||
double scale);
|
||||
|
||||
|
||||
static void detectCPU( Mat& img, vector<Rect>& faces,
|
||||
CascadeClassifier& cascade,
|
||||
double scale);
|
||||
|
||||
static void Draw(Mat& img, vector<Rect>& faces, double scale);
|
||||
|
||||
|
||||
// This function tests if gpu_rst matches cpu_rst.
// If the two vectors are not equal, it will return the difference in vector size;
// else it will return (total diff of each cpu and gpu rects covered pixels) / (total cpu rects covered pixels).
|
||||
double checkRectSimilarity(Size sz, vector<Rect>& cpu_rst, vector<Rect>& gpu_rst);
|
||||
|
||||
static int facedetect_one_thread(bool useCPU, double scale )
|
||||
{
|
||||
CvCapture* capture = 0;
|
||||
Mat frame, frameCopy0, frameCopy, image;
|
||||
|
||||
ocl::OclCascadeClassifier cascade;
|
||||
CascadeClassifier cpu_cascade;
|
||||
|
||||
if( !cascade.load( cascadeName ) || !cpu_cascade.load(cascadeName) )
|
||||
{
|
||||
cout << "ERROR: Could not load classifier cascade: " << cascadeName << endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if( inputName.empty() )
|
||||
{
|
||||
capture = cvCaptureFromCAM(0);
|
||||
if(!capture)
|
||||
cout << "Capture from CAM 0 didn't work" << endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
image = imread( inputName, CV_LOAD_IMAGE_COLOR );
|
||||
if( image.empty() )
|
||||
{
|
||||
capture = cvCaptureFromAVI( inputName.c_str() );
|
||||
if(!capture)
|
||||
cout << "Capture from AVI didn't work" << endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
if( capture )
|
||||
{
|
||||
cout << "In capture ..." << endl;
|
||||
for(;;)
|
||||
{
|
||||
IplImage* iplImg = cvQueryFrame( capture );
|
||||
frame = cv::cvarrToMat(iplImg);
|
||||
vector<Rect> faces;
|
||||
if( frame.empty() )
|
||||
break;
|
||||
if( iplImg->origin == IPL_ORIGIN_TL )
|
||||
frame.copyTo( frameCopy0 );
|
||||
else
|
||||
flip( frame, frameCopy0, 0 );
|
||||
if( scale == 1)
|
||||
frameCopy0.copyTo(frameCopy);
|
||||
else
|
||||
resize(frameCopy0, frameCopy, Size(), 1./scale, 1./scale, INTER_LINEAR);
|
||||
|
||||
if(useCPU)
|
||||
detectCPU(frameCopy, faces, cpu_cascade, 1);
|
||||
else
|
||||
detect(frameCopy, faces, cascade, 1);
|
||||
|
||||
Draw(frameCopy, faces, 1);
|
||||
if( waitKey( 10 ) >= 0 )
|
||||
break;
|
||||
}
|
||||
cvReleaseCapture( &capture );
|
||||
}
|
||||
else
|
||||
{
|
||||
cout << "In image read " << image.size() << endl;
|
||||
vector<Rect> faces;
|
||||
vector<Rect> ref_rst;
|
||||
double accuracy = 0.;
|
||||
detectCPU(image, ref_rst, cpu_cascade, scale);
|
||||
|
||||
cout << "loops: ";
|
||||
for(int i = 0; i <= LOOP_NUM; i ++)
|
||||
{
|
||||
cout << i << ", ";
|
||||
if(useCPU)
|
||||
detectCPU(image, faces, cpu_cascade, scale);
|
||||
else
|
||||
{
|
||||
detect(image, faces, cascade, scale);
|
||||
if(i == 0)
|
||||
{
|
||||
accuracy = checkRectSimilarity(image.size(), ref_rst, faces);
|
||||
}
|
||||
}
|
||||
}
|
||||
cout << "done!" << endl;
|
||||
if (useCPU)
|
||||
cout << "average CPU time (noCamera) : ";
|
||||
else
|
||||
cout << "average GPU time (noCamera) : ";
|
||||
cout << getTotalTime() / LOOP_NUM << " ms" << endl;
|
||||
cout << "accuracy value: " << accuracy <<endl;
|
||||
|
||||
Draw(image, faces, scale);
|
||||
waitKey(0);
|
||||
}
|
||||
|
||||
cvDestroyWindow("result");
|
||||
std::cout<< "single-threaded sample has finished" <<std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
///////////////////////////////////////detectfaces with multithreading////////////////////////////////////////////
|
||||
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
|
||||
|
||||
static void detectFaces(std::string fileName, int threadNum)
|
||||
{
|
||||
ocl::OclCascadeClassifier cascade;
|
||||
if(!cascade.load(cascadeName))
|
||||
{
|
||||
std::cout << "ERROR: Could not load classifier cascade: " << cascadeName << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
Mat img = imread(fileName, CV_LOAD_IMAGE_COLOR);
|
||||
if (img.empty())
|
||||
{
|
||||
std::cout << '[' << threadNum << "] " << "can't open file " + fileName <<std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
ocl::oclMat d_img;
|
||||
d_img.upload(img);
|
||||
|
||||
std::vector<Rect> oclfaces;
|
||||
std::thread::id tid = std::this_thread::get_id();
|
||||
std::cout << '[' << threadNum << "] "
|
||||
<< "ThreadID = " << tid
|
||||
<< ", CommandQueue = " << *(void**)ocl::getClCommandQueuePtr()
|
||||
<< endl;
|
||||
for(int i = 0; i <= LOOP_NUM; i++)
|
||||
{
|
||||
if(i>0) workBegin(threadNum);
|
||||
cascade.detectMultiScale(d_img, oclfaces, 1.1, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30), Size(0, 0));
|
||||
if(i>0) workEnd(threadNum);
|
||||
}
|
||||
std::cout << '[' << threadNum << "] " << "Average time = " << getTotalTime(threadNum) / LOOP_NUM << " ms" << endl;
|
||||
|
||||
for(unsigned int i = 0; i<oclfaces.size(); i++)
|
||||
rectangle(img, Point(oclfaces[i].x, oclfaces[i].y), Point(oclfaces[i].x + oclfaces[i].width, oclfaces[i].y + oclfaces[i].height), colors[i%8], 3);
|
||||
|
||||
std::string::size_type pos = outputName.rfind('.');
|
||||
std::string strTid = std::to_string(_threadid);
|
||||
if( !outputName.empty() )
|
||||
{
|
||||
if(pos == std::string::npos)
|
||||
{
|
||||
std::cout << "Invalid output file name: " << outputName << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::string outputNameTid = outputName.substr(0, pos) + "_" + strTid + outputName.substr(pos);
|
||||
imwrite(outputNameTid, img);
|
||||
}
|
||||
}
|
||||
imshow(strTid, img);
|
||||
waitKey(0);
|
||||
}
|
||||
|
||||
static void facedetect_multithreading(int nthreads)
|
||||
{
|
||||
int thread_number = MAX_THREADS < nthreads ? MAX_THREADS : nthreads;
|
||||
std::vector<std::thread> threads;
|
||||
for(int i = 0; i<thread_number; i++)
|
||||
threads.push_back(std::thread(detectFaces, inputName, i));
|
||||
for(int i = 0; i<thread_number; i++)
|
||||
threads[i].join();
|
||||
}
|
||||
#endif
|
||||
|
||||
int main( int argc, const char** argv )
|
||||
{
|
||||
|
||||
const char* keys =
|
||||
"{ h help | false | print help message }"
|
||||
"{ i input | | specify input image }"
|
||||
"{ t template | haarcascade_frontalface_alt.xml |"
|
||||
" specify template file path }"
|
||||
"{ c scale | 1.0 | scale image }"
|
||||
"{ s use_cpu | false | use cpu or gpu to process the image }"
|
||||
"{ o output | | specify output image save path(only works when input is images) }"
|
||||
"{ n thread_num | 1 | set number of threads >= 1 }";
|
||||
|
||||
CommandLineParser cmd(argc, argv, keys);
|
||||
if (cmd.has("help"))
|
||||
{
|
||||
cout << "Usage : facedetect [options]" << endl;
|
||||
cout << "Available options:" << endl;
|
||||
cmd.printMessage();
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
bool useCPU = cmd.get<bool>("s");
|
||||
inputName = cmd.get<string>("i");
|
||||
outputName = cmd.get<string>("o");
|
||||
cascadeName = cmd.get<string>("t");
|
||||
double scale = cmd.get<double>("c");
|
||||
int n = cmd.get<int>("n");
|
||||
|
||||
if(n > 1)
|
||||
{
|
||||
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
|
||||
std::cout<<"multi-threaded sample is running" <<std::endl;
|
||||
facedetect_multithreading(n);
|
||||
std::cout<<"multi-threaded sample has finished" <<std::endl;
|
||||
return 0;
|
||||
#else
|
||||
std::cout << "std::thread is not supported, running a single-threaded version" << std::endl;
|
||||
#endif
|
||||
}
|
||||
if (n<0)
|
||||
std::cout<<"incorrect number of threads:" << n << ", running a single-threaded version" <<std::endl;
|
||||
else
|
||||
std::cout<<"single-threaded sample is running" <<std::endl;
|
||||
return facedetect_one_thread(useCPU, scale);
|
||||
|
||||
}
|
||||
|
||||
void detect( Mat& img, vector<Rect>& faces,
|
||||
ocl::OclCascadeClassifier& cascade,
|
||||
double scale)
|
||||
{
|
||||
ocl::oclMat image(img);
|
||||
ocl::oclMat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
|
||||
workBegin();
|
||||
ocl::cvtColor( image, gray, COLOR_BGR2GRAY );
|
||||
ocl::resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
|
||||
ocl::equalizeHist( smallImg, smallImg );
|
||||
|
||||
cascade.detectMultiScale( smallImg, faces, 1.1,
|
||||
3, 0
|
||||
|CASCADE_SCALE_IMAGE
|
||||
, Size(30,30), Size(0, 0) );
|
||||
workEnd();
|
||||
}
|
||||
|
||||
void detectCPU( Mat& img, vector<Rect>& faces,
|
||||
CascadeClassifier& cascade,
|
||||
double scale)
|
||||
{
|
||||
workBegin();
|
||||
Mat cpu_gray, cpu_smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
|
||||
cvtColor(img, cpu_gray, COLOR_BGR2GRAY);
|
||||
resize(cpu_gray, cpu_smallImg, cpu_smallImg.size(), 0, 0, INTER_LINEAR);
|
||||
equalizeHist(cpu_smallImg, cpu_smallImg);
|
||||
cascade.detectMultiScale(cpu_smallImg, faces, 1.1,
|
||||
3, 0 | CASCADE_SCALE_IMAGE,
|
||||
Size(30, 30), Size(0, 0));
|
||||
workEnd();
|
||||
}
|
||||
|
||||
|
||||
void Draw(Mat& img, vector<Rect>& faces, double scale)
|
||||
{
|
||||
int i = 0;
|
||||
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
|
||||
{
|
||||
Point center;
|
||||
Scalar color = colors[i%8];
|
||||
int radius;
|
||||
center.x = cvRound((r->x + r->width*0.5)*scale);
|
||||
center.y = cvRound((r->y + r->height*0.5)*scale);
|
||||
radius = cvRound((r->width + r->height)*0.25*scale);
|
||||
circle( img, center, radius, color, 3, 8, 0 );
|
||||
}
|
||||
//if( !outputName.empty() ) imwrite( outputName, img );
|
||||
if( abs(scale-1.0)>.001 )
|
||||
{
|
||||
resize(img, img, Size((int)(img.cols/scale), (int)(img.rows/scale)));
|
||||
}
|
||||
imshow( "result", img );
|
||||
|
||||
}
|
||||
|
||||
|
||||
double checkRectSimilarity(Size sz, vector<Rect>& ob1, vector<Rect>& ob2)
|
||||
{
|
||||
double final_test_result = 0.0;
|
||||
size_t sz1 = ob1.size();
|
||||
size_t sz2 = ob2.size();
|
||||
|
||||
if(sz1 != sz2)
|
||||
{
|
||||
return sz1 > sz2 ? (double)(sz1 - sz2) : (double)(sz2 - sz1);
|
||||
}
|
||||
else
|
||||
{
|
||||
if(sz1==0 && sz2==0)
|
||||
return 0;
|
||||
Mat cpu_result(sz, CV_8UC1);
|
||||
cpu_result.setTo(0);
|
||||
|
||||
for(vector<Rect>::const_iterator r = ob1.begin(); r != ob1.end(); r++)
|
||||
{
|
||||
Mat cpu_result_roi(cpu_result, *r);
|
||||
cpu_result_roi.setTo(1);
|
||||
cpu_result.copyTo(cpu_result);
|
||||
}
|
||||
int cpu_area = countNonZero(cpu_result > 0);
|
||||
|
||||
|
||||
Mat gpu_result(sz, CV_8UC1);
|
||||
gpu_result.setTo(0);
|
||||
for(vector<Rect>::const_iterator r2 = ob2.begin(); r2 != ob2.end(); r2++)
|
||||
{
|
||||
cv::Mat gpu_result_roi(gpu_result, *r2);
|
||||
gpu_result_roi.setTo(1);
|
||||
gpu_result.copyTo(gpu_result);
|
||||
}
|
||||
|
||||
Mat result_;
|
||||
multiply(cpu_result, gpu_result, result_);
|
||||
int result = countNonZero(result_ > 0);
|
||||
if(cpu_area!=0 && result!=0)
|
||||
final_test_result = 1.0 - (double)result/(double)cpu_area;
|
||||
else if(cpu_area==0 && result!=0)
|
||||
final_test_result = -1;
|
||||
}
|
||||
return final_test_result;
|
||||
}
|
||||
@@ -1,448 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <iomanip>
|
||||
#include <stdexcept>
|
||||
#include <opencv2/core/utility.hpp>
|
||||
#include "opencv2/ocl.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
class App
|
||||
{
|
||||
public:
|
||||
App(CommandLineParser& cmd);
|
||||
void run();
|
||||
void handleKey(char key);
|
||||
void hogWorkBegin();
|
||||
void hogWorkEnd();
|
||||
string hogWorkFps() const;
|
||||
void workBegin();
|
||||
void workEnd();
|
||||
string workFps() const;
|
||||
string message() const;
|
||||
|
||||
|
||||
// This function tests if gpu_rst matches cpu_rst.
// If the two vectors are not equal, it will return the difference in vector size;
// else it will return
// (total diff of each cpu and gpu rects covered pixels) / (total cpu rects covered pixels).
|
||||
double checkRectSimilarity(Size sz,
|
||||
std::vector<Rect>& cpu_rst,
|
||||
std::vector<Rect>& gpu_rst);
|
||||
private:
|
||||
App operator=(App&);
|
||||
|
||||
//Args args;
|
||||
bool running;
|
||||
bool use_gpu;
|
||||
bool make_gray;
|
||||
double scale;
|
||||
double resize_scale;
|
||||
int win_width;
|
||||
int win_stride_width, win_stride_height;
|
||||
int gr_threshold;
|
||||
int nlevels;
|
||||
double hit_threshold;
|
||||
bool gamma_corr;
|
||||
|
||||
int64 hog_work_begin;
|
||||
double hog_work_fps;
|
||||
int64 work_begin;
|
||||
double work_fps;
|
||||
|
||||
string img_source;
|
||||
string vdo_source;
|
||||
string output;
|
||||
int camera_id;
|
||||
bool write_once;
|
||||
};
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
const char* keys =
|
||||
"{ h | help | false | print help message }"
|
||||
"{ i | input | | specify input image}"
|
||||
"{ c | camera | -1 | enable camera capturing }"
|
||||
"{ v | video | | use video as input }"
|
||||
"{ g | gray | false | convert image to gray one or not}"
|
||||
"{ s | scale | 1.0 | resize the image before detect}"
|
||||
"{ l |larger_win| false | use 64x128 window}"
|
||||
"{ o | output | | specify output path when input is images}";
|
||||
CommandLineParser cmd(argc, argv, keys);
|
||||
if (cmd.has("help"))
|
||||
{
|
||||
cout << "Usage : hog [options]" << endl;
|
||||
cout << "Available options:" << endl;
|
||||
cmd.printMessage();
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
App app(cmd);
|
||||
try
|
||||
{
|
||||
app.run();
|
||||
}
|
||||
catch (const Exception& e)
|
||||
{
|
||||
return cout << "error: " << e.what() << endl, 1;
|
||||
}
|
||||
catch (const exception& e)
|
||||
{
|
||||
return cout << "error: " << e.what() << endl, 1;
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
return cout << "unknown exception" << endl, 1;
|
||||
}
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
App::App(CommandLineParser& cmd)
|
||||
{
|
||||
cout << "\nControls:\n"
|
||||
<< "\tESC - exit\n"
|
||||
<< "\tm - change mode GPU <-> CPU\n"
|
||||
<< "\tg - convert image to gray or not\n"
|
||||
<< "\to - save output image once, or switch on/off video save\n"
|
||||
<< "\t1/q - increase/decrease HOG scale\n"
|
||||
<< "\t2/w - increase/decrease levels count\n"
|
||||
<< "\t3/e - increase/decrease HOG group threshold\n"
|
||||
<< "\t4/r - increase/decrease hit threshold\n"
|
||||
<< endl;
|
||||
|
||||
|
||||
use_gpu = true;
|
||||
make_gray = cmd.get<bool>("g");
|
||||
resize_scale = cmd.get<double>("s");
|
||||
win_width = cmd.get<bool>("l") == true ? 64 : 48;
|
||||
vdo_source = cmd.get<string>("v");
|
||||
img_source = cmd.get<string>("i");
|
||||
output = cmd.get<string>("o");
|
||||
camera_id = cmd.get<int>("c");
|
||||
|
||||
win_stride_width = 8;
|
||||
win_stride_height = 8;
|
||||
gr_threshold = 8;
|
||||
nlevels = 13;
|
||||
hit_threshold = win_width == 48 ? 1.4 : 0.;
|
||||
scale = 1.05;
|
||||
gamma_corr = true;
|
||||
write_once = false;
|
||||
|
||||
cout << "Group threshold: " << gr_threshold << endl;
|
||||
cout << "Levels number: " << nlevels << endl;
|
||||
cout << "Win width: " << win_width << endl;
|
||||
cout << "Win stride: (" << win_stride_width << ", " << win_stride_height << ")\n";
|
||||
cout << "Hit threshold: " << hit_threshold << endl;
|
||||
cout << "Gamma correction: " << gamma_corr << endl;
|
||||
cout << endl;
|
||||
}
|
||||
|
||||
void App::run()
|
||||
{
|
||||
running = true;
|
||||
VideoWriter video_writer;
|
||||
|
||||
Size win_size(win_width, win_width * 2);
|
||||
Size win_stride(win_stride_width, win_stride_height);
|
||||
|
||||
// Create HOG descriptors and detectors here
|
||||
vector<float> detector;
|
||||
if (win_size == Size(64, 128))
|
||||
detector = ocl::HOGDescriptor::getPeopleDetector64x128();
|
||||
else
|
||||
detector = ocl::HOGDescriptor::getPeopleDetector48x96();
|
||||
|
||||
|
||||
ocl::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9,
|
||||
ocl::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr,
|
||||
ocl::HOGDescriptor::DEFAULT_NLEVELS);
|
||||
HOGDescriptor cpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, 1, -1,
|
||||
HOGDescriptor::L2Hys, 0.2, gamma_corr, cv::HOGDescriptor::DEFAULT_NLEVELS);
|
||||
gpu_hog.setSVMDetector(detector);
|
||||
cpu_hog.setSVMDetector(detector);
|
||||
|
||||
while (running)
|
||||
{
|
||||
VideoCapture vc;
|
||||
Mat frame;
|
||||
|
||||
if (vdo_source!="")
|
||||
{
|
||||
vc.open(vdo_source.c_str());
|
||||
if (!vc.isOpened())
|
||||
throw runtime_error(string("can't open video file: " + vdo_source));
|
||||
vc >> frame;
|
||||
}
|
||||
else if (camera_id != -1)
|
||||
{
|
||||
vc.open(camera_id);
|
||||
if (!vc.isOpened())
|
||||
{
|
||||
stringstream msg;
|
||||
msg << "can't open camera: " << camera_id;
|
||||
throw runtime_error(msg.str());
|
||||
}
|
||||
vc >> frame;
|
||||
}
|
||||
else
|
||||
{
|
||||
frame = imread(img_source);
|
||||
if (frame.empty())
|
||||
throw runtime_error(string("can't open image file: " + img_source));
|
||||
}
|
||||
|
||||
Mat img_aux, img, img_to_show;
|
||||
ocl::oclMat gpu_img;
|
||||
|
||||
// Iterate over all frames
|
||||
bool verify = false;
|
||||
while (running && !frame.empty())
|
||||
{
|
||||
workBegin();
|
||||
|
||||
// Change format of the image
|
||||
if (make_gray) cvtColor(frame, img_aux, COLOR_BGR2GRAY);
|
||||
else if (use_gpu) cvtColor(frame, img_aux, COLOR_BGR2BGRA);
|
||||
else frame.copyTo(img_aux);
|
||||
|
||||
// Resize image
|
||||
if (abs(scale-1.0)>0.001)
|
||||
{
|
||||
Size sz((int)((double)img_aux.cols/resize_scale), (int)((double)img_aux.rows/resize_scale));
|
||||
resize(img_aux, img, sz);
|
||||
}
|
||||
else img = img_aux;
|
||||
img_to_show = img;
|
||||
gpu_hog.nlevels = nlevels;
|
||||
cpu_hog.nlevels = nlevels;
|
||||
vector<Rect> found;
|
||||
|
||||
// Perform HOG classification
|
||||
hogWorkBegin();
|
||||
if (use_gpu)
|
||||
{
|
||||
gpu_img.upload(img);
|
||||
gpu_hog.detectMultiScale(gpu_img, found, hit_threshold, win_stride,
|
||||
Size(0, 0), scale, gr_threshold);
|
||||
if (!verify)
|
||||
{
|
||||
// verify if GPU output same objects with CPU at 1st run
|
||||
verify = true;
|
||||
vector<Rect> ref_rst;
|
||||
cvtColor(img, img, COLOR_BGRA2BGR);
|
||||
cpu_hog.detectMultiScale(img, ref_rst, hit_threshold, win_stride,
|
||||
Size(0, 0), scale, gr_threshold-2);
|
||||
double accuracy = checkRectSimilarity(img.size(), ref_rst, found);
|
||||
cout << "\naccuracy value: " << accuracy << endl;
|
||||
}
|
||||
}
|
||||
else cpu_hog.detectMultiScale(img, found, hit_threshold, win_stride,
|
||||
Size(0, 0), scale, gr_threshold);
|
||||
hogWorkEnd();
|
||||
|
||||
|
||||
// Draw positive classified windows
|
||||
for (size_t i = 0; i < found.size(); i++)
|
||||
{
|
||||
Rect r = found[i];
|
||||
rectangle(img_to_show, r.tl(), r.br(), Scalar(0, 255, 0), 3);
|
||||
}
|
||||
|
||||
if (use_gpu)
|
||||
putText(img_to_show, "Mode: GPU", Point(5, 25), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
|
||||
else
|
||||
putText(img_to_show, "Mode: CPU", Point(5, 25), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
|
||||
putText(img_to_show, "FPS (HOG only): " + hogWorkFps(), Point(5, 65), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
|
||||
putText(img_to_show, "FPS (total): " + workFps(), Point(5, 105), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
|
||||
imshow("opencv_gpu_hog", img_to_show);
|
||||
if (vdo_source!="" || camera_id!=-1) vc >> frame;
|
||||
|
||||
workEnd();
|
||||
|
||||
if (output!="" && write_once)
|
||||
{
|
||||
if (img_source!="") // write image
|
||||
{
|
||||
write_once = false;
|
||||
imwrite(output, img_to_show);
|
||||
}
|
||||
else //write video
|
||||
{
|
||||
if (!video_writer.isOpened())
|
||||
{
|
||||
video_writer.open(output, VideoWriter::fourcc('x','v','i','d'), 24,
|
||||
img_to_show.size(), true);
|
||||
if (!video_writer.isOpened())
|
||||
throw std::runtime_error("can't create video writer");
|
||||
}
|
||||
|
||||
if (make_gray) cvtColor(img_to_show, img, COLOR_GRAY2BGR);
|
||||
else cvtColor(img_to_show, img, COLOR_BGRA2BGR);
|
||||
|
||||
video_writer << img;
|
||||
}
|
||||
}
|
||||
|
||||
handleKey((char)waitKey(3));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void App::handleKey(char key)
|
||||
{
|
||||
switch (key)
|
||||
{
|
||||
case 27:
|
||||
running = false;
|
||||
break;
|
||||
case 'm':
|
||||
case 'M':
|
||||
use_gpu = !use_gpu;
|
||||
cout << "Switched to " << (use_gpu ? "OpenCL" : "CPU") << " mode\n";
|
||||
break;
|
||||
case 'g':
|
||||
case 'G':
|
||||
make_gray = !make_gray;
|
||||
cout << "Convert image to gray: " << (make_gray ? "YES" : "NO") << endl;
|
||||
break;
|
||||
case '1':
|
||||
scale *= 1.05;
|
||||
cout << "Scale: " << scale << endl;
|
||||
break;
|
||||
case 'q':
|
||||
case 'Q':
|
||||
scale /= 1.05;
|
||||
cout << "Scale: " << scale << endl;
|
||||
break;
|
||||
case '2':
|
||||
nlevels++;
|
||||
cout << "Levels number: " << nlevels << endl;
|
||||
break;
|
||||
case 'w':
|
||||
case 'W':
|
||||
nlevels = max(nlevels - 1, 1);
|
||||
cout << "Levels number: " << nlevels << endl;
|
||||
break;
|
||||
case '3':
|
||||
gr_threshold++;
|
||||
cout << "Group threshold: " << gr_threshold << endl;
|
||||
break;
|
||||
case 'e':
|
||||
case 'E':
|
||||
gr_threshold = max(0, gr_threshold - 1);
|
||||
cout << "Group threshold: " << gr_threshold << endl;
|
||||
break;
|
||||
case '4':
|
||||
hit_threshold+=0.25;
|
||||
cout << "Hit threshold: " << hit_threshold << endl;
|
||||
break;
|
||||
case 'r':
|
||||
case 'R':
|
||||
hit_threshold = max(0.0, hit_threshold - 0.25);
|
||||
cout << "Hit threshold: " << hit_threshold << endl;
|
||||
break;
|
||||
case 'c':
|
||||
case 'C':
|
||||
gamma_corr = !gamma_corr;
|
||||
cout << "Gamma correction: " << gamma_corr << endl;
|
||||
break;
|
||||
case 'o':
|
||||
case 'O':
|
||||
write_once = !write_once;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
inline void App::hogWorkBegin()
|
||||
{
|
||||
hog_work_begin = getTickCount();
|
||||
}
|
||||
|
||||
inline void App::hogWorkEnd()
|
||||
{
|
||||
int64 delta = getTickCount() - hog_work_begin;
|
||||
double freq = getTickFrequency();
|
||||
hog_work_fps = freq / delta;
|
||||
}
|
||||
|
||||
inline string App::hogWorkFps() const
|
||||
{
|
||||
stringstream ss;
|
||||
ss << hog_work_fps;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
inline void App::workBegin()
|
||||
{
|
||||
work_begin = getTickCount();
|
||||
}
|
||||
|
||||
inline void App::workEnd()
|
||||
{
|
||||
int64 delta = getTickCount() - work_begin;
|
||||
double freq = getTickFrequency();
|
||||
work_fps = freq / delta;
|
||||
}
|
||||
|
||||
inline string App::workFps() const
|
||||
{
|
||||
stringstream ss;
|
||||
ss << work_fps;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
|
||||
double App::checkRectSimilarity(Size sz,
|
||||
std::vector<Rect>& ob1,
|
||||
std::vector<Rect>& ob2)
|
||||
{
|
||||
double final_test_result = 0.0;
|
||||
size_t sz1 = ob1.size();
|
||||
size_t sz2 = ob2.size();
|
||||
|
||||
if(sz1 != sz2)
|
||||
{
|
||||
return sz1 > sz2 ? (double)(sz1 - sz2) : (double)(sz2 - sz1);
|
||||
}
|
||||
else
|
||||
{
|
||||
if(sz1==0 && sz2==0)
|
||||
return 0;
|
||||
cv::Mat cpu_result(sz, CV_8UC1);
|
||||
cpu_result.setTo(0);
|
||||
|
||||
|
||||
for(vector<Rect>::const_iterator r = ob1.begin(); r != ob1.end(); r++)
|
||||
{
|
||||
cv::Mat cpu_result_roi(cpu_result, *r);
|
||||
cpu_result_roi.setTo(1);
|
||||
cpu_result.copyTo(cpu_result);
|
||||
}
|
||||
int cpu_area = cv::countNonZero(cpu_result > 0);
|
||||
|
||||
|
||||
cv::Mat gpu_result(sz, CV_8UC1);
|
||||
gpu_result.setTo(0);
|
||||
for(vector<Rect>::const_iterator r2 = ob2.begin(); r2 != ob2.end(); r2++)
|
||||
{
|
||||
cv::Mat gpu_result_roi(gpu_result, *r2);
|
||||
gpu_result_roi.setTo(1);
|
||||
gpu_result.copyTo(gpu_result);
|
||||
}
|
||||
|
||||
cv::Mat result_;
|
||||
multiply(cpu_result, gpu_result, result_);
|
||||
int result = cv::countNonZero(result_ > 0);
|
||||
if(cpu_area!=0 && result!=0)
|
||||
final_test_result = 1.0 - (double)result/(double)cpu_area;
|
||||
else if(cpu_area==0 && result!=0)
|
||||
final_test_result = -1;
|
||||
}
|
||||
return final_test_result;
|
||||
}
|
||||
@@ -1,264 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <iomanip>
|
||||
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/ocl/ocl.hpp"
|
||||
#include "opencv2/video/video.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
using namespace cv::ocl;
|
||||
|
||||
typedef unsigned char uchar;
|
||||
#define LOOP_NUM 10
|
||||
int64 work_begin = 0;
|
||||
int64 work_end = 0;
|
||||
|
||||
static void workBegin()
|
||||
{
|
||||
work_begin = getTickCount();
|
||||
}
|
||||
static void workEnd()
|
||||
{
|
||||
work_end += (getTickCount() - work_begin);
|
||||
}
|
||||
static double getTime()
|
||||
{
|
||||
return work_end * 1000. / getTickFrequency();
|
||||
}
|
||||
|
||||
static void download(const oclMat& d_mat, vector<Point2f>& vec)
|
||||
{
|
||||
vec.clear();
|
||||
vec.resize(d_mat.cols);
|
||||
Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
|
||||
d_mat.download(mat);
|
||||
}
|
||||
|
||||
static void download(const oclMat& d_mat, vector<uchar>& vec)
|
||||
{
|
||||
vec.clear();
|
||||
vec.resize(d_mat.cols);
|
||||
Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
|
||||
d_mat.download(mat);
|
||||
}
|
||||
|
||||
static void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status,
|
||||
Scalar line_color = Scalar(0, 0, 255))
|
||||
{
|
||||
for (size_t i = 0; i < prevPts.size(); ++i)
|
||||
{
|
||||
if (status[i])
|
||||
{
|
||||
int line_thickness = 1;
|
||||
|
||||
Point p = prevPts[i];
|
||||
Point q = nextPts[i];
|
||||
|
||||
double angle = atan2((double) p.y - q.y, (double) p.x - q.x);
|
||||
|
||||
double hypotenuse = sqrt( (double)(p.y - q.y)*(p.y - q.y) + (double)(p.x - q.x)*(p.x - q.x) );
|
||||
|
||||
if (hypotenuse < 1.0)
|
||||
continue;
|
||||
|
||||
// Here we lengthen the arrow by a factor of three.
|
||||
q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
|
||||
q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
|
||||
|
||||
// Now we draw the main line of the arrow.
|
||||
line(frame, p, q, line_color, line_thickness);
|
||||
|
||||
// Now draw the tips of the arrow. I do some scaling so that the
|
||||
// tips look proportional to the main line of the arrow.
|
||||
|
||||
p.x = (int) (q.x + 9 * cos(angle + CV_PI / 4));
|
||||
p.y = (int) (q.y + 9 * sin(angle + CV_PI / 4));
|
||||
line(frame, p, q, line_color, line_thickness);
|
||||
|
||||
p.x = (int) (q.x + 9 * cos(angle - CV_PI / 4));
|
||||
p.y = (int) (q.y + 9 * sin(angle - CV_PI / 4));
|
||||
line(frame, p, q, line_color, line_thickness);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, const char* argv[])
|
||||
{
|
||||
const char* keys =
|
||||
"{ help h | false | print help message }"
|
||||
"{ left l | | specify left image }"
|
||||
"{ right r | | specify right image }"
|
||||
"{ camera c | 0 | enable camera capturing }"
|
||||
"{ use_cpu s | false | use cpu or gpu to process the image }"
|
||||
"{ video v | | use video as input }"
|
||||
"{ output o | pyrlk_output.jpg| specify output save path when input is images }"
|
||||
"{ points | 1000 | specify points count [GoodFeatureToTrack] }"
|
||||
"{ min_dist | 0 | specify minimal distance between points [GoodFeatureToTrack] }";
|
||||
|
||||
CommandLineParser cmd(argc, argv, keys);
|
||||
|
||||
if (cmd.has("help"))
|
||||
{
|
||||
cout << "Usage: pyrlk_optical_flow [options]" << endl;
|
||||
cout << "Available options:" << endl;
|
||||
cmd.printMessage();
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
bool defaultPicturesFail = false;
|
||||
string fname0 = cmd.get<string>("left");
|
||||
string fname1 = cmd.get<string>("right");
|
||||
string vdofile = cmd.get<string>("video");
|
||||
string outfile = cmd.get<string>("output");
|
||||
int points = cmd.get<int>("points");
|
||||
double minDist = cmd.get<double>("min_dist");
|
||||
bool useCPU = cmd.has("s");
|
||||
int inputName = cmd.get<int>("c");
|
||||
|
||||
oclMat d_nextPts, d_status;
|
||||
GoodFeaturesToTrackDetector_OCL d_features(points);
|
||||
Mat frame0 = imread(fname0, cv::IMREAD_GRAYSCALE);
|
||||
Mat frame1 = imread(fname1, cv::IMREAD_GRAYSCALE);
|
||||
PyrLKOpticalFlow d_pyrLK;
|
||||
vector<cv::Point2f> pts(points);
|
||||
vector<cv::Point2f> nextPts(points);
|
||||
vector<unsigned char> status(points);
|
||||
vector<float> err;
|
||||
|
||||
cout << "Points count : " << points << endl << endl;
|
||||
|
||||
if (frame0.empty() || frame1.empty())
|
||||
{
|
||||
VideoCapture capture;
|
||||
Mat frame, frameCopy;
|
||||
Mat frame0Gray, frame1Gray;
|
||||
Mat ptr0, ptr1;
|
||||
|
||||
if(vdofile.empty())
|
||||
capture.open( inputName );
|
||||
else
|
||||
capture.open(vdofile.c_str());
|
||||
|
||||
int c = inputName ;
|
||||
if(!capture.isOpened())
|
||||
{
|
||||
if(vdofile.empty())
|
||||
cout << "Capture from CAM " << c << " didn't work" << endl;
|
||||
else
|
||||
cout << "Capture from file " << vdofile << " failed" <<endl;
|
||||
if (defaultPicturesFail)
|
||||
return EXIT_FAILURE;
|
||||
goto nocamera;
|
||||
}
|
||||
|
||||
cout << "In capture ..." << endl;
|
||||
for(int i = 0;; i++)
|
||||
{
|
||||
if( !capture.read(frame) )
|
||||
break;
|
||||
|
||||
if (i == 0)
|
||||
{
|
||||
frame.copyTo( frame0 );
|
||||
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (i%2 == 1)
|
||||
{
|
||||
frame.copyTo(frame1);
|
||||
cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
|
||||
ptr0 = frame0Gray;
|
||||
ptr1 = frame1Gray;
|
||||
}
|
||||
else
|
||||
{
|
||||
frame.copyTo(frame0);
|
||||
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
|
||||
ptr0 = frame1Gray;
|
||||
ptr1 = frame0Gray;
|
||||
}
|
||||
|
||||
if (useCPU)
|
||||
{
|
||||
pts.clear();
|
||||
goodFeaturesToTrack(ptr0, pts, points, 0.01, 0.0);
|
||||
if(pts.size() == 0)
|
||||
continue;
|
||||
calcOpticalFlowPyrLK(ptr0, ptr1, pts, nextPts, status, err);
|
||||
}
|
||||
else
|
||||
{
|
||||
oclMat d_img(ptr0), d_prevPts;
|
||||
d_features(d_img, d_prevPts);
|
||||
if(!d_prevPts.rows || !d_prevPts.cols)
|
||||
continue;
|
||||
d_pyrLK.sparse(d_img, oclMat(ptr1), d_prevPts, d_nextPts, d_status);
|
||||
d_features.downloadPoints(d_prevPts,pts);
|
||||
download(d_nextPts, nextPts);
|
||||
download(d_status, status);
|
||||
}
|
||||
if (i%2 == 1)
|
||||
frame1.copyTo(frameCopy);
|
||||
else
|
||||
frame0.copyTo(frameCopy);
|
||||
drawArrows(frameCopy, pts, nextPts, status, Scalar(255, 0, 0));
|
||||
imshow("PyrLK [Sparse]", frameCopy);
|
||||
}
|
||||
|
||||
if( waitKey( 10 ) >= 0 )
|
||||
break;
|
||||
}
|
||||
|
||||
capture.release();
|
||||
}
|
||||
else
|
||||
{
|
||||
nocamera:
|
||||
for(int i = 0; i <= LOOP_NUM; i ++)
|
||||
{
|
||||
cout << "loop" << i << endl;
|
||||
if (i > 0) workBegin();
|
||||
|
||||
if (useCPU)
|
||||
{
|
||||
goodFeaturesToTrack(frame0, pts, points, 0.01, minDist);
|
||||
calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
|
||||
}
|
||||
else
|
||||
{
|
||||
oclMat d_img(frame0), d_prevPts;
|
||||
d_features(d_img, d_prevPts);
|
||||
d_pyrLK.sparse(d_img, oclMat(frame1), d_prevPts, d_nextPts, d_status);
|
||||
d_features.downloadPoints(d_prevPts, pts);
|
||||
download(d_nextPts, nextPts);
|
||||
download(d_status, status);
|
||||
}
|
||||
|
||||
if (i > 0 && i <= LOOP_NUM)
|
||||
workEnd();
|
||||
|
||||
if (i == LOOP_NUM)
|
||||
{
|
||||
if (useCPU)
|
||||
cout << "average CPU time (noCamera) : ";
|
||||
else
|
||||
cout << "average GPU time (noCamera) : ";
|
||||
|
||||
cout << getTime() / LOOP_NUM << " ms" << endl;
|
||||
|
||||
drawArrows(frame0, pts, nextPts, status, Scalar(255, 0, 0));
|
||||
imshow("PyrLK [Sparse]", frame0);
|
||||
imwrite(outfile, frame0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
waitKey();
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
@@ -1,341 +0,0 @@
|
||||
// The "Square Detector" program.
|
||||
// It loads several images sequentially and tries to find squares in
|
||||
// each image
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/ocl/ocl.hpp"
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
#include <string.h>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
#define ACCURACY_CHECK
|
||||
|
||||
#ifdef ACCURACY_CHECK
|
||||
// check if two vectors of vector of points are near or not
|
||||
// prior assumption is that they are in correct order
|
||||
static bool checkPoints(
|
||||
vector< vector<Point> > set1,
|
||||
vector< vector<Point> > set2,
|
||||
int maxDiff = 5)
|
||||
{
|
||||
if(set1.size() != set2.size())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
for(vector< vector<Point> >::iterator it1 = set1.begin(), it2 = set2.begin();
|
||||
it1 < set1.end() && it2 < set2.end(); it1 ++, it2 ++)
|
||||
{
|
||||
vector<Point> pts1 = *it1;
|
||||
vector<Point> pts2 = *it2;
|
||||
|
||||
|
||||
if(pts1.size() != pts2.size())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
for(size_t i = 0; i < pts1.size(); i ++)
|
||||
{
|
||||
Point pt1 = pts1[i], pt2 = pts2[i];
|
||||
if(std::abs(pt1.x - pt2.x) > maxDiff ||
|
||||
std::abs(pt1.y - pt2.y) > maxDiff)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
int thresh = 50, N = 11;
|
||||
const char* wndname = "OpenCL Square Detection Demo";
|
||||
|
||||
|
||||
// helper function:
|
||||
// finds a cosine of angle between vectors
|
||||
// from pt0->pt1 and from pt0->pt2
|
||||
static double angle( Point pt1, Point pt2, Point pt0 )
|
||||
{
|
||||
double dx1 = pt1.x - pt0.x;
|
||||
double dy1 = pt1.y - pt0.y;
|
||||
double dx2 = pt2.x - pt0.x;
|
||||
double dy2 = pt2.y - pt0.y;
|
||||
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
|
||||
}
|
||||
|
||||
|
||||
// returns sequence of squares detected on the image.
|
||||
// the sequence is stored in the specified memory storage
|
||||
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
|
||||
{
|
||||
squares.clear();
|
||||
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
|
||||
|
||||
// down-scale and upscale the image to filter out the noise
|
||||
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
|
||||
pyrUp(pyr, timg, image.size());
|
||||
vector<vector<Point> > contours;
|
||||
|
||||
// find squares in every color plane of the image
|
||||
for( int c = 0; c < 3; c++ )
|
||||
{
|
||||
int ch[] = {c, 0};
|
||||
mixChannels(&timg, 1, &gray0, 1, ch, 1);
|
||||
|
||||
// try several threshold levels
|
||||
for( int l = 0; l < N; l++ )
|
||||
{
|
||||
// hack: use Canny instead of zero threshold level.
|
||||
// Canny helps to catch squares with gradient shading
|
||||
if( l == 0 )
|
||||
{
|
||||
// apply Canny. Take the upper threshold from slider
|
||||
// and set the lower to 0 (which forces edges merging)
|
||||
Canny(gray0, gray, 0, thresh, 5);
|
||||
// dilate canny output to remove potential
|
||||
// holes between edge segments
|
||||
dilate(gray, gray, Mat(), Point(-1,-1));
|
||||
}
|
||||
else
|
||||
{
|
||||
// apply threshold if l!=0:
|
||||
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
|
||||
cv::threshold(gray0, gray, (l+1)*255/N, 255, THRESH_BINARY);
|
||||
}
|
||||
|
||||
// find contours and store them all as a list
|
||||
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
|
||||
|
||||
vector<Point> approx;
|
||||
|
||||
// test each contour
|
||||
for( size_t i = 0; i < contours.size(); i++ )
|
||||
{
|
||||
// approximate contour with accuracy proportional
|
||||
// to the contour perimeter
|
||||
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
|
||||
|
||||
// square contours should have 4 vertices after approximation
|
||||
// relatively large area (to filter out noisy contours)
|
||||
// and be convex.
|
||||
// Note: absolute value of an area is used because
|
||||
// area may be positive or negative - in accordance with the
|
||||
// contour orientation
|
||||
if( approx.size() == 4 &&
|
||||
fabs(contourArea(Mat(approx))) > 1000 &&
|
||||
isContourConvex(Mat(approx)) )
|
||||
{
|
||||
double maxCosine = 0;
|
||||
|
||||
for( int j = 2; j < 5; j++ )
|
||||
{
|
||||
// find the maximum cosine of the angle between joint edges
|
||||
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
|
||||
maxCosine = MAX(maxCosine, cosine);
|
||||
}
|
||||
|
||||
// if cosines of all angles are small
|
||||
// (all angles are ~90 degrees) then write quadrangle
|
||||
// vertices to resultant sequence
|
||||
if( maxCosine < 0.3 )
|
||||
squares.push_back(approx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// returns sequence of squares detected on the image.
|
||||
// the sequence is stored in the specified memory storage
|
||||
static void findSquares_ocl( const Mat& image, vector<vector<Point> >& squares )
|
||||
{
|
||||
squares.clear();
|
||||
|
||||
Mat gray;
|
||||
cv::ocl::oclMat pyr_ocl, timg_ocl, gray0_ocl, gray_ocl;
|
||||
|
||||
// down-scale and upscale the image to filter out the noise
|
||||
ocl::pyrDown(ocl::oclMat(image), pyr_ocl);
|
||||
ocl::pyrUp(pyr_ocl, timg_ocl);
|
||||
|
||||
vector<vector<Point> > contours;
|
||||
vector<cv::ocl::oclMat> gray0s;
|
||||
ocl::split(timg_ocl, gray0s); // split 3 channels into a vector of oclMat
|
||||
// find squares in every color plane of the image
|
||||
for( int c = 0; c < 3; c++ )
|
||||
{
|
||||
gray0_ocl = gray0s[c];
|
||||
// try several threshold levels
|
||||
for( int l = 0; l < N; l++ )
|
||||
{
|
||||
// hack: use Canny instead of zero threshold level.
|
||||
// Canny helps to catch squares with gradient shading
|
||||
if( l == 0 )
|
||||
{
|
||||
// do canny on OpenCL device
|
||||
// apply Canny. Take the upper threshold from slider
|
||||
// and set the lower to 0 (which forces edges merging)
|
||||
cv::ocl::Canny(gray0_ocl, gray_ocl, 0, thresh, 5);
|
||||
// dilate canny output to remove potential
|
||||
// holes between edge segments
|
||||
ocl::dilate(gray_ocl, gray_ocl, Mat(), Point(-1,-1));
|
||||
gray = Mat(gray_ocl);
|
||||
}
|
||||
else
|
||||
{
|
||||
// apply threshold if l!=0:
|
||||
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
|
||||
cv::ocl::threshold(gray0_ocl, gray_ocl, (l+1)*255/N, 255, THRESH_BINARY);
|
||||
gray = gray_ocl;
|
||||
}
|
||||
|
||||
// find contours and store them all as a list
|
||||
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
|
||||
|
||||
vector<Point> approx;
|
||||
// test each contour
|
||||
for( size_t i = 0; i < contours.size(); i++ )
|
||||
{
|
||||
// approximate contour with accuracy proportional
|
||||
// to the contour perimeter
|
||||
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
|
||||
|
||||
// square contours should have 4 vertices after approximation
|
||||
// relatively large area (to filter out noisy contours)
|
||||
// and be convex.
|
||||
// Note: absolute value of an area is used because
|
||||
// area may be positive or negative - in accordance with the
|
||||
// contour orientation
|
||||
if( approx.size() == 4 &&
|
||||
fabs(contourArea(Mat(approx))) > 1000 &&
|
||||
isContourConvex(Mat(approx)) )
|
||||
{
|
||||
double maxCosine = 0;
|
||||
for( int j = 2; j < 5; j++ )
|
||||
{
|
||||
// find the maximum cosine of the angle between joint edges
|
||||
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
|
||||
maxCosine = MAX(maxCosine, cosine);
|
||||
}
|
||||
|
||||
// if cosines of all angles are small
|
||||
// (all angles are ~90 degrees) then write quadrangle
|
||||
// vertices to resultant sequence
|
||||
if( maxCosine < 0.3 )
|
||||
squares.push_back(approx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// the function draws all the squares in the image
|
||||
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
|
||||
{
|
||||
for( size_t i = 0; i < squares.size(); i++ )
|
||||
{
|
||||
const Point* p = &squares[i][0];
|
||||
int n = (int)squares[i].size();
|
||||
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// draw both pure-C++ and ocl square results onto a single image
|
||||
static Mat drawSquaresBoth( const Mat& image,
|
||||
const vector<vector<Point> >& sqsCPP,
|
||||
const vector<vector<Point> >& sqsOCL
|
||||
)
|
||||
{
|
||||
Mat imgToShow(Size(image.cols * 2, image.rows), image.type());
|
||||
Mat lImg = imgToShow(Rect(Point(0, 0), image.size()));
|
||||
Mat rImg = imgToShow(Rect(Point(image.cols, 0), image.size()));
|
||||
image.copyTo(lImg);
|
||||
image.copyTo(rImg);
|
||||
drawSquares(lImg, sqsCPP);
|
||||
drawSquares(rImg, sqsOCL);
|
||||
float fontScale = 0.8f;
|
||||
Scalar white = Scalar::all(255), black = Scalar::all(0);
|
||||
|
||||
putText(lImg, "C++", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, black, 2);
|
||||
putText(rImg, "OCL", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, black, 2);
|
||||
putText(lImg, "C++", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, white, 1);
|
||||
putText(rImg, "OCL", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, white, 1);
|
||||
|
||||
return imgToShow;
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
const char* keys =
|
||||
"{ i | input | | specify input image }"
|
||||
"{ o | output | squares_output.jpg | specify output save path}"
|
||||
"{ h | help | false | print help message }";
|
||||
CommandLineParser cmd(argc, argv, keys);
|
||||
string inputName = cmd.get<string>("i");
|
||||
string outfile = cmd.get<string>("o");
|
||||
|
||||
if(cmd.get<bool>("help"))
|
||||
{
|
||||
cout << "Usage : squares [options]" << endl;
|
||||
cout << "Available options:" << endl;
|
||||
cmd.printMessage();
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
int iterations = 10;
|
||||
namedWindow( wndname, WINDOW_AUTOSIZE );
|
||||
vector<vector<Point> > squares_cpu, squares_ocl;
|
||||
|
||||
Mat image = imread(inputName, 1);
|
||||
if( image.empty() )
|
||||
{
|
||||
cout << "Couldn't load " << inputName << endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
int j = iterations;
|
||||
int64 t_ocl = 0, t_cpp = 0;
|
||||
//warm-ups
|
||||
cout << "warming up ..." << endl;
|
||||
findSquares(image, squares_cpu);
|
||||
findSquares_ocl(image, squares_ocl);
|
||||
|
||||
|
||||
#ifdef ACCURACY_CHECK
|
||||
cout << "Checking ocl accuracy ... " << endl;
|
||||
cout << (checkPoints(squares_cpu, squares_ocl) ? "Pass" : "Failed") << endl;
|
||||
#endif
|
||||
do
|
||||
{
|
||||
int64 t_start = cv::getTickCount();
|
||||
findSquares(image, squares_cpu);
|
||||
t_cpp += cv::getTickCount() - t_start;
|
||||
|
||||
|
||||
t_start = cv::getTickCount();
|
||||
findSquares_ocl(image, squares_ocl);
|
||||
t_ocl += cv::getTickCount() - t_start;
|
||||
cout << "run loop: " << j << endl;
|
||||
}
|
||||
while(--j);
|
||||
cout << "cpp average time: " << 1000.0f * (double)t_cpp / getTickFrequency() / iterations << "ms" << endl;
|
||||
cout << "ocl average time: " << 1000.0f * (double)t_ocl / getTickFrequency() / iterations << "ms" << endl;
|
||||
|
||||
Mat result = drawSquaresBoth(image, squares_cpu, squares_ocl);
|
||||
imshow(wndname, result);
|
||||
imwrite(outfile, result);
|
||||
waitKey(0);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
@@ -1,384 +0,0 @@
#include <iostream>
#include <string>
#include <sstream>
#include <iomanip>
#include <stdexcept>

#include "opencv2/core/utility.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;
using namespace std;
using namespace ocl;


struct App
{
    App(CommandLineParser& cmd);
    void run();
    void handleKey(char key);
    void printParams() const;

    void workBegin()
    {
        work_begin = getTickCount();
    }
    void workEnd()
    {
        int64 d = getTickCount() - work_begin;
        double f = getTickFrequency();
        work_fps = f / d;
    }
    string method_str() const
    {
        switch (method)
        {
        case BM:
            return "BM";
        case BP:
            return "BP";
        case CSBP:
            return "CSBP";
        }
        return "";
    }
    string text() const
    {
        stringstream ss;
        ss << "(" << method_str() << ") FPS: " << setiosflags(ios::left)
           << setprecision(4) << work_fps;
        return ss.str();
    }
private:
    bool running, write_once;

    Mat left_src, right_src;
    Mat left, right;
    oclMat d_left, d_right;

    StereoBM_OCL bm;
    StereoBeliefPropagation bp;
    StereoConstantSpaceBP csbp;

    int64 work_begin;
    double work_fps;

    string l_img, r_img;
    string out_img;
    enum {BM, BP, CSBP} method;
    int ndisp; // Max disparity + 1
    enum {GPU, CPU} type;
};

int main(int argc, char** argv)
{
    const char* keys =
        "{ h | help | false | print help message }"
        "{ l | left | | specify left image }"
        "{ r | right | | specify right image }"
        "{ m | method | BM | specify match method(BM/BP/CSBP) }"
        "{ n | ndisp | 64 | specify number of disparity levels }"
        "{ o | output | stereo_match_output.jpg | specify output path when input is images}";

    CommandLineParser cmd(argc, argv, keys);
    if (cmd.get<bool>("help"))
    {
        cout << "Available options:" << endl;
        cmd.printMessage();
        return 0;
    }

    try
    {
        App app(cmd);
        cout << "Device name:" << cv::ocl::Context::getContext()->getDeviceInfo().deviceName << endl;

        app.run();
    }
    catch (const exception& e)
    {
        cout << "error: " << e.what() << endl;
    }

    return EXIT_SUCCESS;
}

App::App(CommandLineParser& cmd)
    : running(false), method(BM)
{
    cout << "stereo_match_ocl sample\n";
    cout << "\nControls:\n"
         << "\tesc - exit\n"
         << "\to - save output image once\n"
         << "\tp - print current parameters\n"
         << "\tg - convert source images into gray\n"
         << "\tm - change stereo match method\n"
         << "\ts - change Sobel prefiltering flag (for BM only)\n"
         << "\t1/q - increase/decrease maximum disparity\n"
         << "\t2/w - increase/decrease window size (for BM only)\n"
         << "\t3/e - increase/decrease iteration count (for BP and CSBP only)\n"
         << "\t4/r - increase/decrease level count (for BP and CSBP only)\n";

    l_img = cmd.get<string>("l");
    r_img = cmd.get<string>("r");
    string mstr = cmd.get<string>("m");
    if(mstr == "BM") method = BM;
    else if(mstr == "BP") method = BP;
    else if(mstr == "CSBP") method = CSBP;
    else cout << "unknown method!\n";
    ndisp = cmd.get<int>("n");
    out_img = cmd.get<string>("o");
    write_once = false;
}


void App::run()
{
    // Load images
    left_src = imread(l_img);
    right_src = imread(r_img);
    if (left_src.empty()) throw runtime_error("can't open file \"" + l_img + "\"");
    if (right_src.empty()) throw runtime_error("can't open file \"" + r_img + "\"");

    cvtColor(left_src, left, COLOR_BGR2GRAY);
    cvtColor(right_src, right, COLOR_BGR2GRAY);

    d_left.upload(left);
    d_right.upload(right);

    imshow("left", left);
    imshow("right", right);

    // Set common parameters
    bm.ndisp = ndisp;
    bp.ndisp = ndisp;
    csbp.ndisp = ndisp;

    cout << endl;
    printParams();

    running = true;
    while (running)
    {
        // Prepare disparity map of specified type
        Mat disp;
        oclMat d_disp;
        workBegin();
        switch (method)
        {
        case BM:
            if (d_left.channels() > 1 || d_right.channels() > 1)
            {
                cout << "BM doesn't support color images\n";
                cvtColor(left_src, left, COLOR_BGR2GRAY);
                cvtColor(right_src, right, COLOR_BGR2GRAY);
                cout << "image_channels: " << left.channels() << endl;
                d_left.upload(left);
                d_right.upload(right);
                imshow("left", left);
                imshow("right", right);
            }
            bm(d_left, d_right, d_disp);
            break;
        case BP:
            bp(d_left, d_right, d_disp);
            break;
        case CSBP:
            csbp(d_left, d_right, d_disp);
            break;
        }

        // Show results
        d_disp.download(disp);
        workEnd();

        if (method != BM)
        {
            disp.convertTo(disp, 0);
        }
        putText(disp, text(), Point(5, 25), FONT_HERSHEY_SIMPLEX, 1.0, Scalar::all(255));
        imshow("disparity", disp);
        if(write_once)
        {
            imwrite(out_img, disp);
            write_once = false;
        }
        handleKey((char)waitKey(3));
    }
}


void App::printParams() const
{
    cout << "--- Parameters ---\n";
    cout << "image_size: (" << left.cols << ", " << left.rows << ")\n";
    cout << "image_channels: " << left.channels() << endl;
    cout << "method: " << method_str() << endl
         << "ndisp: " << ndisp << endl;
    switch (method)
    {
    case BM:
        cout << "win_size: " << bm.winSize << endl;
        cout << "prefilter_sobel: " << bm.preset << endl;
        break;
    case BP:
        cout << "iter_count: " << bp.iters << endl;
        cout << "level_count: " << bp.levels << endl;
        break;
    case CSBP:
        cout << "iter_count: " << csbp.iters << endl;
        cout << "level_count: " << csbp.levels << endl;
        break;
    }
    cout << endl;
}


void App::handleKey(char key)
{
    switch (key)
    {
    case 27:
        running = false;
        break;
    case 'p':
    case 'P':
        printParams();
        break;
    case 'g':
    case 'G':
        if (left.channels() == 1 && method != BM)
        {
            left = left_src;
            right = right_src;
        }
        else
        {
            cvtColor(left_src, left, COLOR_BGR2GRAY);
            cvtColor(right_src, right, COLOR_BGR2GRAY);
        }
        d_left.upload(left);
        d_right.upload(right);
        cout << "image_channels: " << left.channels() << endl;
        imshow("left", left);
        imshow("right", right);
        break;
    case 'm':
    case 'M':
        switch (method)
        {
        case BM:
            method = BP;
            break;
        case BP:
            method = CSBP;
            break;
        case CSBP:
            method = BM;
            break;
        }
        cout << "method: " << method_str() << endl;
        break;
    case 's':
    case 'S':
        if (method == BM)
        {
            switch (bm.preset)
            {
            case StereoBM_OCL::BASIC_PRESET:
                bm.preset = StereoBM_OCL::PREFILTER_XSOBEL;
                break;
            case StereoBM_OCL::PREFILTER_XSOBEL:
                bm.preset = StereoBM_OCL::BASIC_PRESET;
                break;
            }
            cout << "prefilter_sobel: " << bm.preset << endl;
        }
        break;
    case '1':
        ndisp == 1 ? ndisp = 8 : ndisp += 8;
        cout << "ndisp: " << ndisp << endl;
        bm.ndisp = ndisp;
        bp.ndisp = ndisp;
        csbp.ndisp = ndisp;
        break;
    case 'q':
    case 'Q':
        ndisp = max(ndisp - 8, 1);
        cout << "ndisp: " << ndisp << endl;
        bm.ndisp = ndisp;
        bp.ndisp = ndisp;
        csbp.ndisp = ndisp;
        break;
    case '2':
        if (method == BM)
        {
            bm.winSize = min(bm.winSize + 1, 51);
            cout << "win_size: " << bm.winSize << endl;
        }
        break;
    case 'w':
    case 'W':
        if (method == BM)
        {
            bm.winSize = max(bm.winSize - 1, 2);
            cout << "win_size: " << bm.winSize << endl;
        }
        break;
    case '3':
        if (method == BP)
        {
            bp.iters += 1;
            cout << "iter_count: " << bp.iters << endl;
        }
        else if (method == CSBP)
        {
            csbp.iters += 1;
            cout << "iter_count: " << csbp.iters << endl;
        }
        break;
    case 'e':
    case 'E':
        if (method == BP)
        {
            bp.iters = max(bp.iters - 1, 1);
            cout << "iter_count: " << bp.iters << endl;
        }
        else if (method == CSBP)
        {
            csbp.iters = max(csbp.iters - 1, 1);
            cout << "iter_count: " << csbp.iters << endl;
        }
        break;
    case '4':
        if (method == BP)
        {
            bp.levels += 1;
            cout << "level_count: " << bp.levels << endl;
        }
        else if (method == CSBP)
        {
            csbp.levels += 1;
            cout << "level_count: " << csbp.levels << endl;
        }
        break;
    case 'r':
    case 'R':
        if (method == BP)
        {
            bp.levels = max(bp.levels - 1, 1);
            cout << "level_count: " << bp.levels << endl;
        }
        else if (method == CSBP)
        {
            csbp.levels = max(csbp.levels - 1, 1);
            cout << "level_count: " << csbp.levels << endl;
        }
        break;
    case 'o':
    case 'O':
        write_once = true;
        break;
    }
}
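// Editor's note: the interactive sample above mixes three matchers with UI handling.
// A minimal, self-contained sketch of just the StereoBM_OCL path it exercises follows
// (not part of the original sample); the input file names are placeholders.
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"

int main()
{
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
    if (left.empty() || right.empty())
    {
        std::cout << "could not load the rectified stereo pair" << std::endl;
        return -1;
    }

    cv::ocl::oclMat d_left(left), d_right(right), d_disp;

    cv::ocl::StereoBM_OCL bm;
    bm.ndisp = 64;                // number of disparity levels, as in the sample's default

    bm(d_left, d_right, d_disp);  // block matching runs on the OpenCL device

    cv::Mat disp;
    d_disp.download(disp);
    cv::imshow("disparity", disp);
    cv::waitKey();
    return 0;
}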
@@ -1,329 +0,0 @@
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/nonfree/ocl.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"

using namespace cv;
using namespace cv::ocl;

const int LOOP_NUM = 10;
const int GOOD_PTS_MAX = 50;
const float GOOD_PORTION = 0.15f;

int64 work_begin = 0;
int64 work_end = 0;

static void workBegin()
{
    work_begin = getTickCount();
}

static void workEnd()
{
    work_end = getTickCount() - work_begin;
}

static double getTime()
{
    return work_end * 1000. / (double)getTickFrequency();
}

template<class KPDetector>
struct SURFDetector
{
    KPDetector surf;
    SURFDetector(double hessian = 800.0)
        : surf(hessian)
    {
    }
    template<class T>
    void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
    {
        surf(in, mask, pts, descriptors, useProvided);
    }
};

template<class KPMatcher>
struct SURFMatcher
{
    KPMatcher matcher;
    template<class T>
    void match(const T& in1, const T& in2, std::vector<cv::DMatch>& matches)
    {
        matcher.match(in1, in2, matches);
    }
};

static Mat drawGoodMatches(
    const Mat& cpu_img1,
    const Mat& cpu_img2,
    const std::vector<KeyPoint>& keypoints1,
    const std::vector<KeyPoint>& keypoints2,
    std::vector<DMatch>& matches,
    std::vector<Point2f>& scene_corners_
    )
{
    //-- Sort matches and keep the best GOOD_PORTION of them (at most GOOD_PTS_MAX)
    std::sort(matches.begin(), matches.end());
    std::vector< DMatch > good_matches;
    double minDist = matches.front().distance,
           maxDist = matches.back().distance;

    const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION));
    for( int i = 0; i < ptsPairs; i++ )
    {
        good_matches.push_back( matches[i] );
    }
    std::cout << "\nMax distance: " << maxDist << std::endl;
    std::cout << "Min distance: " << minDist << std::endl;

    std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl;

    // drawing the results
    Mat img_matches;
    drawMatches( cpu_img1, keypoints1, cpu_img2, keypoints2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
    }
    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point(0,0);
    obj_corners[1] = Point( cpu_img1.cols, 0 );
    obj_corners[2] = Point( cpu_img1.cols, cpu_img1.rows );
    obj_corners[3] = Point( 0, cpu_img1.rows );
    std::vector<Point2f> scene_corners(4);

    Mat H = findHomography( obj, scene, RANSAC );
    perspectiveTransform( obj_corners, scene_corners, H);

    scene_corners_ = scene_corners;

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches,
          scene_corners[0] + Point2f( (float)cpu_img1.cols, 0), scene_corners[1] + Point2f( (float)cpu_img1.cols, 0),
          Scalar( 0, 255, 0), 2, LINE_AA );
    line( img_matches,
          scene_corners[1] + Point2f( (float)cpu_img1.cols, 0), scene_corners[2] + Point2f( (float)cpu_img1.cols, 0),
          Scalar( 0, 255, 0), 2, LINE_AA );
    line( img_matches,
          scene_corners[2] + Point2f( (float)cpu_img1.cols, 0), scene_corners[3] + Point2f( (float)cpu_img1.cols, 0),
          Scalar( 0, 255, 0), 2, LINE_AA );
    line( img_matches,
          scene_corners[3] + Point2f( (float)cpu_img1.cols, 0), scene_corners[0] + Point2f( (float)cpu_img1.cols, 0),
          Scalar( 0, 255, 0), 2, LINE_AA );
    return img_matches;
}

////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// It uses the CPU findHomography interface to calculate the transformation matrix.
int main(int argc, char* argv[])
{
    const char* keys =
        "{ help h | false | print help message }"
        "{ left l | | specify left image }"
        "{ right r | | specify right image }"
        "{ output o | SURF_output.jpg | specify output save path (only works in CPU or GPU only mode) }"
        "{ use_cpu c | false | use CPU algorithms }"
        "{ use_all a | false | use both CPU and GPU algorithms}";

    CommandLineParser cmd(argc, argv, keys);
    if (cmd.get<bool>("help"))
    {
        std::cout << "Usage: surf_matcher [options]" << std::endl;
        std::cout << "Available options:" << std::endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    Mat cpu_img1, cpu_img2, cpu_img1_grey, cpu_img2_grey;
    oclMat img1, img2;
    bool useCPU = cmd.get<bool>("c");
    bool useGPU = false;
    bool useALL = cmd.get<bool>("a");

    std::string outpath = cmd.get<std::string>("o");

    cpu_img1 = imread(cmd.get<std::string>("l"));
    CV_Assert(!cpu_img1.empty());
    cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY);
    img1 = cpu_img1_grey;

    cpu_img2 = imread(cmd.get<std::string>("r"));
    CV_Assert(!cpu_img2.empty());
    cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY);
    img2 = cpu_img2_grey;

    if (useALL)
        useCPU = useGPU = false;
    else if(!useCPU && !useALL)
        useGPU = true;

    if(!useCPU)
        std::cout
            << "Device name:"
            << cv::ocl::Context::getContext()->getDeviceInfo().deviceName
            << std::endl;

    double surf_time = 0.;

    //declare input/output
    std::vector<KeyPoint> keypoints1, keypoints2;
    std::vector<DMatch> matches;

    std::vector<KeyPoint> gpu_keypoints1;
    std::vector<KeyPoint> gpu_keypoints2;
    std::vector<DMatch> gpu_matches;

    Mat descriptors1CPU, descriptors2CPU;

    oclMat keypoints1GPU, keypoints2GPU;
    oclMat descriptors1GPU, descriptors2GPU;

    //instantiate detectors/matchers
    SURFDetector<SURF> cpp_surf;
    SURFDetector<SURF_OCL> ocl_surf;

    SURFMatcher<BFMatcher> cpp_matcher;
    SURFMatcher<BFMatcher_OCL> ocl_matcher;

    //-- start of timing section
    if (useCPU)
    {
        for (int i = 0; i <= LOOP_NUM; i++)
        {
            if(i == 1) workBegin();
            cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
            cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
            cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
        }
        workEnd();
        std::cout << "CPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
        std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;

        surf_time = getTime();
        std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl << "\n";
    }
    else if(useGPU)
    {
        for (int i = 0; i <= LOOP_NUM; i++)
        {
            if(i == 1) workBegin();
            ocl_surf(img1, oclMat(), keypoints1, descriptors1GPU);
            ocl_surf(img2, oclMat(), keypoints2, descriptors2GPU);
            ocl_matcher.match(descriptors1GPU, descriptors2GPU, matches);
        }
        workEnd();
        std::cout << "OCL: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
        std::cout << "OCL: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;

        surf_time = getTime();
        std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl << "\n";
    }
    else
    {
        //cpu runs
        for (int i = 0; i <= LOOP_NUM; i++)
        {
            if(i == 1) workBegin();
            cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
            cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
            cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
        }
        workEnd();
        std::cout << "\nCPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
        std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;

        surf_time = getTime();
        std::cout << "(CPP)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl;

        //gpu runs
        for (int i = 0; i <= LOOP_NUM; i++)
        {
            if(i == 1) workBegin();
            ocl_surf(img1, oclMat(), gpu_keypoints1, descriptors1GPU);
            ocl_surf(img2, oclMat(), gpu_keypoints2, descriptors2GPU);
            ocl_matcher.match(descriptors1GPU, descriptors2GPU, gpu_matches);
        }
        workEnd();
        std::cout << "\nOCL: FOUND " << gpu_keypoints1.size() << " keypoints on first image" << std::endl;
        std::cout << "OCL: FOUND " << gpu_keypoints2.size() << " keypoints on second image" << std::endl;

        surf_time = getTime();
        std::cout << "(OCL)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl << "\n";
    }

    //--------------------------------------------------------------------------
    std::vector<Point2f> cpu_corner;
    Mat img_matches = drawGoodMatches(cpu_img1, cpu_img2, keypoints1, keypoints2, matches, cpu_corner);

    std::vector<Point2f> gpu_corner;
    Mat ocl_img_matches;
    if(useALL || (!useCPU && !useGPU))
    {
        ocl_img_matches = drawGoodMatches(cpu_img1, cpu_img2, gpu_keypoints1, gpu_keypoints2, gpu_matches, gpu_corner);

        //check accuracy
        std::cout << "\nCheck accuracy:\n";

        if(cpu_corner.size() != gpu_corner.size())
            std::cout << "Failed\n";
        else
        {
            bool result = false;
            for(size_t i = 0; i < cpu_corner.size(); i++)
            {
                if((std::abs(cpu_corner[i].x - gpu_corner[i].x) > 10)
                    || (std::abs(cpu_corner[i].y - gpu_corner[i].y) > 10))
                {
                    std::cout << "Failed\n";
                    result = false;
                    break;
                }
                result = true;
            }
            if(result)
                std::cout << "Passed\n";
        }
    }

    //-- Show detected matches
    if (useCPU)
    {
        namedWindow("cpu surf matches", 0);
        imshow("cpu surf matches", img_matches);
        imwrite(outpath, img_matches);
    }
    else if(useGPU)
    {
        namedWindow("ocl surf matches", 0);
        imshow("ocl surf matches", img_matches);
        imwrite(outpath, img_matches);
    }
    else
    {
        namedWindow("cpu surf matches", 0);
        imshow("cpu surf matches", img_matches);

        namedWindow("ocl surf matches", 0);
        imshow("ocl surf matches", ocl_img_matches);
    }
    waitKey(0);
    return EXIT_SUCCESS;
}
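// Editor's note: a minimal, self-contained sketch of the OCL branch above -- SURF_OCL for
// detection/description, BFMatcher_OCL for matching, findHomography on the CPU (not part
// of the original sample); file names are placeholders.
#include <iostream>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/nonfree/ocl.hpp"

int main()
{
    cv::Mat img1 = cv::imread("object.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("scene.png",  cv::IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty()) return -1;

    cv::ocl::oclMat d_img1(img1), d_img2(img2);
    cv::ocl::oclMat d_desc1, d_desc2;
    std::vector<cv::KeyPoint> kpts1, kpts2;

    cv::ocl::SURF_OCL surf(800.0);                    // Hessian threshold, as in the sample
    surf(d_img1, cv::ocl::oclMat(), kpts1, d_desc1);  // detect + describe on the device
    surf(d_img2, cv::ocl::oclMat(), kpts2, d_desc2);

    cv::ocl::BFMatcher_OCL matcher;
    std::vector<cv::DMatch> matches;
    matcher.match(d_desc1, d_desc2, matches);
    std::cout << "matches: " << matches.size() << std::endl;

    // homography from the matched keypoint locations, computed on the CPU
    std::vector<cv::Point2f> obj, scene;
    for (size_t i = 0; i < matches.size(); i++)
    {
        obj.push_back(kpts1[matches[i].queryIdx].pt);
        scene.push_back(kpts2[matches[i].trainIdx].pt);
    }
    if (obj.size() >= 4)
    {
        cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC);
        std::cout << "homography:\n" << H << std::endl;
    }
    return 0;
}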
@@ -1,237 +0,0 @@
#include <iostream>
#include <vector>
#include <iomanip>

#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/video/video.hpp"

using namespace std;
using namespace cv;
using namespace cv::ocl;

typedef unsigned char uchar;
#define LOOP_NUM 10
int64 work_begin = 0;
int64 work_end = 0;

static void workBegin()
{
    work_begin = getTickCount();
}
static void workEnd()
{
    work_end += (getTickCount() - work_begin);
}
static double getTime()
{
    return work_end * 1000. / getTickFrequency();
}

template <typename T> inline T clamp (T x, T a, T b)
{
    return ((x) > (a) ? ((x) < (b) ? (x) : (b)) : (a));
}

template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
    x = clamp(x, a, b);
    return c + (d - c) * (x - a) / (b - a);
}

static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
    float maxDisplacement = 1.0f;

    for (int i = 0; i < u.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        for (int j = 0; j < u.cols; ++j)
        {
            float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));

            if (d > maxDisplacement)
                maxDisplacement = d;
        }
    }

    flowField.create(u.size(), CV_8UC4);

    for (int i = 0; i < flowField.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        Vec4b* row = flowField.ptr<Vec4b>(i);

        for (int j = 0; j < flowField.cols; ++j)
        {
            row[j][0] = 0;
            row[j][1] = static_cast<unsigned char> (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][2] = static_cast<unsigned char> (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][3] = 255;
        }
    }
}


int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h | help | false | print help message }"
        "{ l | left | | specify left image }"
        "{ r | right | | specify right image }"
        "{ o | output | tvl1_output.jpg | specify output save path }"
        "{ c | camera | 0 | enable camera capturing }"
        "{ s | use_cpu | false | use cpu or gpu to process the image }"
        "{ v | video | | use video as input }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.get<bool>("help"))
    {
        cout << "Usage: tvl1_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string fname0 = cmd.get<string>("l");
    string fname1 = cmd.get<string>("r");
    string vdofile = cmd.get<string>("v");
    string outpath = cmd.get<string>("o");
    bool useCPU = cmd.get<bool>("s");
    bool useCamera = cmd.get<bool>("c");
    int inputName = cmd.get<int>("c");

    Mat frame0 = imread(fname0, cv::IMREAD_GRAYSCALE);
    Mat frame1 = imread(fname1, cv::IMREAD_GRAYSCALE);
    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
    cv::ocl::OpticalFlowDual_TVL1_OCL d_alg;

    Mat flow, show_flow;
    Mat flow_vec[2];
    if (frame0.empty() || frame1.empty())
        useCamera = true;

    if (useCamera)
    {
        VideoCapture capture;
        Mat frame, frameCopy;
        Mat frame0Gray, frame1Gray;
        Mat ptr0, ptr1;

        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());

        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << inputName << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" << endl;
            goto nocamera;
        }

        cout << "In capture ..." << endl;
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;

            if (i == 0)
            {
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }

                if (useCPU)
                {
                    alg->calc(ptr0, ptr1, flow);
                    split(flow, flow_vec);
                }
                else
                {
                    oclMat d_flowx, d_flowy;
                    d_alg(oclMat(ptr0), oclMat(ptr1), d_flowx, d_flowy);
                    d_flowx.download(flow_vec[0]);
                    d_flowy.download(flow_vec[1]);
                }
                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                getFlowField(flow_vec[0], flow_vec[1], show_flow);
                imshow("tvl1 optical flow field", show_flow);
            }

            if( waitKey( 10 ) >= 0 )
                break;
        }

        capture.release();
    }
    else
    {
nocamera:
        oclMat d_flowx, d_flowy;
        for(int i = 0; i <= LOOP_NUM; i++)
        {
            cout << "loop" << i << endl;

            if (i > 0) workBegin();
            if (useCPU)
            {
                alg->calc(frame0, frame1, flow);
                split(flow, flow_vec);
            }
            else
            {
                d_alg(oclMat(frame0), oclMat(frame1), d_flowx, d_flowy);
                d_flowx.download(flow_vec[0]);
                d_flowy.download(flow_vec[1]);
            }
            if (i > 0 && i <= LOOP_NUM)
                workEnd();

            if (i == LOOP_NUM)
            {
                if (useCPU)
                    cout << "average CPU time (noCamera) : ";
                else
                    cout << "average GPU time (noCamera) : ";
                cout << getTime() / LOOP_NUM << " ms" << endl;

                getFlowField(flow_vec[0], flow_vec[1], show_flow);
                imshow("tvl1 optical flow field", show_flow);
                imwrite(outpath, show_flow);
            }
        }
    }

    waitKey();

    return EXIT_SUCCESS;
}
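// Editor's note: a minimal, self-contained sketch of the non-camera OCL branch above --
// OpticalFlowDual_TVL1_OCL takes two grayscale frames and returns the horizontal and
// vertical flow components (not part of the original sample); file names are placeholders.
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"

int main()
{
    cv::Mat frame0 = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
    cv::Mat frame1 = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
    if (frame0.empty() || frame1.empty()) return -1;

    cv::ocl::OpticalFlowDual_TVL1_OCL tvl1;
    cv::ocl::oclMat d_flowx, d_flowy;
    tvl1(cv::ocl::oclMat(frame0), cv::ocl::oclMat(frame1), d_flowx, d_flowy);

    cv::Mat flowx, flowy;          // per-pixel u and v components, CV_32FC1
    d_flowx.download(flowx);
    d_flowy.download(flowy);
    std::cout << "flow field: " << flowx.cols << " x " << flowx.rows << std::endl;
    return 0;
}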