Fix build of samples

This commit is contained in:
Andrey Kamaev 2012-10-16 20:03:07 +04:00
parent 7225f89ea2
commit f4e33ea0ba
5 changed files with 53 additions and 186 deletions

View File

@ -1,104 +0,0 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <stdio.h>
#include <string>
#include <vector>
using namespace std;
using namespace cv;
const string WindowName = "Face Detection example";
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter();
cv::Ptr<cv::CascadeClassifier> Detector;
};
int main(int , char** )
{
namedWindow(WindowName);
VideoCapture VideoStream(0);
if (!VideoStream.isOpened())
{
printf("Error: Cannot open video stream from camera\n");
return 1;
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);
if (!Detector.run())
{
printf("Error: Detector initialization failed\n");
return 2;
}
Mat ReferenceFrame;
Mat GrayFrame;
vector<Rect> Faces;
while(true)
{
VideoStream >> ReferenceFrame;
cvtColor(ReferenceFrame, GrayFrame, COLOR_RGB2GRAY);
Detector.process(GrayFrame);
Detector.getObjects(Faces);
for (size_t i = 0; i < Faces.size(); i++)
{
rectangle(ReferenceFrame, Faces[i], CV_RGB(0,255,0));
}
imshow(WindowName, ReferenceFrame);
if (cvWaitKey(30) >= 0) break;
}
Detector.stop();
return 0;
}
#else
#include <stdio.h>
// Fallback entry point for platforms without the required camera/GUI
// support: report the limitation and exit successfully.
int main()
{
    const char* message = "This sample works for UNIX or ANDROID only\n";
    printf("%s", message);
    return 0;
}
#endif

View File

@ -43,6 +43,8 @@
#define LOGE(...) do{} while(0) #define LOGE(...) do{} while(0)
#endif #endif
using namespace cv; using namespace cv;
using namespace std; using namespace std;
@ -61,31 +63,9 @@ static void usage()
LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" "); LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
} }
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter();
cv::Ptr<cv::CascadeClassifier> Detector;
};
static int test_FaceDetector(int argc, char *argv[]) static int test_FaceDetector(int argc, char *argv[])
{ {
if (argc < 4) if (argc < 4) {
{
usage(); usage();
return -1; return -1;
} }
@ -100,14 +80,12 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Mat> images; vector<Mat> images;
{ {
char filename[256]; char filename[256];
for(int n=1; ; n++) for(int n=1; ; n++) {
{
snprintf(filename, sizeof(filename), filepattern, n); snprintf(filename, sizeof(filename), filepattern, n);
LOGD("filename='%s'", filename); LOGD("filename='%s'", filename);
Mat m0; Mat m0;
m0=imread(filename); m0=imread(filename);
if (m0.empty()) if (m0.empty()) {
{
LOGI0("Cannot read the file --- break"); LOGI0("Cannot read the file --- break");
break; break;
} }
@ -116,15 +94,10 @@ static int test_FaceDetector(int argc, char *argv[])
LOGD("read %d images", (int)images.size()); LOGD("read %d images", (int)images.size());
} }
std::string cascadeFrontalfilename=cascadefile;
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params; DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params); std::string cascadeFrontalfilename=cascadefile;
DetectionBasedTracker fd(cascadeFrontalfilename, params);
fd.run(); fd.run();
@ -135,13 +108,12 @@ static int test_FaceDetector(int argc, char *argv[])
double freq=getTickFrequency(); double freq=getTickFrequency();
int num_images=images.size(); int num_images=images.size();
for(int n=1; n <= num_images; n++) for(int n=1; n <= num_images; n++) {
{
int64 tcur=getTickCount(); int64 tcur=getTickCount();
int64 dt=tcur-tprev; int64 dt=tcur-tprev;
tprev=tcur; tprev=tcur;
double t_ms=((double)dt)/freq * 1000.0; double t_ms=((double)dt)/freq * 1000.0;
LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms); LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
m=images[n-1]; m=images[n-1];
CV_Assert(! m.empty()); CV_Assert(! m.empty());
cvtColor(m, gray, CV_BGR2GRAY); cvtColor(m, gray, CV_BGR2GRAY);
@ -151,8 +123,11 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Rect> result; vector<Rect> result;
fd.getObjects(result); fd.getObjects(result);
for(size_t i=0; i < result.size(); i++)
{
for(size_t i=0; i < result.size(); i++) {
Rect r=result[i]; Rect r=result[i];
CV_Assert(r.area() > 0); CV_Assert(r.area() > 0);
Point tl=r.tl(); Point tl=r.tl();
@ -161,14 +136,14 @@ static int test_FaceDetector(int argc, char *argv[])
rectangle(m, tl, br, color, 3); rectangle(m, tl, br, color, 3);
} }
} }
char outfilename[256];
for(int n=1; n <= num_images; n++)
{ {
snprintf(outfilename, sizeof(outfilename), outfilepattern, n); char outfilename[256];
LOGD("outfilename='%s'", outfilename); for(int n=1; n <= num_images; n++) {
m=images[n-1]; snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
imwrite(outfilename, m); LOGD("outfilename='%s'", outfilename);
m=images[n-1];
imwrite(outfilename, m);
}
} }
fd.stop(); fd.stop();
@ -176,6 +151,8 @@ static int test_FaceDetector(int argc, char *argv[])
return 0; return 0;
} }
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
return test_FaceDetector(argc, argv); return test_FaceDetector(argc, argv);

View File

@ -26,41 +26,41 @@ static Mat loadImage(const string& name)
int main(int argc, const char* argv[]) int main(int argc, const char* argv[])
{ {
CommandLineParser cmd(argc, argv, CommandLineParser cmd(argc, argv,
"{ image i | pic1.png | input image }" "{ i | image | pic1.png | input image }"
"{ template t | templ.png | template image }" "{ t | template | templ.png | template image }"
"{ scale s | | estimate scale }" "{ s | scale | | estimate scale }"
"{ rotation r | | estimate rotation }" "{ r | rotation | | estimate rotation }"
"{ gpu | | use gpu version }" "{ | gpu | | use gpu version }"
"{ minDist | 100 | minimum distance between the centers of the detected objects }" "{ | minDist | 100 | minimum distance between the centers of the detected objects }"
"{ levels | 360 | R-Table levels }" "{ | levels | 360 | R-Table levels }"
"{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }" "{ | votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
"{ angleThresh | 10000 | angle votes treshold }" "{ | angleThresh | 10000 | angle votes treshold }"
"{ scaleThresh | 1000 | scale votes treshold }" "{ | scaleThresh | 1000 | scale votes treshold }"
"{ posThresh | 100 | position votes threshold }" "{ | posThresh | 100 | position votes threshold }"
"{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }" "{ | dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
"{ minScale | 0.5 | minimal scale to detect }" "{ | minScale | 0.5 | minimal scale to detect }"
"{ maxScale | 2 | maximal scale to detect }" "{ | maxScale | 2 | maximal scale to detect }"
"{ scaleStep | 0.05 | scale step }" "{ | scaleStep | 0.05 | scale step }"
"{ minAngle | 0 | minimal rotation angle to detect in degrees }" "{ | minAngle | 0 | minimal rotation angle to detect in degrees }"
"{ maxAngle | 360 | maximal rotation angle to detect in degrees }" "{ | maxAngle | 360 | maximal rotation angle to detect in degrees }"
"{ angleStep | 1 | angle step in degrees }" "{ | angleStep | 1 | angle step in degrees }"
"{ maxSize | 1000 | maximal size of inner buffers }" "{ | maxSize | 1000 | maximal size of inner buffers }"
"{ help h ? | | print help message }" "{ h | help | | print help message }"
); );
cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform."); //cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
if (cmd.has("help")) if (cmd.get<bool>("help"))
{ {
cmd.printMessage(); cmd.printParams();
return 0; return 0;
} }
const string templName = cmd.get<string>("template"); const string templName = cmd.get<string>("template");
const string imageName = cmd.get<string>("image"); const string imageName = cmd.get<string>("image");
const bool estimateScale = cmd.has("scale"); const bool estimateScale = cmd.get<bool>("scale");
const bool estimateRotation = cmd.has("rotation"); const bool estimateRotation = cmd.get<bool>("rotation");
const bool useGpu = cmd.has("gpu"); const bool useGpu = cmd.get<bool>("gpu");
const double minDist = cmd.get<double>("minDist"); const double minDist = cmd.get<double>("minDist");
const int levels = cmd.get<int>("levels"); const int levels = cmd.get<int>("levels");
const int votesThreshold = cmd.get<int>("votesThreshold"); const int votesThreshold = cmd.get<int>("votesThreshold");
@ -76,12 +76,6 @@ int main(int argc, const char* argv[])
const double angleStep = cmd.get<double>("angleStep"); const double angleStep = cmd.get<double>("angleStep");
const int maxSize = cmd.get<int>("maxSize"); const int maxSize = cmd.get<int>("maxSize");
if (!cmd.check())
{
cmd.printErrors();
return -1;
}
Mat templ = loadImage(templName); Mat templ = loadImage(templName);
Mat image = loadImage(imageName); Mat image = loadImage(imageName);

View File

@ -364,7 +364,7 @@ TEST(BruteForceMatcher)
// Init GPU matcher // Init GPU matcher
gpu::BFMatcher_GPU d_matcher(NORM_L2); gpu::BruteForceMatcher_GPU_base d_matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
gpu::GpuMat d_query(query); gpu::GpuMat d_query(query);
gpu::GpuMat d_train(train); gpu::GpuMat d_train(train);

View File

@ -57,7 +57,7 @@ int main(int argc, char* argv[])
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl; cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
// matching descriptors // matching descriptors
BFMatcher_GPU matcher(NORM_L2); gpu::BruteForceMatcher_GPU_base matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
GpuMat trainIdx, distance; GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance); matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
@ -69,7 +69,7 @@ int main(int argc, char* argv[])
surf.downloadKeypoints(keypoints2GPU, keypoints2); surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1); surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2); surf.downloadDescriptors(descriptors2GPU, descriptors2);
BFMatcher_GPU::matchDownload(trainIdx, distance, matches); BruteForceMatcher_GPU_base::matchDownload(trainIdx, distance, matches);
// drawing the results // drawing the results
Mat img_matches; Mat img_matches;