diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake index a046b8fc3..c7d085845 100644 --- a/cmake/OpenCVFindLibsPerf.cmake +++ b/cmake/OpenCVFindLibsPerf.cmake @@ -93,25 +93,15 @@ else() set(HAVE_CSTRIPES 0) endif() -# --- OpenMP --- -if(WITH_OPENMP AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES) - find_package(OpenMP) - if(OPENMP_FOUND) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") - endif() - set(HAVE_OPENMP "${OPENMP_FOUND}") -endif() - # --- GCD --- -if(APPLE AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP) +if(APPLE AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES) set(HAVE_GCD 1) else() set(HAVE_GCD 0) endif() # --- Concurrency --- -if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP) +if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES) set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/concurrencytest.cpp") file(WRITE "${_fname}" "#if _MSC_VER < 1600\n#error\n#endif\nint main() { return 0; }\n") try_compile(HAVE_CONCURRENCY "${CMAKE_BINARY_DIR}" "${_fname}") @@ -119,3 +109,13 @@ if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP) else() set(HAVE_CONCURRENCY 0) endif() + +# --- OpenMP --- +if(WITH_OPENMP) + find_package(OpenMP) + if(OPENMP_FOUND) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + endif() + set(HAVE_OPENMP "${OPENMP_FOUND}") +endif() diff --git a/modules/objdetect/src/hog.cpp b/modules/objdetect/src/hog.cpp index 4697a0105..c257c80e2 100644 --- a/modules/objdetect/src/hog.cpp +++ b/modules/objdetect/src/hog.cpp @@ -46,6 +46,7 @@ #include #include +#include /****************************************************************************************\ The code below is implementation of HOG (Histogram-of-Oriented Gradients) @@ -3533,7 +3534,7 @@ void HOGDescriptor::groupRectangles(std::vector& rectList, std::vector std::vector > 
rrects(nclasses); std::vector numInClass(nclasses, 0); - std::vector foundWeights(nclasses, DBL_MIN); + std::vector foundWeights(nclasses, -std::numeric_limits::max()); int i, j, nlabels = (int)labels.size(); for( i = 0; i < nlabels; i++ ) diff --git a/modules/superres/src/frame_source.cpp b/modules/superres/src/frame_source.cpp index f7b9685ab..12790b29f 100644 --- a/modules/superres/src/frame_source.cpp +++ b/modules/superres/src/frame_source.cpp @@ -250,7 +250,7 @@ namespace Ptr cv::superres::createFrameSource_Video_CUDA(const String& fileName) { - return makePtr(fileName); + return makePtr(fileName); } #endif // HAVE_OPENCV_CUDACODEC diff --git a/modules/videoio/include/opencv2/videoio/videoio_c.h b/modules/videoio/include/opencv2/videoio/videoio_c.h index 692def900..f5166bf01 100644 --- a/modules/videoio/include/opencv2/videoio/videoio_c.h +++ b/modules/videoio/include/opencv2/videoio/videoio_c.h @@ -183,6 +183,7 @@ enum CV_CAP_PROP_ROLL =35, CV_CAP_PROP_IRIS =36, CV_CAP_PROP_SETTINGS =37, + CV_CAP_PROP_BUFFERSIZE =38, CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed diff --git a/modules/videoio/src/cap_dc1394_v2.cpp b/modules/videoio/src/cap_dc1394_v2.cpp index 20ec79bb5..d120b5923 100644 --- a/modules/videoio/src/cap_dc1394_v2.cpp +++ b/modules/videoio/src/cap_dc1394_v2.cpp @@ -278,6 +278,7 @@ CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP() dcCam = 0; isoSpeed = 400; fps = 15; + // Number of DMA buffers (default 8 below); exposed via CV_CAP_PROP_BUFFERSIZE — a value of 1 keeps only a single frame in the buffer. 
nDMABufs = 8; started = false; cameraId = 0; @@ -688,6 +689,8 @@ double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const break; case CV_CAP_PROP_ISO_SPEED: return (double) isoSpeed; + case CV_CAP_PROP_BUFFERSIZE: + return (double) nDMABufs; default: if (propId +#include +#include +#include + +using namespace std; +using namespace cv; + +// Functions to parse command-line arguments +static string getCommandOption(const vector&, const string&); +static void setCommandOptions(vector&, int, char**); +static bool doesCmdOptionExist(const vector& , const string&); + +// Functions for facial feature detection +static void help(); +static void detectFaces(Mat&, vector >&, string); +static void detectEyes(Mat&, vector >&, string); +static void detectNose(Mat&, vector >&, string); +static void detectMouth(Mat&, vector >&, string); +static void detectFacialFeaures(Mat&, const vector >, string, string, string); + +string input_image_path; +string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_path; + +int main(int argc, char** argv) +{ + if(argc < 3) + { + help(); + return 1; + } + + // Extract command-line options + vector args; + setCommandOptions(args, argc, argv); + + input_image_path = argv[1]; + face_cascade_path = argv[2]; + eye_cascade_path = (doesCmdOptionExist(args, "-eyes")) ? getCommandOption(args, "-eyes") : ""; + nose_cascade_path = (doesCmdOptionExist(args, "-nose")) ? getCommandOption(args, "-nose") : ""; + mouth_cascade_path = (doesCmdOptionExist(args, "-mouth")) ? 
getCommandOption(args, "-mouth") : ""; + + // Load image and cascade classifier files + Mat image; + image = imread(input_image_path); + + // Detect faces and facial features + vector > faces; + detectFaces(image, faces, face_cascade_path); + detectFacialFeaures(image, faces, eye_cascade_path, nose_cascade_path, mouth_cascade_path); + + imshow("Result", image); + + waitKey(0); + return 0; +} + +void setCommandOptions(vector& args, int argc, char** argv) +{ + for(int i = 1; i < argc; ++i) + { + args.push_back(argv[i]); + } + return; +} + +string getCommandOption(const vector& args, const string& opt) +{ + string answer; + vector::const_iterator it = find(args.begin(), args.end(), opt); + if(it != args.end() && (++it != args.end())) + answer = *it; + return answer; +} + +bool doesCmdOptionExist(const vector& args, const string& opt) +{ + vector::const_iterator it = find(args.begin(), args.end(), opt); + return (it != args.end()); +} + +static void help() +{ + cout << "\nThis file demonstrates facial feature points detection using Haarcascade classifiers.\n" + "The program detects a face and eyes, nose and mouth inside the face." + "The code has been tested on the Japanese Female Facial Expression (JAFFE) database and found" + "to give reasonably accurate results. \n"; + + cout << "\nUSAGE: ./cpp-example-facial_features [IMAGE] [FACE_CASCADE] [OPTIONS]\n" + "IMAGE\n\tPath to the image of a face taken as input.\n" + "FACE_CASCSDE\n\t Path to a haarcascade classifier for face detection.\n" + "OPTIONS: \nThere are 3 options available which are described in detail. 
There must be a " + "space between the option and it's argument (All three options accept arguments).\n" + "\t-eyes : Specify the haarcascade classifier for eye detection.\n" + "\t-nose : Specify the haarcascade classifier for nose detection.\n" + "\t-mouth : Specify the haarcascade classifier for mouth detection.\n"; + + + cout << "EXAMPLE:\n" + "(1) ./cpp-example-facial_features image.jpg face.xml -eyes eyes.xml -mouth mouth.xml\n" + "\tThis will detect the face, eyes and mouth in image.jpg.\n" + "(2) ./cpp-example-facial_features image.jpg face.xml -nose nose.xml\n" + "\tThis will detect the face and nose in image.jpg.\n" + "(3) ./cpp-example-facial_features image.jpg face.xml\n" + "\tThis will detect only the face in image.jpg.\n"; + + cout << " \n\nThe classifiers for face and eyes can be downloaded from : " + " \nhttps://github.com/Itseez/opencv/tree/master/data/haarcascades"; + + cout << "\n\nThe classifiers for nose and mouth can be downloaded from : " + " \nhttps://github.com/Itseez/opencv_contrib/tree/master/modules/face/data/cascades\n"; +} + +static void detectFaces(Mat& img, vector >& faces, string cascade_path) +{ + CascadeClassifier face_cascade; + face_cascade.load(cascade_path); + + face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + return; +} + +static void detectFacialFeaures(Mat& img, const vector > faces, string eye_cascade, + string nose_cascade, string mouth_cascade) +{ + for(unsigned int i = 0; i < faces.size(); ++i) + { + // Mark the bounding box enclosing the face + Rect face = faces[i]; + rectangle(img, Point(face.x, face.y), Point(face.x+face.width, face.y+face.height), + Scalar(255, 0, 0), 1, 4); + + // Eyes, nose and mouth will be detected inside the face (region of interest) + Mat ROI = img(Rect(face.x, face.y, face.width, face.height)); + + // Check if all features (eyes, nose and mouth) are being detected + bool is_full_detection = false; + if( (!eye_cascade.empty()) && (!nose_cascade.empty()) 
&& (!mouth_cascade.empty()) ) + is_full_detection = true; + + // Detect eyes if classifier provided by the user + if(!eye_cascade.empty()) + { + vector > eyes; + detectEyes(ROI, eyes, eye_cascade); + + // Mark points corresponding to the centre of the eyes + for(unsigned int j = 0; j < eyes.size(); ++j) + { + Rect e = eyes[j]; + circle(ROI, Point(e.x+e.width/2, e.y+e.height/2), 3, Scalar(0, 255, 0), -1, 8); + /* rectangle(ROI, Point(e.x, e.y), Point(e.x+e.width, e.y+e.height), + Scalar(0, 255, 0), 1, 4); */ + } + } + + // Detect nose if classifier provided by the user + double nose_center_height = 0.0; + if(!nose_cascade.empty()) + { + vector > nose; + detectNose(ROI, nose, nose_cascade); + + // Mark points corresponding to the centre (tip) of the nose + for(unsigned int j = 0; j < nose.size(); ++j) + { + Rect n = nose[j]; + circle(ROI, Point(n.x+n.width/2, n.y+n.height/2), 3, Scalar(0, 255, 0), -1, 8); + nose_center_height = (n.y + n.height/2); + } + } + + // Detect mouth if classifier provided by the user + double mouth_center_height = 0.0; + if(!mouth_cascade.empty()) + { + vector > mouth; + detectMouth(ROI, mouth, mouth_cascade); + + for(unsigned int j = 0; j < mouth.size(); ++j) + { + Rect m = mouth[j]; + mouth_center_height = (m.y + m.height/2); + + // The mouth should lie below the nose + if( (is_full_detection) && (mouth_center_height > nose_center_height) ) + { + rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4); + } + else if( (is_full_detection) && (mouth_center_height <= nose_center_height) ) + continue; + else + rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4); + } + } + + } + + return; +} + +static void detectEyes(Mat& img, vector >& eyes, string cascade_path) +{ + CascadeClassifier eyes_cascade; + eyes_cascade.load(cascade_path); + + eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + return; +} + +static void detectNose(Mat& img, 
vector >& nose, string cascade_path) +{ + CascadeClassifier nose_cascade; + nose_cascade.load(cascade_path); + + nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + return; +} + +static void detectMouth(Mat& img, vector >& mouth, string cascade_path) +{ + CascadeClassifier mouth_cascade; + mouth_cascade.load(cascade_path); + + mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + return; +}