Merge pull request #645 from taka-no-me:bump_headers
Commit: d9cd753835
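The changes below migrate user code, docs and build scripts from the old per-module header layout (opencv2/<module>/<module>.hpp) to the new top-level module headers (opencv2/<module>.hpp). A minimal sketch of the two styles, assuming a program that only needs the core and highgui modules; the image path is a placeholder:

.. code-block:: cpp

    // Old layout (kept as compatibility headers):
    //   #include "opencv2/core/core.hpp"
    //   #include "opencv2/highgui/highgui.hpp"

    // New layout used throughout this pull request:
    #include "opencv2/core.hpp"
    #include "opencv2/highgui.hpp"

    #include <iostream>

    int main()
    {
        cv::Mat img = cv::imread("sample.jpg"); // placeholder file name
        if (img.empty())
        {
            std::cerr << "could not load image" << std::endl;
            return 1;
        }
        cv::imshow("preview", img);
        cv::waitKey();
        return 0;
    }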
@@ -1,3 +1,5 @@
add_definitions(-D__OPENCV_BUILD=1)

add_subdirectory(haartraining)
add_subdirectory(traincascade)
add_subdirectory(sft)
@@ -42,7 +42,7 @@
#ifndef __CVCOMMON_H_
#define __CVCOMMON_H_

#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "cxcore.h"
@@ -44,7 +44,7 @@
*
* Measure performance of classifier
*/
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "cv.h"
@@ -41,7 +41,7 @@
//M*/

#include <sft/dataset.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/highgui.hpp>

#include <iostream>
#include <queue>
@@ -74,4 +74,4 @@ int sft::ScaledDataset::available(SampleType type) const
return (int)((type == POSITIVE)? pos.size():neg.size());
}

sft::ScaledDataset::~ScaledDataset(){}
sft::ScaledDataset::~ScaledDataset(){}
@@ -43,8 +43,8 @@
#ifndef __SFT_COMMON_HPP__
#define __SFT_COMMON_HPP__

#include <opencv2/core/core.hpp>
#include <opencv2/softcascade/softcascade.hpp>
#include <opencv2/core.hpp>
#include <opencv2/softcascade.hpp>

namespace cv {using namespace softcascade;}
namespace sft
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "HOGfeatures.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "boost.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "cascadeclassifier.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "traincascade_features.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "haarfeatures.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "cv.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "lbpfeatures.h"
@@ -1,4 +1,4 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/internal.hpp"

#include "cv.h"
@@ -429,7 +429,7 @@ endmacro()
macro(ocv_glob_module_sources)
file(GLOB lib_srcs "src/*.cpp")
file(GLOB lib_int_hdrs "src/*.hpp" "src/*.h")
file(GLOB lib_hdrs "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
file(GLOB lib_hdrs "include/opencv2/*.hpp" "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
file(GLOB lib_hdrs_detail "include/opencv2/${name}/detail/*.hpp" "include/opencv2/${name}/detail/*.h")

file(GLOB lib_device_srcs "src/cuda/*.cu")
@@ -6,15 +6,15 @@ sys.path.append("../modules/python/src2/")
import hdr_parser as hp

opencv_hdr_list = [
"../modules/core/include/opencv2/core/core.hpp",
"../modules/ml/include/opencv2/ml/ml.hpp",
"../modules/imgproc/include/opencv2/imgproc/imgproc.hpp",
"../modules/calib3d/include/opencv2/calib3d/calib3d.hpp",
"../modules/features2d/include/opencv2/features2d/features2d.hpp",
"../modules/core/include/opencv2/core.hpp",
"../modules/ml/include/opencv2/ml.hpp",
"../modules/imgproc/include/opencv2/imgproc.hpp",
"../modules/calib3d/include/opencv2/calib3d.hpp",
"../modules/features2d/include/opencv2/features2d.hpp",
"../modules/video/include/opencv2/video/tracking.hpp",
"../modules/video/include/opencv2/video/background_segm.hpp",
"../modules/objdetect/include/opencv2/objdetect/objdetect.hpp",
"../modules/highgui/include/opencv2/highgui/highgui.hpp",
"../modules/objdetect/include/opencv2/objdetect.hpp",
"../modules/highgui/include/opencv2/highgui.hpp",
]

opencv_module_list = [
@@ -23,9 +23,9 @@ OpenCV 2 received reorganization. No longer are all the functions crammed into a

.. code-block:: cpp

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

All the OpenCV related stuff is put into the *cv* namespace to avoid name conflicts with other libraries' data structures and functions. Therefore, you either need to prepend the *cv::* prefix to everything that comes from OpenCV, or add a directive after the includes to use the namespace:
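A short sketch of the two options described above, using the new-style headers from this hunk; the image file name is a placeholder:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>

    using namespace cv;   // the directive mentioned above

    int main()
    {
        Mat src = imread("input.png");           // placeholder file name
        if (src.empty())
            return 1;
        Mat gray;
        cvtColor(src, gray, COLOR_BGR2GRAY);     // fully qualified form: cv::cvtColor(...)
        imshow("gray", gray);
        waitKey();
        return 0;
    }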
@@ -29,10 +29,10 @@ This tutorial code is shown in the lines below. You can also download it from `here <

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"

using namespace cv;
@@ -28,9 +28,9 @@ This tutorial code is shown in the lines below. You can also download it from `here <

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;
@@ -25,9 +25,9 @@ This tutorial code is shown in the lines below. You can also download it from `here <

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;
@@ -26,10 +26,10 @@ This tutorial code is shown in the lines below. You can also download it from `here <

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"

using namespace cv;
@@ -23,8 +23,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -22,8 +22,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -155,8 +155,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
(File diff suppressed because one or more lines are too long.)
@@ -74,8 +74,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "highgui.h"
#include <stdlib.h>
#include <stdio.h>
@@ -122,8 +122,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

using namespace std;
using namespace cv;
@@ -107,8 +107,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
@@ -88,8 +88,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -86,8 +86,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -89,8 +89,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -131,8 +131,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -90,8 +90,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -52,8 +52,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -77,8 +77,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -48,8 +48,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -104,8 +104,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <iostream>
@@ -59,8 +59,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -63,8 +63,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -125,8 +125,8 @@ Code

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -97,8 +97,8 @@ Code

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -115,8 +115,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -84,8 +84,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
@@ -25,8 +25,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -25,8 +25,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -23,8 +23,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -23,8 +23,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -25,8 +25,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -23,8 +23,8 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
@@ -134,8 +134,8 @@ The tutorial code is shown in the lines below. You can also download it from `here <h

.. code-block:: cpp

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
@@ -26,9 +26,9 @@ This tutorial code is shown in the lines below. You can also download it from `here <

.. code-block:: cpp

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <iostream>
#include <stdio.h>
@@ -61,14 +61,14 @@
//CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module")

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/flann/flann.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/flann.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/legacy/compat.hpp"

#if !defined(CV_IMPL)
@@ -47,18 +47,17 @@
//#endif

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/contrib.hpp"

#endif
@@ -48,6 +48,6 @@
//#endif

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"

#endif
@@ -43,8 +43,8 @@
#define __OPENCV_OLD_HIGHGUI_H__

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/highgui.hpp"

#endif
@@ -42,7 +42,7 @@
#define __OPENCV_OLD_ML_H__

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"

#endif
@@ -44,18 +44,18 @@
#define __OPENCV_ALL_HPP__

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/photo.hpp"
#include "opencv2/video.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/contrib.hpp"

#endif
@@ -1,3 +1,5 @@
add_definitions(-D__OPENCV_BUILD=1)

if(NOT OPENCV_MODULES_PATH)
set(OPENCV_MODULES_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
@@ -2,7 +2,6 @@
#define _CAMERAACTIVITY_H_

#include <camera_properties.h>
//#include <opencv2/core/core.hpp>

class CameraActivity
{
modules/calib3d/include/opencv2/calib3d.hpp (new file, 780 lines)
@@ -0,0 +1,780 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_CALIB3D_HPP__
#define __OPENCV_CALIB3D_HPP__

#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"

#ifdef __cplusplus
extern "C" {
#endif

/****************************************************************************************\
*                      Camera Calibration, Pose Estimation and Stereo                    *
\****************************************************************************************/

typedef struct CvPOSITObject CvPOSITObject;

/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );


/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);

/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );

/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );

CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );

/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2

#define CV_LMEDS 4
#define CV_RANSAC 8

#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC

enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};

CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );

/* For each input point on one of images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );

/* Triangulation functions */

CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);

CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);


/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));

/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );

/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));

/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));

/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));

/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );

/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );

/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));

/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );

/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );

#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8

// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);

/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );

/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );

#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536


/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );

/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));

#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512

/* Computes the transformation from one camera coordinate system to another one
from a few correspondent views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));

#define CV_CALIB_ZERO_DISPARITY 1024

/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));

/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));



/* stereo correspondence parameters and functions */

#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1

/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]

// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)

// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window

int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;

// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;

#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2

CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));

CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );

CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );

CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );

CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );

/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );


#ifdef __cplusplus
}

//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );

void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };

cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};

namespace cv
{
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());

//! type of the robust estimation algorithm
enum
{
LMEDS=CV_LMEDS, //!< least-median algorithm
RANSAC=CV_RANSAC //!< RANSAC algorithm
};

//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method=0, double ransacReprojThreshold=3,
OutputArray mask=noArray());

//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method=0, double ransacReprojThreshold=3);

//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx=noArray(),
OutputArray Qy=noArray(),
OutputArray Qz=noArray());

//! Decomposes the projection matrix into camera matrix and the rotation matrix and the translation vector
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX=noArray(),
OutputArray rotMatrixY=noArray(),
OutputArray rotMatrixZ=noArray(),
OutputArray eulerAngles=noArray() );

//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
OutputArray dABdA,
OutputArray dABdB );

//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );

//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray imagePoints,
OutputArray jacobian=noArray(),
double aspectRatio=0 );

//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
enum
{
ITERATIVE=CV_ITERATIVE,
EPNP=CV_EPNP,
P3P=CV_P3P
};
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=ITERATIVE);

//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
InputArray imagePoints,
InputArray cameraMatrix,
InputArray distCoeffs,
OutputArray rvec,
OutputArray tvec,
bool useExtrinsicGuess = false,
int iterationsCount = 100,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = ITERATIVE);

//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio=1. );

enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };

//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );

//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);

//! draws the checkerboard pattern (found or partly found) in the image
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );

enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4 };

//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());

//! the deprecated function. Use findCirclesGrid() instead of it.
CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
enum
{
CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
CALIB_FIX_K1 = CV_CALIB_FIX_K1,
CALIB_FIX_K2 = CV_CALIB_FIX_K2,
CALIB_FIX_K3 = CV_CALIB_FIX_K3,
CALIB_FIX_K4 = CV_CALIB_FIX_K4,
CALIB_FIX_K5 = CV_CALIB_FIX_K5,
CALIB_FIX_K6 = CV_CALIB_FIX_K6,
CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
CALIB_THIN_PRISM_MODEL = CV_CALIB_THIN_PRISM_MODEL,
CALIB_FIX_S1_S2_S3_S4=CV_CALIB_FIX_S1_S2_S3_S4,
// only for stereo
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
// for stereo rectification
CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
};

//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize,
CV_OUT InputOutputArray cameraMatrix,
CV_OUT InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags=0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );

//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
Size imageSize,
double apertureWidth,
double apertureHeight,
CV_OUT double& fovx,
CV_OUT double& fovy,
CV_OUT double& focalLength,
CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );

//! finds intrinsic and extrinsic parameters of a stereo camera
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
CV_OUT InputOutputArray cameraMatrix1,
CV_OUT InputOutputArray distCoeffs1,
CV_OUT InputOutputArray cameraMatrix2,
CV_OUT InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );


//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
double alpha=-1, Size newImageSize=Size(),
CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );

//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
double threshold=5 );

//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
InputArray cameraMatrix3, InputArray distCoeffs3,
InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
Size imageSize, InputArray R12, InputArray T12,
InputArray R13, InputArray T13,
OutputArray R1, OutputArray R2, OutputArray R3,
OutputArray P1, OutputArray P2, OutputArray P3,
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );

//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),
CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);

//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );

//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );

//! for backward compatibility
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );

//! the algorithm for finding fundamental matrix
enum
{
FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
};

//! finds fundamental matrix from a set of corresponding 2D points
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
int method=FM_RANSAC,
double param1=3., double param2=0.99,
OutputArray mask=noArray());

//! variant of findFundamentalMat for backward compatibility
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, int method=FM_RANSAC,
double param1=3., double param2=0.99);

//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2, double focal = 1.0, Point2d pp = Point2d(0, 0),
int method = CV_RANSAC,
double prob = 0.999, double threshold = 1.0, OutputArray mask = noArray() );

//! decompose essential matrix to possible rotation matrix and one translation vector
CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );

//! recover relative camera pose from a set of corresponding 2D points
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t,
double focal = 1.0, Point2d pp = Point2d(0, 0),
InputOutputArray mask = noArray());


//! finds coordinates of epipolar lines corresponding to the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );

CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,
OutputArray points4D );

CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );


class CV_EXPORTS_W StereoMatcher : public Algorithm
{
public:
CV_WRAP virtual void compute( InputArray left, InputArray right,
OutputArray disparity ) = 0;
};

enum { STEREO_DISP_SCALE=16, STEREO_PREFILTER_NORMALIZED_RESPONSE = 0, STEREO_PREFILTER_XSOBEL = 1 };

CV_EXPORTS Ptr<StereoMatcher> createStereoBM(int numDisparities=0, int SADWindowSize=21);

CV_EXPORTS Ptr<StereoMatcher> createStereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
int preFilterCap=0, int uniquenessRatio=0,
int speckleWindowSize=0, int speckleRange=0,
bool fullDP=false);

template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();

// to be moved to "compat" module
class CV_EXPORTS_W StereoBM
{
public:
enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1,
BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 };

//! the default constructor
CV_WRAP StereoBM();
//! the full constructor taking the camera-specific preset, number of disparities and the SAD window size
CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
//! the method that reinitializes the state. The previous content is destroyed
void init(int preset, int ndisparities=0, int SADWindowSize=21);
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right,
OutputArray disparity, int disptype=CV_16S );

//! pointer to the underlying CvStereoBMState
Ptr<CvStereoBMState> state;
};


// to be moved to "compat" module
class CV_EXPORTS_W StereoSGBM
{
public:
enum { DISP_SHIFT=4, DISP_SCALE = (1<<DISP_SHIFT) };

//! the default constructor
CV_WRAP StereoSGBM();

//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
int preFilterCap=0, int uniquenessRatio=0,
int speckleWindowSize=0, int speckleRange=0,
bool fullDP=false);
//! the destructor
virtual ~StereoSGBM();

//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(InputArray left, InputArray right,
OutputArray disp);

CV_PROP_RW int minDisparity;
CV_PROP_RW int numberOfDisparities;
CV_PROP_RW int SADWindowSize;
CV_PROP_RW int preFilterCap;
CV_PROP_RW int uniquenessRatio;
CV_PROP_RW int P1;
CV_PROP_RW int P2;
CV_PROP_RW int speckleWindowSize;
CV_PROP_RW int speckleRange;
CV_PROP_RW int disp12MaxDiff;
CV_PROP_RW bool fullDP;

protected:
Ptr<StereoMatcher> sm;
};

//! filters off speckles (small regions of incorrectly computed disparity)
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff,
InputOutputArray buf=noArray() );

//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity, int numberOfDisparities,
int SADWindowSize );

//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDisp=1 );

//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues=false,
int ddepth=-1 );

CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold=3, double confidence=0.99);

}

#endif

#endif
@ -7,11 +7,12 @@
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
@ -40,741 +41,8 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_CALIB3D_HPP__
|
||||
#define __OPENCV_CALIB3D_HPP__
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#ifdef __OPENCV_BUILD
|
||||
#error this is a compatibility header which should not be used inside the OpenCV library
|
||||
#endif
|
||||
|
||||
/****************************************************************************************\
|
||||
* Camera Calibration, Pose Estimation and Stereo *
|
||||
\****************************************************************************************/
|
||||
|
||||
typedef struct CvPOSITObject CvPOSITObject;
|
||||
|
||||
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
|
||||
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
|
||||
|
||||
|
||||
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
|
||||
an object given its model and projection in a weak-perspective case */
|
||||
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
|
||||
double focal_length, CvTermCriteria criteria,
|
||||
float* rotation_matrix, float* translation_vector);
|
||||
|
||||
/* Releases CvPOSITObject structure */
|
||||
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
|
||||
|
||||
/* updates the number of RANSAC iterations */
|
||||
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
|
||||
int model_points, int max_iters );
|
||||
|
||||
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
|
||||
|
||||
/* Calculates fundamental matrix given a set of corresponding points */
|
||||
#define CV_FM_7POINT 1
|
||||
#define CV_FM_8POINT 2
|
||||
|
||||
#define CV_LMEDS 4
|
||||
#define CV_RANSAC 8
|
||||
|
||||
#define CV_FM_LMEDS_ONLY CV_LMEDS
|
||||
#define CV_FM_RANSAC_ONLY CV_RANSAC
|
||||
#define CV_FM_LMEDS CV_LMEDS
|
||||
#define CV_FM_RANSAC CV_RANSAC
|
||||
|
||||
enum
|
||||
{
|
||||
CV_ITERATIVE = 0,
|
||||
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
|
||||
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
|
||||
};
|
||||
|
||||
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
|
||||
CvMat* fundamental_matrix,
|
||||
int method CV_DEFAULT(CV_FM_RANSAC),
|
||||
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
|
||||
CvMat* status CV_DEFAULT(NULL) );
|
||||
|
||||
/* For each input point on one of images
|
||||
computes parameters of the corresponding
|
||||
epipolar line on the other image */
|
||||
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
|
||||
int which_image,
|
||||
const CvMat* fundamental_matrix,
|
||||
CvMat* correspondent_lines );
|
||||
|
||||
/* Triangulation functions */
|
||||
|
||||
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
|
||||
CvMat* projPoints1, CvMat* projPoints2,
|
||||
CvMat* points4D);
|
||||
|
||||
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
|
||||
CvMat* new_points1, CvMat* new_points2);
|
||||
|
||||
|
||||
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
|
||||
alpha=0 - only valid pixels will be retained in the undistorted image
|
||||
alpha=1 - all the source image pixels will be retained in the undistorted image
|
||||
*/
|
||||
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
|
||||
const CvMat* dist_coeffs,
|
||||
CvSize image_size, double alpha,
|
||||
CvMat* new_camera_matrix,
|
||||
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
|
||||
CvRect* valid_pixel_ROI CV_DEFAULT(0),
|
||||
int center_principal_point CV_DEFAULT(0));
|
||||
|
||||
/* Converts rotation vector to rotation matrix or vice versa */
|
||||
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
|
||||
CvMat* jacobian CV_DEFAULT(0) );
|
||||
|
||||
/* Finds perspective transformation between the object plane and image (view) plane */
|
||||
CVAPI(int) cvFindHomography( const CvMat* src_points,
|
||||
const CvMat* dst_points,
|
||||
CvMat* homography,
|
||||
int method CV_DEFAULT(0),
|
||||
double ransacReprojThreshold CV_DEFAULT(3),
|
||||
CvMat* mask CV_DEFAULT(0));
|
||||
|
||||
/* Computes RQ decomposition for 3x3 matrices */
|
||||
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
|
||||
CvMat *matrixQx CV_DEFAULT(NULL),
|
||||
CvMat *matrixQy CV_DEFAULT(NULL),
|
||||
CvMat *matrixQz CV_DEFAULT(NULL),
|
||||
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
|
||||
|
||||
/* Computes projection matrix decomposition */
|
||||
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
|
||||
CvMat *rotMatr, CvMat *posVect,
|
||||
CvMat *rotMatrX CV_DEFAULT(NULL),
|
||||
CvMat *rotMatrY CV_DEFAULT(NULL),
|
||||
CvMat *rotMatrZ CV_DEFAULT(NULL),
|
||||
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
|
||||
|
||||
/* Computes d(AB)/dA and d(AB)/dB */
|
||||
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
|
||||
|
||||
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
|
||||
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
|
||||
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
|
||||
const CvMat* _rvec2, const CvMat* _tvec2,
|
||||
CvMat* _rvec3, CvMat* _tvec3,
|
||||
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
|
||||
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
|
||||
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
|
||||
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
|
||||
|
||||
/* Projects object points to the view plane using
|
||||
the specified extrinsic and intrinsic camera parameters */
|
||||
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
|
||||
const CvMat* translation_vector, const CvMat* camera_matrix,
|
||||
const CvMat* distortion_coeffs, CvMat* image_points,
|
||||
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
|
||||
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
|
||||
CvMat* dpddist CV_DEFAULT(NULL),
|
||||
double aspect_ratio CV_DEFAULT(0));
|
||||
|
||||
/* Finds extrinsic camera parameters from
|
||||
a few known corresponding point pairs and intrinsic parameters */
|
||||
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
|
||||
const CvMat* image_points,
|
||||
const CvMat* camera_matrix,
|
||||
const CvMat* distortion_coeffs,
|
||||
CvMat* rotation_vector,
|
||||
CvMat* translation_vector,
|
||||
int use_extrinsic_guess CV_DEFAULT(0) );
|
||||
|
||||
/* Computes initial estimate of the intrinsic camera parameters
|
||||
in case of planar calibration target (e.g. chessboard) */
|
||||
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
|
||||
const CvMat* image_points,
|
||||
const CvMat* npoints, CvSize image_size,
|
||||
CvMat* camera_matrix,
|
||||
double aspect_ratio CV_DEFAULT(1.) );
|
||||
|
||||
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
|
||||
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
|
||||
#define CV_CALIB_CB_FILTER_QUADS 4
|
||||
#define CV_CALIB_CB_FAST_CHECK 8
|
||||
|
||||
// Performs a fast check if a chessboard is in the input image. This is a workaround to
|
||||
// a problem of cvFindChessboardCorners being slow on images with no chessboard
|
||||
// - src: input image
|
||||
// - size: chessboard size
|
||||
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
|
||||
// 0 if there is no chessboard, -1 in case of error
|
||||
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
|
||||
|
||||
/* Detects corners on a chessboard calibration pattern */
|
||||
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
|
||||
CvPoint2D32f* corners,
|
||||
int* corner_count CV_DEFAULT(NULL),
|
||||
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
|
||||
|
||||
/* Draws individual chessboard corners or the whole chessboard detected */
|
||||
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
|
||||
CvPoint2D32f* corners,
|
||||
int count, int pattern_was_found );
|
||||
|
||||
#define CV_CALIB_USE_INTRINSIC_GUESS 1
|
||||
#define CV_CALIB_FIX_ASPECT_RATIO 2
|
||||
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
|
||||
#define CV_CALIB_ZERO_TANGENT_DIST 8
|
||||
#define CV_CALIB_FIX_FOCAL_LENGTH 16
|
||||
#define CV_CALIB_FIX_K1 32
|
||||
#define CV_CALIB_FIX_K2 64
|
||||
#define CV_CALIB_FIX_K3 128
|
||||
#define CV_CALIB_FIX_K4 2048
|
||||
#define CV_CALIB_FIX_K5 4096
|
||||
#define CV_CALIB_FIX_K6 8192
|
||||
#define CV_CALIB_RATIONAL_MODEL 16384
|
||||
#define CV_CALIB_THIN_PRISM_MODEL 32768
|
||||
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
|
||||
|
||||
|
||||
/* Finds intrinsic and extrinsic camera parameters
|
||||
from a few views of known calibration pattern */
|
||||
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
|
||||
const CvMat* image_points,
|
||||
const CvMat* point_counts,
|
||||
CvSize image_size,
|
||||
CvMat* camera_matrix,
|
||||
CvMat* distortion_coeffs,
|
||||
CvMat* rotation_vectors CV_DEFAULT(NULL),
|
||||
CvMat* translation_vectors CV_DEFAULT(NULL),
|
||||
int flags CV_DEFAULT(0),
|
||||
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
|
||||
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
|
||||
|
||||
/* Computes various useful characteristics of the camera from the data computed by
|
||||
cvCalibrateCamera2 */
|
||||
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
|
||||
CvSize image_size,
|
||||
double aperture_width CV_DEFAULT(0),
|
||||
double aperture_height CV_DEFAULT(0),
|
||||
double *fovx CV_DEFAULT(NULL),
|
||||
double *fovy CV_DEFAULT(NULL),
|
||||
double *focal_length CV_DEFAULT(NULL),
|
||||
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
|
||||
double *pixel_aspect_ratio CV_DEFAULT(NULL));
|
||||
|
||||
#define CV_CALIB_FIX_INTRINSIC 256
|
||||
#define CV_CALIB_SAME_FOCAL_LENGTH 512
|
||||
|
||||
/* Computes the transformation from one camera coordinate system to another one
|
||||
from a few correspondent views of the same calibration target. Optionally, calibrates
|
||||
both cameras */
|
||||
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
|
||||
const CvMat* image_points2, const CvMat* npoints,
|
||||
CvMat* camera_matrix1, CvMat* dist_coeffs1,
|
||||
CvMat* camera_matrix2, CvMat* dist_coeffs2,
|
||||
CvSize image_size, CvMat* R, CvMat* T,
|
||||
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
|
||||
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
|
||||
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
|
||||
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
|
||||
|
||||
#define CV_CALIB_ZERO_DISPARITY 1024
|
||||
|
||||
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
|
||||
views parallel (=> to make all the epipolar lines horizontal or vertical) */
|
||||
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
|
||||
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
|
||||
CvSize image_size, const CvMat* R, const CvMat* T,
|
||||
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
|
||||
CvMat* Q CV_DEFAULT(0),
|
||||
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
|
||||
double alpha CV_DEFAULT(-1),
|
||||
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
|
||||
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
|
||||
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
|
||||
|
||||
/* Computes rectification transformations for uncalibrated pair of images using a set
|
||||
of point correspondences */
|
||||
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
|
||||
const CvMat* F, CvSize img_size,
|
||||
CvMat* H1, CvMat* H2,
|
||||
double threshold CV_DEFAULT(5));
|
||||
|
||||
|
||||
|
||||
/* stereo correspondence parameters and functions */
|
||||
|
||||
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
|
||||
#define CV_STEREO_BM_XSOBEL 1
|
||||
|
||||
/* Block matching algorithm structure */
|
||||
typedef struct CvStereoBMState
|
||||
{
|
||||
// pre-filtering (normalization of input images)
|
||||
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
|
||||
int preFilterSize; // averaging window size: ~5x5..21x21
|
||||
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
|
||||
|
||||
// correspondence using Sum of Absolute Difference (SAD)
|
||||
int SADWindowSize; // ~5x5..21x21
|
||||
int minDisparity; // minimum disparity (can be negative)
|
||||
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
|
||||
|
||||
// post-filtering
|
||||
int textureThreshold; // the disparity is only computed for pixels
|
||||
// with textured enough neighborhood
|
||||
int uniquenessRatio; // accept the computed disparity d* only if
|
||||
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
|
||||
// for any d != d*+/-1 within the search range.
|
||||
int speckleWindowSize; // disparity variation window
|
||||
int speckleRange; // acceptable range of variation in window
|
||||
|
||||
int trySmallerWindows; // if 1, the results may be more accurate,
|
||||
// at the expense of slower processing
|
||||
CvRect roi1, roi2;
|
||||
int disp12MaxDiff;
|
||||
|
||||
// temporary buffers
|
||||
CvMat* preFilteredImg0;
|
||||
CvMat* preFilteredImg1;
|
||||
CvMat* slidingSumBuf;
|
||||
CvMat* cost;
|
||||
CvMat* disp;
|
||||
} CvStereoBMState;
|
||||
|
||||
#define CV_STEREO_BM_BASIC 0
|
||||
#define CV_STEREO_BM_FISH_EYE 1
|
||||
#define CV_STEREO_BM_NARROW 2
|
||||
|
||||
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
|
||||
int numberOfDisparities CV_DEFAULT(0));
|
||||
|
||||
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
|
||||
|
||||
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
|
||||
CvArr* disparity, CvStereoBMState* state );
|
||||
|
||||
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
|
||||
int numberOfDisparities, int SADWindowSize );
|
||||
|
||||
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
|
||||
int minDisparity, int numberOfDisparities,
|
||||
int disp12MaxDiff CV_DEFAULT(1) );
|
||||
|
||||
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
|
||||
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
|
||||
CvArr* _3dImage, const CvMat* Q,
|
||||
int handleMissingValues CV_DEFAULT(0) );
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////
|
||||
class CV_EXPORTS CvLevMarq
|
||||
{
|
||||
public:
|
||||
CvLevMarq();
|
||||
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
|
||||
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
|
||||
bool completeSymmFlag=false );
|
||||
~CvLevMarq();
|
||||
void init( int nparams, int nerrs, CvTermCriteria criteria=
|
||||
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
|
||||
bool completeSymmFlag=false );
|
||||
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
|
||||
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
|
||||
|
||||
void clear();
|
||||
void step();
|
||||
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
|
||||
|
||||
cv::Ptr<CvMat> mask;
|
||||
cv::Ptr<CvMat> prevParam;
|
||||
cv::Ptr<CvMat> param;
|
||||
cv::Ptr<CvMat> J;
|
||||
cv::Ptr<CvMat> err;
|
||||
cv::Ptr<CvMat> JtJ;
|
||||
cv::Ptr<CvMat> JtJN;
|
||||
cv::Ptr<CvMat> JtErr;
|
||||
cv::Ptr<CvMat> JtJV;
|
||||
cv::Ptr<CvMat> JtJW;
|
||||
double prevErrNorm, errNorm;
|
||||
int lambdaLg10;
|
||||
CvTermCriteria criteria;
|
||||
int state;
|
||||
int iters;
|
||||
bool completeSymmFlag;
|
||||
};
|
||||
|
||||
namespace cv
|
||||
{
|
||||
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
|
||||
CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());
|
||||
|
||||
//! type of the robust estimation algorithm
|
||||
enum
|
||||
{
|
||||
LMEDS=CV_LMEDS, //!< least-median algorithm
|
||||
RANSAC=CV_RANSAC //!< RANSAC algorithm
|
||||
};
|
||||
|
||||
//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
|
||||
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
|
||||
int method=0, double ransacReprojThreshold=3,
|
||||
OutputArray mask=noArray());
|
||||
|
||||
//! variant of findHomography for backward compatibility
|
||||
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
|
||||
OutputArray mask, int method=0, double ransacReprojThreshold=3);
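As a usage note for the two findHomography overloads above, here is a minimal sketch with synthetic correspondences (the points are made up: dst is src shifted by (5, 3), so the recovered H should be close to a pure translation).

.. code-block:: cpp

    #include "opencv2/calib3d.hpp"
    #include <vector>

    int main()
    {
        // Synthetic correspondences on a small grid; dst = src + (5, 3).
        std::vector<cv::Point2f> src, dst;
        for (int y = 0; y < 3; y++)
            for (int x = 0; x < 3; x++)
            {
                src.push_back(cv::Point2f(10.f * x, 10.f * y));
                dst.push_back(cv::Point2f(10.f * x + 5, 10.f * y + 3));
            }

        cv::Mat mask;
        cv::Mat H = cv::findHomography(src, dst, cv::RANSAC, 3., mask);
        return H.empty() ? 1 : 0;
    }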
|
||||
|
||||
//! Computes RQ decomposition of 3x3 matrix
|
||||
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
|
||||
OutputArray Qx=noArray(),
|
||||
OutputArray Qy=noArray(),
|
||||
OutputArray Qz=noArray());
|
||||
|
||||
//! Decomposes the projection matrix into a camera matrix, the rotation matrix and the translation vector
|
||||
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
|
||||
OutputArray rotMatrix, OutputArray transVect,
|
||||
OutputArray rotMatrixX=noArray(),
|
||||
OutputArray rotMatrixY=noArray(),
|
||||
OutputArray rotMatrixZ=noArray(),
|
||||
OutputArray eulerAngles=noArray() );
|
||||
|
||||
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
|
||||
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
|
||||
OutputArray dABdA,
|
||||
OutputArray dABdB );
|
||||
|
||||
//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
|
||||
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
|
||||
InputArray rvec2, InputArray tvec2,
|
||||
OutputArray rvec3, OutputArray tvec3,
|
||||
OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
|
||||
OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
|
||||
OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
|
||||
OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
|
||||
|
||||
//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
|
||||
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
|
||||
InputArray rvec, InputArray tvec,
|
||||
InputArray cameraMatrix, InputArray distCoeffs,
|
||||
OutputArray imagePoints,
|
||||
OutputArray jacobian=noArray(),
|
||||
double aspectRatio=0 );
|
||||
|
||||
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
|
||||
enum
|
||||
{
|
||||
ITERATIVE=CV_ITERATIVE,
|
||||
EPNP=CV_EPNP,
|
||||
P3P=CV_P3P
|
||||
};
|
||||
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
|
||||
InputArray cameraMatrix, InputArray distCoeffs,
|
||||
OutputArray rvec, OutputArray tvec,
|
||||
bool useExtrinsicGuess=false, int flags=ITERATIVE);
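A hedged sketch of calling the solvePnP declared above; the object/image correspondences and the camera matrix are made-up placeholders (four corners of a planar 10x10 square seen by a pinhole camera).

.. code-block:: cpp

    #include "opencv2/calib3d.hpp"
    #include <vector>

    int main()
    {
        // Made-up planar object points and their image projections.
        std::vector<cv::Point3f> obj;
        obj.push_back(cv::Point3f( 0,  0, 0));
        obj.push_back(cv::Point3f(10,  0, 0));
        obj.push_back(cv::Point3f(10, 10, 0));
        obj.push_back(cv::Point3f( 0, 10, 0));

        std::vector<cv::Point2f> img;
        img.push_back(cv::Point2f(320, 240));
        img.push_back(cv::Point2f(400, 240));
        img.push_back(cv::Point2f(400, 320));
        img.push_back(cv::Point2f(320, 320));

        // Placeholder intrinsics; zero distortion.
        cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);

        cv::Mat rvec, tvec;
        bool ok = cv::solvePnP(obj, img, K, cv::noArray(), rvec, tvec);
        return ok ? 0 : 1;
    }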
|
||||
|
||||
//! computes the camera pose from a few 3D points and the corresponding projections; possible outliers are handled.
|
||||
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
|
||||
InputArray imagePoints,
|
||||
InputArray cameraMatrix,
|
||||
InputArray distCoeffs,
|
||||
OutputArray rvec,
|
||||
OutputArray tvec,
|
||||
bool useExtrinsicGuess = false,
|
||||
int iterationsCount = 100,
|
||||
float reprojectionError = 8.0,
|
||||
int minInliersCount = 100,
|
||||
OutputArray inliers = noArray(),
|
||||
int flags = ITERATIVE);
|
||||
|
||||
//! initializes camera matrix from a few 3D points and the corresponding projections.
|
||||
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
|
||||
InputArrayOfArrays imagePoints,
|
||||
Size imageSize, double aspectRatio=1. );
|
||||
|
||||
enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
|
||||
CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
|
||||
|
||||
//! finds checkerboard pattern of the specified size in the image
|
||||
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
|
||||
OutputArray corners,
|
||||
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
|
||||
|
||||
//! finds subpixel-accurate positions of the chessboard corners
|
||||
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
|
||||
|
||||
//! draws the checkerboard pattern (found or partly found) in the image
|
||||
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
|
||||
InputArray corners, bool patternWasFound );
|
||||
|
||||
enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
|
||||
CALIB_CB_CLUSTERING = 4 };
|
||||
|
||||
//! finds circles' grid pattern of the specified size in the image
|
||||
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
|
||||
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
|
||||
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
|
||||
|
||||
//! deprecated; use findCirclesGrid() instead.
|
||||
CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
|
||||
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
|
||||
enum
|
||||
{
|
||||
CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
|
||||
CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
|
||||
CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
|
||||
CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
|
||||
CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
|
||||
CALIB_FIX_K1 = CV_CALIB_FIX_K1,
|
||||
CALIB_FIX_K2 = CV_CALIB_FIX_K2,
|
||||
CALIB_FIX_K3 = CV_CALIB_FIX_K3,
|
||||
CALIB_FIX_K4 = CV_CALIB_FIX_K4,
|
||||
CALIB_FIX_K5 = CV_CALIB_FIX_K5,
|
||||
CALIB_FIX_K6 = CV_CALIB_FIX_K6,
|
||||
CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
|
||||
CALIB_THIN_PRISM_MODEL = CV_CALIB_THIN_PRISM_MODEL,
|
||||
CALIB_FIX_S1_S2_S3_S4=CV_CALIB_FIX_S1_S2_S3_S4,
|
||||
// only for stereo
|
||||
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
|
||||
CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
|
||||
// for stereo rectification
|
||||
CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
|
||||
};
|
||||
|
||||
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
|
||||
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
|
||||
InputArrayOfArrays imagePoints,
|
||||
Size imageSize,
|
||||
CV_OUT InputOutputArray cameraMatrix,
|
||||
CV_OUT InputOutputArray distCoeffs,
|
||||
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
|
||||
int flags=0, TermCriteria criteria = TermCriteria(
|
||||
TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
|
||||
|
||||
//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
|
||||
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
|
||||
Size imageSize,
|
||||
double apertureWidth,
|
||||
double apertureHeight,
|
||||
CV_OUT double& fovx,
|
||||
CV_OUT double& fovy,
|
||||
CV_OUT double& focalLength,
|
||||
CV_OUT Point2d& principalPoint,
|
||||
CV_OUT double& aspectRatio );
|
||||
|
||||
//! finds intrinsic and extrinsic parameters of a stereo camera
|
||||
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
|
||||
InputArrayOfArrays imagePoints1,
|
||||
InputArrayOfArrays imagePoints2,
|
||||
CV_OUT InputOutputArray cameraMatrix1,
|
||||
CV_OUT InputOutputArray distCoeffs1,
|
||||
CV_OUT InputOutputArray cameraMatrix2,
|
||||
CV_OUT InputOutputArray distCoeffs2,
|
||||
Size imageSize, OutputArray R,
|
||||
OutputArray T, OutputArray E, OutputArray F,
|
||||
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
|
||||
int flags=CALIB_FIX_INTRINSIC );
|
||||
|
||||
|
||||
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
|
||||
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
|
||||
InputArray cameraMatrix2, InputArray distCoeffs2,
|
||||
Size imageSize, InputArray R, InputArray T,
|
||||
OutputArray R1, OutputArray R2,
|
||||
OutputArray P1, OutputArray P2,
|
||||
OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
|
||||
double alpha=-1, Size newImageSize=Size(),
|
||||
CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
|
||||
|
||||
//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
|
||||
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
|
||||
InputArray F, Size imgSize,
|
||||
OutputArray H1, OutputArray H2,
|
||||
double threshold=5 );
|
||||
|
||||
//! computes the rectification transformations for a 3-head camera where all the heads lie on the same line.
|
||||
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
|
||||
InputArray cameraMatrix2, InputArray distCoeffs2,
|
||||
InputArray cameraMatrix3, InputArray distCoeffs3,
|
||||
InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
|
||||
Size imageSize, InputArray R12, InputArray T12,
|
||||
InputArray R13, InputArray T13,
|
||||
OutputArray R1, OutputArray R2, OutputArray R3,
|
||||
OutputArray P1, OutputArray P2, OutputArray P3,
|
||||
OutputArray Q, double alpha, Size newImgSize,
|
||||
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
|
||||
|
||||
//! returns the optimal new camera matrix
|
||||
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
|
||||
Size imageSize, double alpha, Size newImgSize=Size(),
|
||||
CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
|
||||
|
||||
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
|
||||
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
|
||||
|
||||
//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
|
||||
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
|
||||
|
||||
//! for backward compatibility
|
||||
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
|
||||
|
||||
//! the algorithm for finding fundamental matrix
|
||||
enum
|
||||
{
|
||||
FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
|
||||
FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
|
||||
FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
|
||||
FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
|
||||
};
|
||||
|
||||
//! finds fundamental matrix from a set of corresponding 2D points
|
||||
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
|
||||
int method=FM_RANSAC,
|
||||
double param1=3., double param2=0.99,
|
||||
OutputArray mask=noArray());
|
||||
|
||||
//! variant of findFundamentalMat for backward compatibility
|
||||
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
|
||||
OutputArray mask, int method=FM_RANSAC,
|
||||
double param1=3., double param2=0.99);
|
||||
|
||||
//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
|
||||
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2, double focal = 1.0, Point2d pp = Point2d(0, 0),
|
||||
int method = CV_RANSAC,
|
||||
double prob = 0.999, double threshold = 1.0, OutputArray mask = noArray() );
|
||||
|
||||
//! decomposes the essential matrix into the two possible rotation matrices and a translation vector
|
||||
CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
|
||||
|
||||
//! recover relative camera pose from a set of corresponding 2D points
|
||||
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t,
|
||||
double focal = 1.0, Point2d pp = Point2d(0, 0),
|
||||
InputOutputArray mask = noArray());
|
||||
|
||||
|
||||
//! finds coordinates of epipolar lines corresponding to the specified points
|
||||
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
|
||||
int whichImage, InputArray F,
|
||||
OutputArray lines );
|
||||
|
||||
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
|
||||
InputArray projPoints1, InputArray projPoints2,
|
||||
OutputArray points4D );
|
||||
|
||||
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
|
||||
OutputArray newPoints1, OutputArray newPoints2 );
|
||||
|
||||
|
||||
class CV_EXPORTS_W StereoMatcher : public Algorithm
|
||||
{
|
||||
public:
|
||||
CV_WRAP virtual void compute( InputArray left, InputArray right,
|
||||
OutputArray disparity ) = 0;
|
||||
};
|
||||
|
||||
enum { STEREO_DISP_SCALE=16, STEREO_PREFILTER_NORMALIZED_RESPONSE = 0, STEREO_PREFILTER_XSOBEL = 1 };
|
||||
|
||||
CV_EXPORTS Ptr<StereoMatcher> createStereoBM(int numDisparities=0, int SADWindowSize=21);
|
||||
|
||||
CV_EXPORTS Ptr<StereoMatcher> createStereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
|
||||
int P1=0, int P2=0, int disp12MaxDiff=0,
|
||||
int preFilterCap=0, int uniquenessRatio=0,
|
||||
int speckleWindowSize=0, int speckleRange=0,
|
||||
bool fullDP=false);
|
||||
|
||||
template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
|
||||
|
||||
// to be moved to "compat" module
|
||||
class CV_EXPORTS_W StereoBM
|
||||
{
|
||||
public:
|
||||
enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1,
|
||||
BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 };
|
||||
|
||||
//! the default constructor
|
||||
CV_WRAP StereoBM();
|
||||
//! the full constructor taking the camera-specific preset, number of disparities and the SAD window size
|
||||
CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
|
||||
//! the method that reinitializes the state. The previous content is destroyed
|
||||
void init(int preset, int ndisparities=0, int SADWindowSize=21);
|
||||
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
|
||||
CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right,
|
||||
OutputArray disparity, int disptype=CV_16S );
|
||||
|
||||
//! pointer to the underlying CvStereoBMState
|
||||
Ptr<CvStereoBMState> state;
|
||||
};
|
||||
|
||||
|
||||
// to be moved to "compat" module
|
||||
class CV_EXPORTS_W StereoSGBM
|
||||
{
|
||||
public:
|
||||
enum { DISP_SHIFT=4, DISP_SCALE = (1<<DISP_SHIFT) };
|
||||
|
||||
//! the default constructor
|
||||
CV_WRAP StereoSGBM();
|
||||
|
||||
//! the full constructor taking all the necessary algorithm parameters
|
||||
CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
|
||||
int P1=0, int P2=0, int disp12MaxDiff=0,
|
||||
int preFilterCap=0, int uniquenessRatio=0,
|
||||
int speckleWindowSize=0, int speckleRange=0,
|
||||
bool fullDP=false);
|
||||
//! the destructor
|
||||
virtual ~StereoSGBM();
|
||||
|
||||
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
|
||||
CV_WRAP_AS(compute) virtual void operator()(InputArray left, InputArray right,
|
||||
OutputArray disp);
|
||||
|
||||
CV_PROP_RW int minDisparity;
|
||||
CV_PROP_RW int numberOfDisparities;
|
||||
CV_PROP_RW int SADWindowSize;
|
||||
CV_PROP_RW int preFilterCap;
|
||||
CV_PROP_RW int uniquenessRatio;
|
||||
CV_PROP_RW int P1;
|
||||
CV_PROP_RW int P2;
|
||||
CV_PROP_RW int speckleWindowSize;
|
||||
CV_PROP_RW int speckleRange;
|
||||
CV_PROP_RW int disp12MaxDiff;
|
||||
CV_PROP_RW bool fullDP;
|
||||
|
||||
protected:
|
||||
Ptr<StereoMatcher> sm;
|
||||
};
|
||||
|
||||
//! filters off speckles (small regions of incorrectly computed disparity)
|
||||
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff,
|
||||
InputOutputArray buf=noArray() );
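For reference, a minimal sketch of the filterSpeckles call declared above, run on a synthetic 16-bit disparity map (sizes and thresholds are illustrative).

.. code-block:: cpp

    #include "opencv2/calib3d.hpp"

    int main()
    {
        // Synthetic disparity map with one tiny isolated blob ("speckle").
        cv::Mat disp = cv::Mat::zeros(100, 100, CV_16S);
        disp(cv::Rect(40, 40, 3, 3)).setTo(800);

        // Pixels whose disparities differ by at most 16 are grouped into one region;
        // regions smaller than 25 pixels are reset to newVal (-16 marks them invalid).
        cv::filterSpeckles(disp, -16, 25, 16);
        return 0;
    }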
|
||||
|
||||
//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
|
||||
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
|
||||
int minDisparity, int numberOfDisparities,
|
||||
int SADWindowSize );
|
||||
|
||||
//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
|
||||
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
|
||||
int minDisparity, int numberOfDisparities,
|
||||
int disp12MaxDisp=1 );
|
||||
|
||||
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
|
||||
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
|
||||
OutputArray _3dImage, InputArray Q,
|
||||
bool handleMissingValues=false,
|
||||
int ddepth=-1 );
|
||||
|
||||
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
|
||||
OutputArray out, OutputArray inliers,
|
||||
double ransacThreshold=3, double confidence=0.99);
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
#include "opencv2/calib3d.hpp"
@ -9,10 +9,10 @
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
@ -69,7 +69,7 @@
|
||||
#ifdef DEBUG_CHESSBOARD
|
||||
# include "opencv2/opencv_modules.hpp"
|
||||
# ifdef HAVE_OPENCV_HIGHGUI
|
||||
# include "opencv2/highgui/highgui.hpp"
|
||||
# include "opencv2/highgui.hpp"
|
||||
# else
|
||||
# undef DEBUG_CHESSBOARD
|
||||
# endif
|
||||
|
@ -49,7 +49,7 @@
|
||||
#if defined(DEBUG_WINDOWS)
|
||||
# include "opencv2/opencv_modules.hpp"
|
||||
# ifdef HAVE_OPENCV_HIGHGUI
|
||||
# include "opencv2/highgui/highgui.hpp"
|
||||
# include "opencv2/highgui.hpp"
|
||||
# else
|
||||
# undef DEBUG_WINDOWS
|
||||
# endif
|
||||
|
@ -46,7 +46,7 @@
|
||||
#ifdef DEBUG_CIRCLES
|
||||
# include "opencv2/opencv_modules.hpp"
|
||||
# ifdef HAVE_OPENCV_HIGHGUI
|
||||
# include "opencv2/highgui/highgui.hpp"
|
||||
# include "opencv2/highgui.hpp"
|
||||
# else
|
||||
# undef DEBUG_CIRCLES
|
||||
# endif
|
||||
|
@ -46,11 +46,11 @@
|
||||
#include "cvconfig.h"
|
||||
#endif
|
||||
|
||||
#include "opencv2/calib3d/calib3d.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/core/internal.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/features2d.hpp"
|
||||
#include <vector>
|
||||
|
||||
#ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
|
@ -1,7 +1,7 @@
|
||||
#ifndef CV_CHESSBOARDGENERATOR_H143KJTVYM389YTNHKFDHJ89NYVMO3VLMEJNTBGUEIYVCM203P
|
||||
#define CV_CHESSBOARDGENERATOR_H143KJTVYM389YTNHKFDHJ89NYVMO3VLMEJNTBGUEIYVCM203P
|
||||
|
||||
#include "opencv2/calib3d/calib3d.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
@ -9,11 +9,11 @@
|
||||
#ifndef __OPENCV_TEST_PRECOMP_HPP__
|
||||
#define __OPENCV_TEST_PRECOMP_HPP__
|
||||
|
||||
#include "opencv2/ts/ts.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/ts.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/calib3d/calib3d.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <iostream>
|
||||
|
||||
namespace cvtest
|
||||
|
@ -42,9 +42,9 @@ In OpenCV 2.4 you only need :ocv:func:`applyColorMap` to apply a colormap on a g
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
#include <opencv2/contrib/contrib.hpp>
|
||||
#include <opencv2/core/core.hpp>
|
||||
#include <opencv2/highgui/highgui.hpp>
|
||||
#include <opencv2/contrib.hpp>
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
|
@ -16,9 +16,9 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
|
||||
#include <iostream>
|
||||
|
@ -16,9 +16,9 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
@ -16,9 +16,9 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
@ -16,9 +16,9 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
@ -16,9 +16,9 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
@ -16,11 +16,11 @@
|
||||
* See <http://www.opensource.org/licenses/bsd-license>
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/objdetect.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
974
modules/contrib/include/opencv2/contrib.hpp
Normal file
@ -0,0 +1,974 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_CONTRIB_HPP__
|
||||
#define __OPENCV_CONTRIB_HPP__
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/features2d.hpp"
|
||||
#include "opencv2/objdetect.hpp"
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
/****************************************************************************************\
|
||||
* Adaptive Skin Detector *
|
||||
\****************************************************************************************/
|
||||
|
||||
class CV_EXPORTS CvAdaptiveSkinDetector
|
||||
{
|
||||
private:
|
||||
enum {
|
||||
GSD_HUE_LT = 3,
|
||||
GSD_HUE_UT = 33,
|
||||
GSD_INTENSITY_LT = 15,
|
||||
GSD_INTENSITY_UT = 250
|
||||
};
|
||||
|
||||
class CV_EXPORTS Histogram
|
||||
{
|
||||
private:
|
||||
enum {
|
||||
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
|
||||
};
|
||||
|
||||
protected:
|
||||
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
|
||||
|
||||
public:
|
||||
CvHistogram *fHistogram;
|
||||
Histogram();
|
||||
virtual ~Histogram();
|
||||
|
||||
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
|
||||
void mergeWith(Histogram *source, double weight);
|
||||
};
|
||||
|
||||
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
|
||||
double fHistogramMergeFactor, fHuePercentCovered;
|
||||
Histogram histogramHueMotion, skinHueHistogram;
|
||||
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
|
||||
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
|
||||
|
||||
protected:
|
||||
void initData(IplImage *src, int widthDivider, int heightDivider);
|
||||
void adaptiveFilter();
|
||||
|
||||
public:
|
||||
|
||||
enum {
|
||||
MORPHING_METHOD_NONE = 0,
|
||||
MORPHING_METHOD_ERODE = 1,
|
||||
MORPHING_METHOD_ERODE_ERODE = 2,
|
||||
MORPHING_METHOD_ERODE_DILATE = 3
|
||||
};
|
||||
|
||||
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
|
||||
virtual ~CvAdaptiveSkinDetector();
|
||||
|
||||
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
|
||||
};
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Fuzzy MeanShift Tracker *
|
||||
\****************************************************************************************/
|
||||
|
||||
class CV_EXPORTS CvFuzzyPoint {
|
||||
public:
|
||||
double x, y, value;
|
||||
|
||||
CvFuzzyPoint(double _x, double _y);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyCurve {
|
||||
private:
|
||||
std::vector<CvFuzzyPoint> points;
|
||||
double value, centre;
|
||||
|
||||
bool between(double x, double x1, double x2);
|
||||
|
||||
public:
|
||||
CvFuzzyCurve();
|
||||
~CvFuzzyCurve();
|
||||
|
||||
void setCentre(double _centre);
|
||||
double getCentre();
|
||||
void clear();
|
||||
void addPoint(double x, double y);
|
||||
double calcValue(double param);
|
||||
double getValue();
|
||||
void setValue(double _value);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyFunction {
|
||||
public:
|
||||
std::vector<CvFuzzyCurve> curves;
|
||||
|
||||
CvFuzzyFunction();
|
||||
~CvFuzzyFunction();
|
||||
void addCurve(CvFuzzyCurve *curve, double value = 0);
|
||||
void resetValues();
|
||||
double calcValue();
|
||||
CvFuzzyCurve *newCurve();
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyRule {
|
||||
private:
|
||||
CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
|
||||
CvFuzzyCurve *fuzzyOutput;
|
||||
public:
|
||||
CvFuzzyRule();
|
||||
~CvFuzzyRule();
|
||||
void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
|
||||
double calcValue(double param1, double param2);
|
||||
CvFuzzyCurve *getOutputCurve();
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyController {
|
||||
private:
|
||||
std::vector<CvFuzzyRule*> rules;
|
||||
public:
|
||||
CvFuzzyController();
|
||||
~CvFuzzyController();
|
||||
void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
|
||||
double calcOutput(double param1, double param2);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyMeanShiftTracker
|
||||
{
|
||||
private:
|
||||
class FuzzyResizer
|
||||
{
|
||||
private:
|
||||
CvFuzzyFunction iInput, iOutput;
|
||||
CvFuzzyController fuzzyController;
|
||||
public:
|
||||
FuzzyResizer();
|
||||
int calcOutput(double edgeDensity, double density);
|
||||
};
|
||||
|
||||
class SearchWindow
|
||||
{
|
||||
public:
|
||||
FuzzyResizer *fuzzyResizer;
|
||||
int x, y;
|
||||
int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
|
||||
int ldx, ldy, ldw, ldh, numShifts, numIters;
|
||||
int xGc, yGc;
|
||||
long m00, m01, m10, m11, m02, m20;
|
||||
double ellipseAngle;
|
||||
double density;
|
||||
unsigned int depthLow, depthHigh;
|
||||
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
|
||||
|
||||
SearchWindow();
|
||||
~SearchWindow();
|
||||
void setSize(int _x, int _y, int _width, int _height);
|
||||
void initDepthValues(IplImage *maskImage, IplImage *depthMap);
|
||||
bool shift();
|
||||
void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
|
||||
void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
|
||||
};
|
||||
|
||||
public:
|
||||
enum TrackingState
|
||||
{
|
||||
tsNone = 0,
|
||||
tsSearching = 1,
|
||||
tsTracking = 2,
|
||||
tsSetWindow = 3,
|
||||
tsDisabled = 10
|
||||
};
|
||||
|
||||
enum ResizeMethod {
|
||||
rmEdgeDensityLinear = 0,
|
||||
rmEdgeDensityFuzzy = 1,
|
||||
rmInnerDensity = 2
|
||||
};
|
||||
|
||||
enum {
|
||||
MinKernelMass = 1000
|
||||
};
|
||||
|
||||
SearchWindow kernel;
|
||||
int searchMode;
|
||||
|
||||
private:
|
||||
enum
|
||||
{
|
||||
MaxMeanShiftIteration = 5,
|
||||
MaxSetSizeIteration = 5
|
||||
};
|
||||
|
||||
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
|
||||
|
||||
public:
|
||||
CvFuzzyMeanShiftTracker();
|
||||
~CvFuzzyMeanShiftTracker();
|
||||
|
||||
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
|
||||
};
|
||||
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class CV_EXPORTS Octree
|
||||
{
|
||||
public:
|
||||
struct Node
|
||||
{
|
||||
Node() {}
|
||||
int begin, end;
|
||||
float x_min, x_max, y_min, y_max, z_min, z_max;
|
||||
int maxLevels;
|
||||
bool isLeaf;
|
||||
int children[8];
|
||||
};
|
||||
|
||||
Octree();
|
||||
Octree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual ~Octree();
|
||||
|
||||
virtual void buildTree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual void getPointsWithinSphere( const Point3f& center, float radius,
|
||||
std::vector<Point3f>& points ) const;
|
||||
const std::vector<Node>& getNodes() const { return nodes; }
|
||||
private:
|
||||
int minPoints;
|
||||
std::vector<Point3f> points;
|
||||
std::vector<Node> nodes;
|
||||
|
||||
virtual void buildNext(size_t node_ind);
|
||||
};
|
||||
|
||||
|
||||
class CV_EXPORTS Mesh3D
|
||||
{
|
||||
public:
|
||||
struct EmptyMeshException {};
|
||||
|
||||
Mesh3D();
|
||||
Mesh3D(const std::vector<Point3f>& vtx);
|
||||
~Mesh3D();
|
||||
|
||||
void buildOctree();
|
||||
void clearOctree();
|
||||
float estimateResolution(float tryRatio = 0.1f);
|
||||
void computeNormals(float normalRadius, int minNeighbors = 20);
|
||||
void computeNormals(const std::vector<int>& subset, float normalRadius, int minNeighbors = 20);
|
||||
|
||||
void writeAsVrml(const std::string& file, const std::vector<Scalar>& colors = std::vector<Scalar>()) const;
|
||||
|
||||
std::vector<Point3f> vtx;
|
||||
std::vector<Point3f> normals;
|
||||
float resolution;
|
||||
Octree octree;
|
||||
|
||||
const static Point3f allzero;
|
||||
};
|
||||
|
||||
class CV_EXPORTS SpinImageModel
|
||||
{
|
||||
public:
|
||||
|
||||
/* model parameters, leave unset for default or auto estimate */
|
||||
float normalRadius;
|
||||
int minNeighbors;
|
||||
|
||||
float binSize;
|
||||
int imageWidth;
|
||||
|
||||
float lambda;
|
||||
float gamma;
|
||||
|
||||
float T_GeometriccConsistency;
|
||||
float T_GroupingCorespondances;
|
||||
|
||||
/* public interface */
|
||||
SpinImageModel();
|
||||
explicit SpinImageModel(const Mesh3D& mesh);
|
||||
~SpinImageModel();
|
||||
|
||||
void setLogger(std::ostream* log);
|
||||
void selectRandomSubset(float ratio);
|
||||
void setSubset(const std::vector<int>& subset);
|
||||
void compute();
|
||||
|
||||
void match(const SpinImageModel& scene, std::vector< std::vector<Vec2i> >& result);
|
||||
|
||||
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
|
||||
|
||||
size_t getSpinCount() const { return spinImages.rows; }
|
||||
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
|
||||
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
|
||||
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
|
||||
|
||||
const Mesh3D& getMesh() const { return mesh; }
|
||||
Mesh3D& getMesh() { return mesh; }
|
||||
|
||||
/* static utility functions */
|
||||
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
|
||||
|
||||
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
|
||||
|
||||
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2);
|
||||
|
||||
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2,
|
||||
float gamma);
|
||||
protected:
|
||||
void defaultParams();
|
||||
|
||||
void matchSpinToModel(const Mat& spin, std::vector<int>& indeces,
|
||||
std::vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
|
||||
|
||||
void repackSpinImages(const std::vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
|
||||
|
||||
std::vector<int> subset;
|
||||
Mesh3D mesh;
|
||||
Mat spinImages;
|
||||
std::ostream* out;
|
||||
};
|
||||
|
||||
class CV_EXPORTS TickMeter
{
public:
    TickMeter();
    void start();
    void stop();

    int64 getTimeTicks() const;
    double getTimeMicro() const;
    double getTimeMilli() const;
    double getTimeSec()   const;
    int64 getCounter() const;

    void reset();
private:
    int64 counter;
    int64 sumTime;
    int64 startTime;
};
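For reference, a minimal usage sketch of the TickMeter declared above; the timed workload and iteration count are illustrative.

.. code-block:: cpp

    #include "opencv2/contrib.hpp"
    #include <iostream>

    int main()
    {
        cv::TickMeter tm;
        for (int i = 0; i < 10; i++)
        {
            tm.start();
            cv::Mat m = cv::Mat::eye(512, 512, CV_32F) * 3.0;   // some work to time
            tm.stop();
        }
        std::cout << "total " << tm.getTimeMilli() << " ms over "
                  << tm.getCounter() << " runs" << std::endl;
        return 0;
    }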
|
||||
|
||||
CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);

class CV_EXPORTS SelfSimDescriptor
{
public:
    SelfSimDescriptor();
    SelfSimDescriptor(int _ssize, int _lsize,
                      int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
                      int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
                      int _nangles=DEFAULT_NUM_ANGLES);
    SelfSimDescriptor(const SelfSimDescriptor& ss);
    virtual ~SelfSimDescriptor();
    SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);

    size_t getDescriptorSize() const;
    Size getGridSize( Size imgsize, Size winStride ) const;

    virtual void compute(const Mat& img, std::vector<float>& descriptors, Size winStride=Size(),
                         const std::vector<Point>& locations=std::vector<Point>()) const;
    virtual void computeLogPolarMapping(Mat& mappingMask) const;
    virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;

    int smallSize;
    int largeSize;
    int startDistanceBucket;
    int numberOfDistanceBuckets;
    int numberOfAngles;

    enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
           DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
           DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
};


typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);

class CV_EXPORTS LevMarqSparse {
public:
    LevMarqSparse();
    LevMarqSparse(int npoints, // number of points
                  int ncameras, // number of cameras
                  int nPointParams, // number of params per one point (3 in case of 3D points)
                  int nCameraParams, // number of parameters per one camera
                  int nErrParams, // number of parameters in measurement vector
                                  // for 1 point at one camera (2 in case of 2D projections)
                  Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
                                   // 1 - point is visible for the camera, 0 - invisible
                  Mat& P0, // starting vector of parameters, first cameras then points
                  Mat& X, // measurements, in order of visibility. non visible cases are skipped
                  TermCriteria criteria, // termination criteria

                  // callback for estimation of Jacobian matrices
                  void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
                                         Mat& cam_params, Mat& A, Mat& B, void* data),
                  // callback for estimation of backprojection errors
                  void (CV_CDECL * func)(int i, int j, Mat& point_params,
                                         Mat& cam_params, Mat& estim, void* data),
                  void* data, // user-specific data passed to the callbacks
                  BundleAdjustCallback cb, void* user_data
                  );

    virtual ~LevMarqSparse();

    virtual void run( int npoints, // number of points
                      int ncameras, // number of cameras
                      int nPointParams, // number of params per one point (3 in case of 3D points)
                      int nCameraParams, // number of parameters per one camera
                      int nErrParams, // number of parameters in measurement vector
                                      // for 1 point at one camera (2 in case of 2D projections)
                      Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
                                       // 1 - point is visible for the camera, 0 - invisible
                      Mat& P0, // starting vector of parameters, first cameras then points
                      Mat& X, // measurements, in order of visibility. non visible cases are skipped
                      TermCriteria criteria, // termination criteria

                      // callback for estimation of Jacobian matrices
                      void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
                                             Mat& cam_params, Mat& A, Mat& B, void* data),
                      // callback for estimation of backprojection errors
                      void (CV_CDECL * func)(int i, int j, Mat& point_params,
                                             Mat& cam_params, Mat& estim, void* data),
                      void* data // user-specific data passed to the callbacks
                      );

    virtual void clear();

    // useful function to do simple bundle adjustment tasks
    static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output)
                             const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
                             const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
                             std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
                             std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
                             std::vector<Mat>& T, // translation vector of all cameras (input and output)
                             std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
                             const TermCriteria& criteria=
                             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
                             BundleAdjustCallback cb = 0, void* user_data = 0);

public:
    virtual void optimize(CvMat &_vis); //main function that runs minimization

    //iteratively asks for measurement for visible camera-point pairs
    void ask_for_proj(CvMat &_vis,bool once=false);
    //iteratively asks for Jacobians for every camera_point pair
    void ask_for_projac(CvMat &_vis);

    CvMat* err; //error X-hX
    double prevErrNorm, errNorm;
    double lambda;
    CvTermCriteria criteria;
    int iters;

    CvMat** U; //size of array is equal to number of cameras
    CvMat** V; //size of array is equal to number of points
    CvMat** inv_V_star; //inverse of V*

    CvMat** A;
    CvMat** B;
    CvMat** W;

    CvMat* X; //measurement
    CvMat* hX; //current measurement extimation given new parameter vector

    CvMat* prevP; //current already accepted parameter.
    CvMat* P; // parameters used to evaluate function with new params
              // this parameters may be rejected

    CvMat* deltaP; //computed increase of parameters (result of normal system solution )

    CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
                // length of array is j = number of cameras
    CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
                // length of array is i = number of points

    CvMat** Yj; //length of array is i = num_points

    CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params

    CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation

    CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j

    int num_cams;
    int num_points;
    int num_err_param;
    int num_cam_param;
    int num_point_param;

    //target function and jacobian pointers, which needs to be initialized
    void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
    void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);

    void* data;

    BundleAdjustCallback cb;
    void* user_data;
};

CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
                                 CV_OUT std::vector<std::vector<Point> >& results, CV_OUT std::vector<float>& cost,
                                 double templScale=1, int maxMatches = 20,
                                 double minMatchDistance = 1.0, int padX = 3,
                                 int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
                                 double orientationWeight = 0.5, double truncate = 20);


class CV_EXPORTS_W StereoVar
{
public:
    // Flags
    enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
    enum {CYCLE_O, CYCLE_V};
    enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};

    //! the default constructor
    CV_WRAP StereoVar();

    //! the full constructor taking all the necessary algorithm parameters
    CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);

    //! the destructor
    virtual ~StereoVar();

    //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
    CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp);

    CV_PROP_RW int levels;
    CV_PROP_RW double pyrScale;
    CV_PROP_RW int nIt;
    CV_PROP_RW int minDisp;
    CV_PROP_RW int maxDisp;
    CV_PROP_RW int poly_n;
    CV_PROP_RW double poly_sigma;
    CV_PROP_RW float fi;
    CV_PROP_RW float lambda;
    CV_PROP_RW int penalization;
    CV_PROP_RW int cycle;
    CV_PROP_RW int flags;

private:
    void autoParams();
    void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
    void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
    void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
};

CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);

class CV_EXPORTS Directory
{
public:
    static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
    static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
    static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
};

/*
 * Generation of a set of different colors by the following way:
 * 1) generate more then need colors (in "factor" times) in RGB,
 * 2) convert them to Lab,
 * 3) choose the needed count of colors from the set that are more different from
 *    each other,
 * 4) convert the colors back to RGB
 */
CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
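A short sketch of how the declaration above could be called, assuming the opencv2/contrib.hpp umbrella header from this commit; count is supplied by the caller and factor keeps its default.

    #include "opencv2/contrib.hpp"
    #include <vector>

    static std::vector<cv::Scalar> makePalette(size_t count)
    {
        std::vector<cv::Scalar> colors;
        cv::generateColors(colors, count);   // factor keeps its default of 100
        return colors;                       // 'count' mutually distinct colors
    }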


/*
 * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
 * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
 */
enum { ROTATION = 1,
       TRANSLATION = 2,
       RIGID_BODY_MOTION = 4
     };
CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
                              const Mat& image0, const Mat& depth0, const Mat& mask0,
                              const Mat& image1, const Mat& depth1, const Mat& mask1,
                              const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
                              const std::vector<int>& iterCounts=std::vector<int>(),
                              const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
                              int transformType=RIGID_BODY_MOTION );
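A hedged sketch of a call to the declaration above: the two RGB-D frames and the 3x3 camera matrix are assumed to be supplied by the caller, the masks are left empty, and the remaining parameters keep their declared defaults.

    #include "opencv2/contrib.hpp"

    static bool estimateRigidMotion(const cv::Mat& image0, const cv::Mat& depth0,
                                    const cv::Mat& image1, const cv::Mat& depth1,
                                    const cv::Mat& cameraMatrix, cv::Mat& Rt)
    {
        return cv::RGBDOdometry(Rt, cv::Mat(),              // no initial transform
                                image0, depth0, cv::Mat(),  // frame 0, no mask
                                image1, depth1, cv::Mat(),  // frame 1, no mask
                                cameraMatrix);              // defaults for depth range, iterations, etc.
    }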

/**
 *Bilinear interpolation technique.
 *
 *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
 *of the four nearest neighbouring Cartesian pixels to the center of the RF.
 *The same principle is applied to the inverse transformation.
 *
 *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
 */
class CV_EXPORTS LogPolar_Interp
{
public:

    LogPolar_Interp() {}

    /**
     *Constructor
     *\param w the width of the input image
     *\param h the height of the input image
     *\param center the transformation center: where the output precision is maximal
     *\param R the number of rings of the cortical image (default value 70 pixel)
     *\param ro0 the radius of the blind spot (default value 3 pixel)
     *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
     *       \a 0 means that the retinal image is computed within the inscribed circle.
     *\param S the number of sectors of the cortical image (default value 70 pixel).
     *       Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
     *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
     *       \a 0 means that the parameter \a S is provided by the user.
     */
    LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
                    int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
    /**
     *Transformation from Cartesian image to cortical (log-polar) image.
     *\param source the Cartesian image
     *\return the transformed image (cortical image)
     */
    const Mat to_cortical(const Mat &source);
    /**
     *Transformation from cortical image to retinal (inverse log-polar) image.
     *\param source the cortical image
     *\return the transformed image (retinal image)
     */
    const Mat to_cartesian(const Mat &source);
    /**
     *Destructor
     */
    ~LogPolar_Interp();

protected:

    Mat Rsri;
    Mat Csri;

    int S, R, M, N;
    int top, bottom,left,right;
    double ro0, romax, a, q;
    int interp;

    Mat ETAyx;
    Mat CSIyx;

    void create_map(int M, int N, int R, int S, double ro0);
};
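A round-trip sketch for the class above, assuming a grayscale cv::Mat is already available; the transformation center is placed at the image center and the other constructor parameters keep their defaults.

    #include "opencv2/contrib.hpp"

    static void logPolarRoundTrip(const cv::Mat& gray)
    {
        cv::LogPolar_Interp lp(gray.cols, gray.rows,
                               cv::Point2i(gray.cols / 2, gray.rows / 2));
        cv::Mat cortical = lp.to_cortical(gray);      // Cartesian -> log-polar
        cv::Mat retinal  = lp.to_cartesian(cortical); // inverse transform
    }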

/**
 *Overlapping circular receptive fields technique
 *
 *The Cartesian plane is divided in two regions: the fovea and the periphery.
 *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
 *the periphery we use the overlapping Gaussian circular RFs.
 *
 *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
 */
class CV_EXPORTS LogPolar_Overlapping
{
public:
    LogPolar_Overlapping() {}

    /**
     *Constructor
     *\param w the width of the input image
     *\param h the height of the input image
     *\param center the transformation center: where the output precision is maximal
     *\param R the number of rings of the cortical image (default value 70 pixel)
     *\param ro0 the radius of the blind spot (default value 3 pixel)
     *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
     *       \a 0 means that the retinal image is computed within the inscribed circle.
     *\param S the number of sectors of the cortical image (default value 70 pixel).
     *       Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
     *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
     *       \a 0 means that the parameter \a S is provided by the user.
     */
    LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
                         double ro0=3.0, int full=1, int S=117, int sp=1);
    /**
     *Transformation from Cartesian image to cortical (log-polar) image.
     *\param source the Cartesian image
     *\return the transformed image (cortical image)
     */
    const Mat to_cortical(const Mat &source);
    /**
     *Transformation from cortical image to retinal (inverse log-polar) image.
     *\param source the cortical image
     *\return the transformed image (retinal image)
     */
    const Mat to_cartesian(const Mat &source);
    /**
     *Destructor
     */
    ~LogPolar_Overlapping();

protected:

    Mat Rsri;
    Mat Csri;
    std::vector<int> Rsr;
    std::vector<int> Csr;
    std::vector<double> Wsr;

    int S, R, M, N, ind1;
    int top, bottom,left,right;
    double ro0, romax, a, q;

    struct kernel
    {
        kernel() { w = 0; }
        std::vector<double> weights;
        int w;
    };

    Mat ETAyx;
    Mat CSIyx;
    std::vector<kernel> w_ker_2D;

    void create_map(int M, int N, int R, int S, double ro0);
};

/**
 * Adjacent receptive fields technique
 *
 *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF.
 *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
 *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
 *to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
 *
 *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
 */
class CV_EXPORTS LogPolar_Adjacent
{
public:
    LogPolar_Adjacent() {}

    /**
     *Constructor
     *\param w the width of the input image
     *\param h the height of the input image
     *\param center the transformation center: where the output precision is maximal
     *\param R the number of rings of the cortical image (default value 70 pixel)
     *\param ro0 the radius of the blind spot (default value 3 pixel)
     *\param smin the size of the subpixel (default value 0.25 pixel)
     *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
     *       \a 0 means that the retinal image is computed within the inscribed circle.
     *\param S the number of sectors of the cortical image (default value 70 pixel).
     *       Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
     *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
     *       \a 0 means that the parameter \a S is provided by the user.
     */
    LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
    /**
     *Transformation from Cartesian image to cortical (log-polar) image.
     *\param source the Cartesian image
     *\return the transformed image (cortical image)
     */
    const Mat to_cortical(const Mat &source);
    /**
     *Transformation from cortical image to retinal (inverse log-polar) image.
     *\param source the cortical image
     *\return the transformed image (retinal image)
     */
    const Mat to_cartesian(const Mat &source);
    /**
     *Destructor
     */
    ~LogPolar_Adjacent();

protected:
    struct pixel
    {
        pixel() { u = v = 0; a = 0.; }
        int u;
        int v;
        double a;
    };
    int S, R, M, N;
    int top, bottom,left,right;
    double ro0, romax, a, q;
    std::vector<std::vector<pixel> > L;
    std::vector<double> A;

    void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
    bool get_uv(double x, double y, int&u, int&v);
    void create_map(int M, int N, int R, int S, double ro0, double smin);
};

CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);

class CV_EXPORTS LDA
{
public:
    // Initializes a LDA with num_components (default 0) and specifies how
    // samples are aligned (default dataAsRow=true).
    LDA(int num_components = 0) :
        _num_components(num_components) {};

    // Initializes and performs a Discriminant Analysis with Fisher's
    // Optimization Criterion on given data in src and corresponding labels
    // in labels. If 0 (or less) number of components are given, they are
    // automatically determined for given data in computation.
    LDA(InputArrayOfArrays src, InputArray labels,
        int num_components = 0) :
        _num_components(num_components)
    {
        this->compute(src, labels); //! compute eigenvectors and eigenvalues
    }

    // Serializes this object to a given filename.
    void save(const std::string& filename) const;

    // Deserializes this object from a given filename.
    void load(const std::string& filename);

    // Serializes this object to a given cv::FileStorage.
    void save(FileStorage& fs) const;

    // Deserializes this object from a given cv::FileStorage.
    void load(const FileStorage& node);

    // Destructor.
    ~LDA() {}

    //! Compute the discriminants for data in src and labels.
    void compute(InputArrayOfArrays src, InputArray labels);

    // Projects samples into the LDA subspace.
    Mat project(InputArray src);

    // Reconstructs projections from the LDA subspace.
    Mat reconstruct(InputArray src);

    // Returns the eigenvectors of this LDA.
    Mat eigenvectors() const { return _eigenvectors; };

    // Returns the eigenvalues of this LDA.
    Mat eigenvalues() const { return _eigenvalues; }

protected:
    bool _dataAsRow;
    int _num_components;
    Mat _eigenvectors;
    Mat _eigenvalues;

    void lda(InputArrayOfArrays src, InputArray labels);
};
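A sketch of the typical compute/project flow for the LDA class above; the training samples, labels and query are assumed to be prepared by the caller.

    #include "opencv2/contrib.hpp"
    #include <vector>

    static cv::Mat ldaProject(const std::vector<cv::Mat>& samples, // one observation per element
                              const cv::Mat& labels,               // integer class label per sample
                              const cv::Mat& query)
    {
        cv::LDA lda(samples, labels);   // eigenvectors/eigenvalues are computed on construction
        return lda.project(query);      // project a new sample into the discriminant subspace
    }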

class CV_EXPORTS_W FaceRecognizer : public Algorithm
{
public:
    //! virtual destructor
    virtual ~FaceRecognizer() {}

    // Trains a FaceRecognizer.
    CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;

    // Updates a FaceRecognizer.
    CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);

    // Gets a prediction from a FaceRecognizer.
    virtual int predict(InputArray src) const = 0;

    // Predicts the label and confidence for a given sample.
    CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;

    // Serializes this object to a given filename.
    CV_WRAP virtual void save(const std::string& filename) const;

    // Deserializes this object from a given filename.
    CV_WRAP virtual void load(const std::string& filename);

    // Serializes this object to a given cv::FileStorage.
    virtual void save(FileStorage& fs) const = 0;

    // Deserializes this object from a given cv::FileStorage.
    virtual void load(const FileStorage& fs) = 0;

};

CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
                                                           int grid_x=8, int grid_y=8, double threshold = DBL_MAX);

enum
{
    COLORMAP_AUTUMN = 0,
    COLORMAP_BONE = 1,
    COLORMAP_JET = 2,
    COLORMAP_WINTER = 3,
    COLORMAP_RAINBOW = 4,
    COLORMAP_OCEAN = 5,
    COLORMAP_SUMMER = 6,
    COLORMAP_SPRING = 7,
    COLORMAP_COOL = 8,
    COLORMAP_HSV = 9,
    COLORMAP_PINK = 10,
    COLORMAP_HOT = 11
};

CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
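A minimal sketch applying one of the colormap constants above to a grayscale image, assuming the opencv2/contrib.hpp umbrella header from this commit.

    #include "opencv2/contrib.hpp"

    static cv::Mat colorize(const cv::Mat& gray)
    {
        cv::Mat colored;
        cv::applyColorMap(gray, colored, cv::COLORMAP_JET); // any COLORMAP_* value above works
        return colored;
    }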

CV_EXPORTS bool initModule_contrib();
}

#include "opencv2/contrib/retina.hpp"

#include "opencv2/contrib/openfabmap.hpp"

#endif

#endif
@ -7,11 +7,12 @@
// copy or use the software.
//
//
// License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -40,936 +41,8 @@
//
//M*/
#ifndef __OPENCV_CONTRIB_HPP__
|
||||
#define __OPENCV_CONTRIB_HPP__
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
/****************************************************************************************\
|
||||
* Adaptive Skin Detector *
|
||||
\****************************************************************************************/
|
||||
|
||||
class CV_EXPORTS CvAdaptiveSkinDetector
|
||||
{
|
||||
private:
|
||||
enum {
|
||||
GSD_HUE_LT = 3,
|
||||
GSD_HUE_UT = 33,
|
||||
GSD_INTENSITY_LT = 15,
|
||||
GSD_INTENSITY_UT = 250
|
||||
};
|
||||
|
||||
class CV_EXPORTS Histogram
|
||||
{
|
||||
private:
|
||||
enum {
|
||||
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
|
||||
};
|
||||
|
||||
protected:
|
||||
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
|
||||
|
||||
public:
|
||||
CvHistogram *fHistogram;
|
||||
Histogram();
|
||||
virtual ~Histogram();
|
||||
|
||||
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
|
||||
void mergeWith(Histogram *source, double weight);
|
||||
};
|
||||
|
||||
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
|
||||
double fHistogramMergeFactor, fHuePercentCovered;
|
||||
Histogram histogramHueMotion, skinHueHistogram;
|
||||
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
|
||||
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
|
||||
|
||||
protected:
|
||||
void initData(IplImage *src, int widthDivider, int heightDivider);
|
||||
void adaptiveFilter();
|
||||
|
||||
public:
|
||||
|
||||
enum {
|
||||
MORPHING_METHOD_NONE = 0,
|
||||
MORPHING_METHOD_ERODE = 1,
|
||||
MORPHING_METHOD_ERODE_ERODE = 2,
|
||||
MORPHING_METHOD_ERODE_DILATE = 3
|
||||
};
|
||||
|
||||
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
|
||||
virtual ~CvAdaptiveSkinDetector();
|
||||
|
||||
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
|
||||
};
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Fuzzy MeanShift Tracker *
|
||||
\****************************************************************************************/
|
||||
|
||||
class CV_EXPORTS CvFuzzyPoint {
|
||||
public:
|
||||
double x, y, value;
|
||||
|
||||
CvFuzzyPoint(double _x, double _y);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyCurve {
|
||||
private:
|
||||
std::vector<CvFuzzyPoint> points;
|
||||
double value, centre;
|
||||
|
||||
bool between(double x, double x1, double x2);
|
||||
|
||||
public:
|
||||
CvFuzzyCurve();
|
||||
~CvFuzzyCurve();
|
||||
|
||||
void setCentre(double _centre);
|
||||
double getCentre();
|
||||
void clear();
|
||||
void addPoint(double x, double y);
|
||||
double calcValue(double param);
|
||||
double getValue();
|
||||
void setValue(double _value);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyFunction {
|
||||
public:
|
||||
std::vector<CvFuzzyCurve> curves;
|
||||
|
||||
CvFuzzyFunction();
|
||||
~CvFuzzyFunction();
|
||||
void addCurve(CvFuzzyCurve *curve, double value = 0);
|
||||
void resetValues();
|
||||
double calcValue();
|
||||
CvFuzzyCurve *newCurve();
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyRule {
|
||||
private:
|
||||
CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
|
||||
CvFuzzyCurve *fuzzyOutput;
|
||||
public:
|
||||
CvFuzzyRule();
|
||||
~CvFuzzyRule();
|
||||
void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
|
||||
double calcValue(double param1, double param2);
|
||||
CvFuzzyCurve *getOutputCurve();
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyController {
|
||||
private:
|
||||
std::vector<CvFuzzyRule*> rules;
|
||||
public:
|
||||
CvFuzzyController();
|
||||
~CvFuzzyController();
|
||||
void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
|
||||
double calcOutput(double param1, double param2);
|
||||
};
|
||||
|
||||
class CV_EXPORTS CvFuzzyMeanShiftTracker
|
||||
{
|
||||
private:
|
||||
class FuzzyResizer
|
||||
{
|
||||
private:
|
||||
CvFuzzyFunction iInput, iOutput;
|
||||
CvFuzzyController fuzzyController;
|
||||
public:
|
||||
FuzzyResizer();
|
||||
int calcOutput(double edgeDensity, double density);
|
||||
};
|
||||
|
||||
class SearchWindow
|
||||
{
|
||||
public:
|
||||
FuzzyResizer *fuzzyResizer;
|
||||
int x, y;
|
||||
int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
|
||||
int ldx, ldy, ldw, ldh, numShifts, numIters;
|
||||
int xGc, yGc;
|
||||
long m00, m01, m10, m11, m02, m20;
|
||||
double ellipseAngle;
|
||||
double density;
|
||||
unsigned int depthLow, depthHigh;
|
||||
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
|
||||
|
||||
SearchWindow();
|
||||
~SearchWindow();
|
||||
void setSize(int _x, int _y, int _width, int _height);
|
||||
void initDepthValues(IplImage *maskImage, IplImage *depthMap);
|
||||
bool shift();
|
||||
void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
|
||||
void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
|
||||
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
|
||||
};
|
||||
|
||||
public:
|
||||
enum TrackingState
|
||||
{
|
||||
tsNone = 0,
|
||||
tsSearching = 1,
|
||||
tsTracking = 2,
|
||||
tsSetWindow = 3,
|
||||
tsDisabled = 10
|
||||
};
|
||||
|
||||
enum ResizeMethod {
|
||||
rmEdgeDensityLinear = 0,
|
||||
rmEdgeDensityFuzzy = 1,
|
||||
rmInnerDensity = 2
|
||||
};
|
||||
|
||||
enum {
|
||||
MinKernelMass = 1000
|
||||
};
|
||||
|
||||
SearchWindow kernel;
|
||||
int searchMode;
|
||||
|
||||
private:
|
||||
enum
|
||||
{
|
||||
MaxMeanShiftIteration = 5,
|
||||
MaxSetSizeIteration = 5
|
||||
};
|
||||
|
||||
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
|
||||
|
||||
public:
|
||||
CvFuzzyMeanShiftTracker();
|
||||
~CvFuzzyMeanShiftTracker();
|
||||
|
||||
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
|
||||
};
|
||||
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class CV_EXPORTS Octree
|
||||
{
|
||||
public:
|
||||
struct Node
|
||||
{
|
||||
Node() {}
|
||||
int begin, end;
|
||||
float x_min, x_max, y_min, y_max, z_min, z_max;
|
||||
int maxLevels;
|
||||
bool isLeaf;
|
||||
int children[8];
|
||||
};
|
||||
|
||||
Octree();
|
||||
Octree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual ~Octree();
|
||||
|
||||
virtual void buildTree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual void getPointsWithinSphere( const Point3f& center, float radius,
|
||||
std::vector<Point3f>& points ) const;
|
||||
const std::vector<Node>& getNodes() const { return nodes; }
|
||||
private:
|
||||
int minPoints;
|
||||
std::vector<Point3f> points;
|
||||
std::vector<Node> nodes;
|
||||
|
||||
virtual void buildNext(size_t node_ind);
|
||||
};
|
||||
|
||||
|
||||
class CV_EXPORTS Mesh3D
|
||||
{
|
||||
public:
|
||||
struct EmptyMeshException {};
|
||||
|
||||
Mesh3D();
|
||||
Mesh3D(const std::vector<Point3f>& vtx);
|
||||
~Mesh3D();
|
||||
|
||||
void buildOctree();
|
||||
void clearOctree();
|
||||
float estimateResolution(float tryRatio = 0.1f);
|
||||
void computeNormals(float normalRadius, int minNeighbors = 20);
|
||||
void computeNormals(const std::vector<int>& subset, float normalRadius, int minNeighbors = 20);
|
||||
|
||||
void writeAsVrml(const std::string& file, const std::vector<Scalar>& colors = std::vector<Scalar>()) const;
|
||||
|
||||
std::vector<Point3f> vtx;
|
||||
std::vector<Point3f> normals;
|
||||
float resolution;
|
||||
Octree octree;
|
||||
|
||||
const static Point3f allzero;
|
||||
};
|
||||
|
||||
class CV_EXPORTS SpinImageModel
|
||||
{
|
||||
public:
|
||||
|
||||
/* model parameters, leave unset for default or auto estimate */
|
||||
float normalRadius;
|
||||
int minNeighbors;
|
||||
|
||||
float binSize;
|
||||
int imageWidth;
|
||||
|
||||
float lambda;
|
||||
float gamma;
|
||||
|
||||
float T_GeometriccConsistency;
|
||||
float T_GroupingCorespondances;
|
||||
|
||||
/* public interface */
|
||||
SpinImageModel();
|
||||
explicit SpinImageModel(const Mesh3D& mesh);
|
||||
~SpinImageModel();
|
||||
|
||||
void setLogger(std::ostream* log);
|
||||
void selectRandomSubset(float ratio);
|
||||
void setSubset(const std::vector<int>& subset);
|
||||
void compute();
|
||||
|
||||
void match(const SpinImageModel& scene, std::vector< std::vector<Vec2i> >& result);
|
||||
|
||||
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
|
||||
|
||||
size_t getSpinCount() const { return spinImages.rows; }
|
||||
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
|
||||
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
|
||||
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
|
||||
|
||||
const Mesh3D& getMesh() const { return mesh; }
|
||||
Mesh3D& getMesh() { return mesh; }
|
||||
|
||||
/* static utility functions */
|
||||
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
|
||||
|
||||
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
|
||||
|
||||
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2);
|
||||
|
||||
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2,
|
||||
float gamma);
|
||||
protected:
|
||||
void defaultParams();
|
||||
|
||||
void matchSpinToModel(const Mat& spin, std::vector<int>& indeces,
|
||||
std::vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
|
||||
|
||||
void repackSpinImages(const std::vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
|
||||
|
||||
std::vector<int> subset;
|
||||
Mesh3D mesh;
|
||||
Mat spinImages;
|
||||
std::ostream* out;
|
||||
};
|
||||
|
||||
class CV_EXPORTS TickMeter
|
||||
{
|
||||
public:
|
||||
TickMeter();
|
||||
void start();
|
||||
void stop();
|
||||
|
||||
int64 getTimeTicks() const;
|
||||
double getTimeMicro() const;
|
||||
double getTimeMilli() const;
|
||||
double getTimeSec() const;
|
||||
int64 getCounter() const;
|
||||
|
||||
void reset();
|
||||
private:
|
||||
int64 counter;
|
||||
int64 sumTime;
|
||||
int64 startTime;
|
||||
};
|
||||
|
||||
CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
|
||||
|
||||
class CV_EXPORTS SelfSimDescriptor
|
||||
{
|
||||
public:
|
||||
SelfSimDescriptor();
|
||||
SelfSimDescriptor(int _ssize, int _lsize,
|
||||
int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
|
||||
int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
|
||||
int _nangles=DEFAULT_NUM_ANGLES);
|
||||
SelfSimDescriptor(const SelfSimDescriptor& ss);
|
||||
virtual ~SelfSimDescriptor();
|
||||
SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
|
||||
|
||||
size_t getDescriptorSize() const;
|
||||
Size getGridSize( Size imgsize, Size winStride ) const;
|
||||
|
||||
virtual void compute(const Mat& img, std::vector<float>& descriptors, Size winStride=Size(),
|
||||
const std::vector<Point>& locations=std::vector<Point>()) const;
|
||||
virtual void computeLogPolarMapping(Mat& mappingMask) const;
|
||||
virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
|
||||
|
||||
int smallSize;
|
||||
int largeSize;
|
||||
int startDistanceBucket;
|
||||
int numberOfDistanceBuckets;
|
||||
int numberOfAngles;
|
||||
|
||||
enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
|
||||
DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
|
||||
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
|
||||
};
|
||||
|
||||
|
||||
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
|
||||
|
||||
class CV_EXPORTS LevMarqSparse {
|
||||
public:
|
||||
LevMarqSparse();
|
||||
LevMarqSparse(int npoints, // number of points
|
||||
int ncameras, // number of cameras
|
||||
int nPointParams, // number of params per one point (3 in case of 3D points)
|
||||
int nCameraParams, // number of parameters per one camera
|
||||
int nErrParams, // number of parameters in measurement vector
|
||||
// for 1 point at one camera (2 in case of 2D projections)
|
||||
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
|
||||
// 1 - point is visible for the camera, 0 - invisible
|
||||
Mat& P0, // starting vector of parameters, first cameras then points
|
||||
Mat& X, // measurements, in order of visibility. non visible cases are skipped
|
||||
TermCriteria criteria, // termination criteria
|
||||
|
||||
// callback for estimation of Jacobian matrices
|
||||
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
|
||||
Mat& cam_params, Mat& A, Mat& B, void* data),
|
||||
// callback for estimation of backprojection errors
|
||||
void (CV_CDECL * func)(int i, int j, Mat& point_params,
|
||||
Mat& cam_params, Mat& estim, void* data),
|
||||
void* data, // user-specific data passed to the callbacks
|
||||
BundleAdjustCallback cb, void* user_data
|
||||
);
|
||||
|
||||
virtual ~LevMarqSparse();
|
||||
|
||||
virtual void run( int npoints, // number of points
|
||||
int ncameras, // number of cameras
|
||||
int nPointParams, // number of params per one point (3 in case of 3D points)
|
||||
int nCameraParams, // number of parameters per one camera
|
||||
int nErrParams, // number of parameters in measurement vector
|
||||
// for 1 point at one camera (2 in case of 2D projections)
|
||||
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
|
||||
// 1 - point is visible for the camera, 0 - invisible
|
||||
Mat& P0, // starting vector of parameters, first cameras then points
|
||||
Mat& X, // measurements, in order of visibility. non visible cases are skipped
|
||||
TermCriteria criteria, // termination criteria
|
||||
|
||||
// callback for estimation of Jacobian matrices
|
||||
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
|
||||
Mat& cam_params, Mat& A, Mat& B, void* data),
|
||||
// callback for estimation of backprojection errors
|
||||
void (CV_CDECL * func)(int i, int j, Mat& point_params,
|
||||
Mat& cam_params, Mat& estim, void* data),
|
||||
void* data // user-specific data passed to the callbacks
|
||||
);
|
||||
|
||||
virtual void clear();
|
||||
|
||||
// useful function to do simple bundle adjustment tasks
|
||||
static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output)
|
||||
const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
|
||||
const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
|
||||
std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
|
||||
std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
|
||||
std::vector<Mat>& T, // translation vector of all cameras (input and output)
|
||||
std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
|
||||
const TermCriteria& criteria=
|
||||
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
|
||||
BundleAdjustCallback cb = 0, void* user_data = 0);
|
||||
|
||||
public:
|
||||
virtual void optimize(CvMat &_vis); //main function that runs minimization
|
||||
|
||||
//iteratively asks for measurement for visible camera-point pairs
|
||||
void ask_for_proj(CvMat &_vis,bool once=false);
|
||||
//iteratively asks for Jacobians for every camera_point pair
|
||||
void ask_for_projac(CvMat &_vis);
|
||||
|
||||
CvMat* err; //error X-hX
|
||||
double prevErrNorm, errNorm;
|
||||
double lambda;
|
||||
CvTermCriteria criteria;
|
||||
int iters;
|
||||
|
||||
CvMat** U; //size of array is equal to number of cameras
|
||||
CvMat** V; //size of array is equal to number of points
|
||||
CvMat** inv_V_star; //inverse of V*
|
||||
|
||||
CvMat** A;
|
||||
CvMat** B;
|
||||
CvMat** W;
|
||||
|
||||
CvMat* X; //measurement
|
||||
CvMat* hX; //current measurement extimation given new parameter vector
|
||||
|
||||
CvMat* prevP; //current already accepted parameter.
|
||||
CvMat* P; // parameters used to evaluate function with new params
|
||||
// this parameters may be rejected
|
||||
|
||||
CvMat* deltaP; //computed increase of parameters (result of normal system solution )
|
||||
|
||||
CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
|
||||
// length of array is j = number of cameras
|
||||
CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
|
||||
// length of array is i = number of points
|
||||
|
||||
CvMat** Yj; //length of array is i = num_points
|
||||
|
||||
CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
|
||||
|
||||
CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation
|
||||
|
||||
CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j
|
||||
|
||||
int num_cams;
|
||||
int num_points;
|
||||
int num_err_param;
|
||||
int num_cam_param;
|
||||
int num_point_param;
|
||||
|
||||
//target function and jacobian pointers, which needs to be initialized
|
||||
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
|
||||
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
|
||||
|
||||
void* data;
|
||||
|
||||
BundleAdjustCallback cb;
|
||||
void* user_data;
|
||||
};
|
||||
|
||||
CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
|
||||
CV_OUT std::vector<std::vector<Point> >& results, CV_OUT std::vector<float>& cost,
|
||||
double templScale=1, int maxMatches = 20,
|
||||
double minMatchDistance = 1.0, int padX = 3,
|
||||
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
|
||||
double orientationWeight = 0.5, double truncate = 20);
|
||||
|
||||
|
||||
class CV_EXPORTS_W StereoVar
|
||||
{
|
||||
public:
|
||||
// Flags
|
||||
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
|
||||
enum {CYCLE_O, CYCLE_V};
|
||||
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
|
||||
|
||||
//! the default constructor
|
||||
CV_WRAP StereoVar();
|
||||
|
||||
//! the full constructor taking all the necessary algorithm parameters
|
||||
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
|
||||
|
||||
//! the destructor
|
||||
virtual ~StereoVar();
|
||||
|
||||
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
|
||||
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp);
|
||||
|
||||
CV_PROP_RW int levels;
|
||||
CV_PROP_RW double pyrScale;
|
||||
CV_PROP_RW int nIt;
|
||||
CV_PROP_RW int minDisp;
|
||||
CV_PROP_RW int maxDisp;
|
||||
CV_PROP_RW int poly_n;
|
||||
CV_PROP_RW double poly_sigma;
|
||||
CV_PROP_RW float fi;
|
||||
CV_PROP_RW float lambda;
|
||||
CV_PROP_RW int penalization;
|
||||
CV_PROP_RW int cycle;
|
||||
CV_PROP_RW int flags;
|
||||
|
||||
private:
|
||||
void autoParams();
|
||||
void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
|
||||
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
|
||||
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
|
||||
};
|
||||
|
||||
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
|
||||
|
||||
class CV_EXPORTS Directory
|
||||
{
|
||||
public:
|
||||
static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
|
||||
static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
|
||||
static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
|
||||
};
|
||||
|
||||
/*
|
||||
* Generation of a set of different colors by the following way:
|
||||
* 1) generate more then need colors (in "factor" times) in RGB,
|
||||
* 2) convert them to Lab,
|
||||
* 3) choose the needed count of colors from the set that are more different from
|
||||
* each other,
|
||||
* 4) convert the colors back to RGB
|
||||
*/
|
||||
CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
|
||||
|
||||
|
||||
/*
|
||||
* Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
|
||||
* "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
|
||||
*/
|
||||
enum { ROTATION = 1,
|
||||
TRANSLATION = 2,
|
||||
RIGID_BODY_MOTION = 4
|
||||
};
|
||||
CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
|
||||
const Mat& image0, const Mat& depth0, const Mat& mask0,
|
||||
const Mat& image1, const Mat& depth1, const Mat& mask1,
|
||||
const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
|
||||
const std::vector<int>& iterCounts=std::vector<int>(),
|
||||
const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
|
||||
int transformType=RIGID_BODY_MOTION );
|
||||
|
||||
/**
|
||||
*Bilinear interpolation technique.
|
||||
*
|
||||
*The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
|
||||
*of the four nearest neighbouring Cartesian pixels to the center of the RF.
|
||||
*The same principle is applied to the inverse transformation.
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Interp
|
||||
{
|
||||
public:
|
||||
|
||||
LogPolar_Interp() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
|
||||
int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Interp();
|
||||
|
||||
protected:
|
||||
|
||||
Mat Rsri;
|
||||
Mat Csri;
|
||||
|
||||
int S, R, M, N;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
int interp;
|
||||
|
||||
Mat ETAyx;
|
||||
Mat CSIyx;
|
||||
|
||||
void create_map(int M, int N, int R, int S, double ro0);
|
||||
};
|
||||
|
||||
/**
|
||||
*Overlapping circular receptive fields technique
|
||||
*
|
||||
*The Cartesian plane is divided in two regions: the fovea and the periphery.
|
||||
*The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
|
||||
*the periphery we use the overlapping Gaussian circular RFs.
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Overlapping
|
||||
{
|
||||
public:
|
||||
LogPolar_Overlapping() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
|
||||
double ro0=3.0, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Overlapping();
|
||||
|
||||
protected:
|
||||
|
||||
Mat Rsri;
|
||||
Mat Csri;
|
||||
std::vector<int> Rsr;
|
||||
std::vector<int> Csr;
|
||||
std::vector<double> Wsr;
|
||||
|
||||
int S, R, M, N, ind1;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
|
||||
struct kernel
|
||||
{
|
||||
kernel() { w = 0; }
|
||||
std::vector<double> weights;
|
||||
int w;
|
||||
};
|
||||
|
||||
Mat ETAyx;
|
||||
Mat CSIyx;
|
||||
std::vector<kernel> w_ker_2D;
|
||||
|
||||
void create_map(int M, int N, int R, int S, double ro0);
|
||||
};
|
||||
|
||||
/**
|
||||
* Adjacent receptive fields technique
|
||||
*
|
||||
*All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF.
|
||||
*The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
|
||||
*This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
|
||||
*to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Adjacent
|
||||
{
|
||||
public:
|
||||
LogPolar_Adjacent() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param smin the size of the subpixel (default value 0.25 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Adjacent();
|
||||
|
||||
protected:
|
||||
struct pixel
|
||||
{
|
||||
pixel() { u = v = 0; a = 0.; }
|
||||
int u;
|
||||
int v;
|
||||
double a;
|
||||
};
|
||||
int S, R, M, N;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
std::vector<std::vector<pixel> > L;
|
||||
std::vector<double> A;
|
||||
|
||||
void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
|
||||
bool get_uv(double x, double y, int&u, int&v);
|
||||
void create_map(int M, int N, int R, int S, double ro0, double smin);
|
||||
};
|
||||
|
||||
CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
|
||||
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
|
||||
|
||||
class CV_EXPORTS LDA
|
||||
{
|
||||
public:
|
||||
// Initializes a LDA with num_components (default 0) and specifies how
|
||||
// samples are aligned (default dataAsRow=true).
|
||||
LDA(int num_components = 0) :
|
||||
_num_components(num_components) {};
|
||||
|
||||
// Initializes and performs a Discriminant Analysis with Fisher's
|
||||
// Optimization Criterion on given data in src and corresponding labels
|
||||
// in labels. If 0 (or less) number of components are given, they are
|
||||
// automatically determined for given data in computation.
|
||||
LDA(InputArrayOfArrays src, InputArray labels,
|
||||
int num_components = 0) :
|
||||
_num_components(num_components)
|
||||
{
|
||||
this->compute(src, labels); //! compute eigenvectors and eigenvalues
|
||||
}
|
||||
|
||||
// Serializes this object to a given filename.
|
||||
void save(const std::string& filename) const;
|
||||
|
||||
// Deserializes this object from a given filename.
|
||||
void load(const std::string& filename);
|
||||
|
||||
// Serializes this object to a given cv::FileStorage.
|
||||
void save(FileStorage& fs) const;
|
||||
|
||||
// Deserializes this object from a given cv::FileStorage.
|
||||
void load(const FileStorage& node);
|
||||
|
||||
// Destructor.
|
||||
~LDA() {}
|
||||
|
||||
//! Compute the discriminants for data in src and labels.
|
||||
void compute(InputArrayOfArrays src, InputArray labels);
|
||||
|
||||
// Projects samples into the LDA subspace.
|
||||
Mat project(InputArray src);
|
||||
|
||||
// Reconstructs projections from the LDA subspace.
|
||||
Mat reconstruct(InputArray src);
|
||||
|
||||
// Returns the eigenvectors of this LDA.
|
||||
Mat eigenvectors() const { return _eigenvectors; };
|
||||
|
||||
// Returns the eigenvalues of this LDA.
|
||||
Mat eigenvalues() const { return _eigenvalues; }
|
||||
|
||||
protected:
|
||||
bool _dataAsRow;
|
||||
int _num_components;
|
||||
Mat _eigenvectors;
|
||||
Mat _eigenvalues;
|
||||
|
||||
void lda(InputArrayOfArrays src, InputArray labels);
|
||||
};
|
||||
|
||||
class CV_EXPORTS_W FaceRecognizer : public Algorithm
|
||||
{
|
||||
public:
|
||||
//! virtual destructor
|
||||
virtual ~FaceRecognizer() {}
|
||||
|
||||
// Trains a FaceRecognizer.
|
||||
CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
|
||||
|
||||
// Updates a FaceRecognizer.
|
||||
CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);
|
||||
|
||||
// Gets a prediction from a FaceRecognizer.
|
||||
virtual int predict(InputArray src) const = 0;
|
||||
|
||||
// Predicts the label and confidence for a given sample.
|
||||
CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
|
||||
|
||||
// Serializes this object to a given filename.
|
||||
CV_WRAP virtual void save(const std::string& filename) const;
|
||||
|
||||
// Deserializes this object from a given filename.
|
||||
CV_WRAP virtual void load(const std::string& filename);
|
||||
|
||||
// Serializes this object to a given cv::FileStorage.
|
||||
virtual void save(FileStorage& fs) const = 0;
|
||||
|
||||
// Deserializes this object from a given cv::FileStorage.
|
||||
virtual void load(const FileStorage& fs) = 0;
|
||||
|
||||
};
|
||||
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
|
||||
int grid_x=8, int grid_y=8, double threshold = DBL_MAX);

enum
{
    COLORMAP_AUTUMN = 0,
    COLORMAP_BONE = 1,
    COLORMAP_JET = 2,
    COLORMAP_WINTER = 3,
    COLORMAP_RAINBOW = 4,
    COLORMAP_OCEAN = 5,
    COLORMAP_SUMMER = 6,
    COLORMAP_SPRING = 7,
    COLORMAP_COOL = 8,
    COLORMAP_HSV = 9,
    COLORMAP_PINK = 10,
    COLORMAP_HOT = 11
};

CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
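
A short sketch of applyColorMap with one of the constants above; the choice of COLORMAP_JET and the function name are arbitrary examples, not taken from this diff.

    #include "opencv2/core.hpp"
    #include "opencv2/contrib.hpp"

    // False-color an 8-bit single-channel image for visualization.
    cv::Mat colorize(const cv::Mat& gray8u)
    {
        cv::Mat colored;
        cv::applyColorMap(gray8u, colored, cv::COLORMAP_JET); // any COLORMAP_* constant works here
        return colored;
    }
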

CV_EXPORTS bool initModule_contrib();
}

#include "opencv2/contrib/retina.hpp"

#include "opencv2/contrib/openfabmap.hpp"

#endif

#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif

#include "opencv2/contrib.hpp"

@ -2,8 +2,8 @@

#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)

#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>

#include <vector>

@ -43,12 +43,11 @@
#ifndef __OPENCV_HYBRIDTRACKER_H_
#define __OPENCV_HYBRIDTRACKER_H_

#include "opencv2/core/core.hpp"
#include "opencv2/core/operations.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/ml.hpp"

#ifdef __cplusplus

@ -52,8 +52,8 @@
#ifndef __OPENCV_OPENFABMAP_H_
#define __OPENCV_OPENFABMAP_H_

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"

#include <vector>
#include <list>

@ -72,7 +72,7 @@
 * Author: Alexandre Benoit
 */

#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
#include <valarray>

namespace cv

@ -40,7 +40,7 @@
//M*/

#include "precomp.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d.hpp"
#include <iostream>

using namespace cv;

@ -46,7 +46,7 @@
#include "precomp.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
#endif
#include <iostream>
#include <queue>

@ -42,7 +42,7 @@
#include "precomp.hpp"
#include <stdio.h>
#include <iostream>
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/hybridtracker.hpp"

using namespace cv;

@ -39,7 +39,6 @@
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/core.hpp"
#include "precomp.hpp"

#include <iostream>

@ -1,5 +1,5 @@

#include "opencv2/contrib/contrib.hpp"
#include "opencv2/contrib.hpp"

#ifdef WIN32
#include <windows.h>

@ -47,10 +47,10 @@
#include "cvconfig.h"
#endif

#include "opencv2/contrib/contrib.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/internal.hpp"

@ -44,11 +44,11 @@

#define SHOW_DEBUG_IMAGES 0

#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/calib3d.hpp"

#if SHOW_DEBUG_IMAGES
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
#endif

#include <iostream>

@ -9,8 +9,8 @@
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/contrib.hpp"
#include <iostream>

#endif

@ -2425,7 +2425,7 @@ The class provides the following features for all derived classes:

Here is an example of SIFT use in your application via the Algorithm interface: ::

    #include "opencv2/opencv.hpp"
    #include "opencv2/nonfree/nonfree.hpp"
    #include "opencv2/nonfree.hpp"

    ...
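
The documentation hunk above shows only the header rename and elides the example body, so here is a hedged sketch of the Algorithm-interface pattern it refers to, based on the 2.4-era nonfree documentation. The registry name "Feature2D.SIFT", the parameter name, and the function name are assumptions, not part of this diff.

    #include "opencv2/opencv.hpp"
    #include "opencv2/nonfree.hpp"

    #include <vector>

    // Obtain SIFT through the generic Algorithm registry instead of
    // constructing cv::SIFT directly.
    void siftViaAlgorithm(const cv::Mat& image)
    {
        cv::initModule_nonfree(); // registers the nonfree algorithms

        cv::Ptr<cv::Feature2D> sift =
            cv::Algorithm::create<cv::Feature2D>("Feature2D.SIFT");
        sift->set("contrastThreshold", 0.01); // tune a parameter by name

        std::vector<cv::KeyPoint> keypoints;
        sift->detect(image, keypoints);
    }
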

@ -30,14 +30,14 @@ All the OpenCV classes and functions are placed into the ``cv`` namespace. There

.. code-block:: c

    #include "opencv2/core/core.hpp"
    #include "opencv2/core.hpp"
    ...
    cv::Mat H = cv::findHomography(points1, points2, CV_RANSAC, 5);
    ...

or ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/core.hpp"
    using namespace cv;
    ...
    Mat H = findHomography(points1, points2, CV_RANSAC, 5 );

modules/core/include/opencv2/core.hpp (new file, 4726 changed lines)
File diff suppressed because it is too large

@ -46,7 +46,7 @@
#ifdef __cplusplus

#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"

#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4714 ) //__forceinline is not inlined

@ -45,7 +45,7 @@

#ifdef __cplusplus

#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/cuda_devptrs.hpp"

namespace cv { namespace gpu

@ -45,7 +45,7 @@

#ifdef __cplusplus

#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"

namespace cv { namespace ogl {

@ -9,7 +9,7 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/ts.hpp"

#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined

@ -1,7 +1,7 @@
#include <string>
#include <sstream>
#include "cvconfig.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "gl_core_3_1.hpp"

#ifdef HAVE_OPENGL

@ -42,7 +42,7 @@

#include "precomp.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/core/opengl.hpp"

/****************************************************************************************\
*                        [scaled] Identity matrix initialization                         *

@ -41,7 +41,7 @@
//M*/

#include "precomp.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/core/opengl.hpp"
#include "opencv2/core/gpumat.hpp"

#ifdef HAVE_OPENGL

@ -47,7 +47,7 @@
#include "cvconfig.h"
#endif

#include "opencv2/core/core.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/internal.hpp"

Some files were not shown because too many files have changed in this diff.