Merge pull request #803 from taka-no-me:split_c_cpp3

commit b0933dd473
@@ -1,4 +1,4 @@
-SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
+SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_ml opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
 ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS})

 if(NOT OCV_DEPENDENCIES_FOUND)
@@ -112,7 +112,9 @@ CV_INLINE float cvLogRatio( float val )
 /* each trainData matrix row is a sample */
 #define CV_ROW_SAMPLE 1

-#define CV_IS_ROW_SAMPLE( flags ) ( ( flags ) & CV_ROW_SAMPLE )
+#ifndef CV_IS_ROW_SAMPLE
+# define CV_IS_ROW_SAMPLE( flags ) ( ( flags ) & CV_ROW_SAMPLE )
+#endif

 /* Classifier supports tune function */
 #define CV_TUNABLE (1 << 1)
@@ -1,4 +1,5 @@
 #include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"

 #include "HOGfeatures.h"
 #include "cascadeclassifier.h"
@@ -1,4 +1,5 @@
 #include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"

 #include "haarfeatures.h"
 #include "cascadeclassifier.h"
@@ -1,4 +1,5 @@
 #include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"

 #include "lbpfeatures.h"
 #include "cascadeclassifier.h"
@@ -13,6 +13,13 @@ else(APPLE)
 DOC "OpenCL root directory"
 NO_DEFAULT_PATH)

+find_path(OPENCL_INCLUDE_DIR
+NAMES OpenCL/cl.h CL/cl.h
+HINTS ${OPENCL_ROOT_DIR}
+PATH_SUFFIXES include include/nvidia-current
+DOC "OpenCL include directory"
+NO_DEFAULT_PATH)
+
 find_path(OPENCL_INCLUDE_DIR
 NAMES OpenCL/cl.h CL/cl.h
 HINTS ${OPENCL_ROOT_DIR}
@@ -25,6 +32,13 @@ else(APPLE)
 set(OPENCL_POSSIBLE_LIB_SUFFIXES lib/Win32 lib/x86)
 endif()

+find_library(OPENCL_LIBRARY
+NAMES OpenCL
+HINTS ${OPENCL_ROOT_DIR}
+PATH_SUFFIXES ${OPENCL_POSSIBLE_LIB_SUFFIXES}
+DOC "OpenCL library"
+NO_DEFAULT_PATH)
+
 find_library(OPENCL_LIBRARY
 NAMES OpenCL
 HINTS ${OPENCL_ROOT_DIR}
@@ -63,12 +63,9 @@
 #include "opencv2/core/core_c.h"
 #include "opencv2/imgproc/imgproc_c.h"
 #include "opencv2/photo/photo_c.h"
-#include "opencv2/video.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/flann.hpp"
-#include "opencv2/calib3d.hpp"
-#include "opencv2/objdetect.hpp"
+#include "opencv2/video/tracking_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
+#include "opencv2/legacy.hpp"
 #include "opencv2/legacy/compat.hpp"

 #if !defined(CV_IMPL)
@@ -51,6 +51,10 @@
 #include "opencv2/core.hpp"
 #include "opencv2/imgproc.hpp"
 #include "opencv2/photo.hpp"
+#include "opencv2/video.hpp"
 #include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/objdetect.hpp"

 #endif
@@ -49,14 +49,12 @@
 #include "opencv2/core/core_c.h"
 #include "opencv2/imgproc/imgproc_c.h"
 #include "opencv2/photo/photo_c.h"
-#include "opencv2/video.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/calib3d.hpp"
-#include "opencv2/objdetect.hpp"
+#include "opencv2/video/tracking_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
 #include "opencv2/legacy.hpp"
 #include "opencv2/legacy/compat.hpp"
 #include "opencv2/legacy/blobtrack.hpp"

 #include "opencv2/contrib.hpp"

 #endif
@@ -115,7 +115,7 @@ calibrateCamera
 ---------------
 Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.

-.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) )
+.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )

 .. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs

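[Reviewer note, not part of the patch] A minimal C++ usage sketch of the calibrateCamera signature documented above, with the default termination criteria written out explicitly; the helper function and variable names are assumptions for illustration only:

    #include "opencv2/calib3d.hpp"
    #include <cfloat>
    #include <vector>

    // Calibrate from per-view 3D-2D correspondences; returns the reprojection RMS error.
    static double runCalibration(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                 const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                 cv::Size imageSize,
                                 cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
                                 std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs)
    {
        // flags = 0; the criteria below matches the documented default.
        return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                   cameraMatrix, distCoeffs, rvecs, tvecs, 0,
                                   cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                                    30, DBL_EPSILON));
    }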
@@ -454,7 +454,7 @@ findChessboardCorners
 -------------------------
 Finds the positions of internal corners of the chessboard.

-.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE )
+.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE )

 .. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners

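[Reviewer note, not part of the patch] A short sketch of the findChessboardCorners call with the documented default flags; names are assumptions:

    #include "opencv2/calib3d.hpp"
    #include <vector>

    // Detect the inner corners of a chessboard pattern in a grayscale image.
    static bool detectChessboard(const cv::Mat& gray, cv::Size patternSize,
                                 std::vector<cv::Point2f>& corners)
    {
        return cv::findChessboardCorners(gray, patternSize, corners,
                                         cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
    }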
@@ -515,7 +515,7 @@ Finds centers in the grid of circles.

 .. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )

-.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> retval, centers
+.. ocv:pyfunction:: cv2.findCirclesGrid(image, patternSize[, centers[, flags[, blobDetector]]]) -> retval, centers

 :param image: grid view of input circles; it must be an 8-bit grayscale or color image.

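[Reviewer note, not part of the patch] For reference, the C++ call behind the renamed Python binding, using the default blob detector and the symmetric-grid flag; names are assumptions:

    #include "opencv2/calib3d.hpp"
    #include <vector>

    // Locate the centers of a symmetric circles grid.
    static bool detectCirclesGrid(const cv::Mat& image, cv::Size patternSize,
                                  std::vector<cv::Point2f>& centers)
    {
        return cv::findCirclesGrid(image, patternSize, centers, cv::CALIB_CB_SYMMETRIC_GRID);
    }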
@@ -694,7 +694,7 @@ findEssentialMat
 ------------------
 Calculates an essential matrix from the corresponding points in two images.

-.. ocv:function:: Mat findEssentialMat( InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=CV_RANSAC, double prob=0.999, double threshold=1.0, OutputArray mask=noArray() )
+.. ocv:function:: Mat findEssentialMat( InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=RANSAC, double prob=0.999, double threshold=1.0, OutputArray mask=noArray() )

 :param points1: Array of ``N`` ``(N >= 5)`` 2D points from the first image. The point coordinates should be floating-point (single or double precision).

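[Reviewer note, not part of the patch] After this change the documented default method is the C++ constant RANSAC rather than the C macro CV_RANSAC. A minimal sketch with assumed names:

    #include "opencv2/calib3d.hpp"
    #include <vector>

    // Estimate the essential matrix from matched points; inlierMask marks RANSAC inliers.
    static cv::Mat essentialFromMatches(const std::vector<cv::Point2f>& pts1,
                                        const std::vector<cv::Point2f>& pts2,
                                        double focal, cv::Point2d pp, cv::Mat& inlierMask)
    {
        return cv::findEssentialMat(pts1, pts2, focal, pp, cv::RANSAC, 0.999, 1.0, inlierMask);
    }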
@@ -975,7 +975,7 @@ initCameraMatrix2D
 ----------------------
 Finds an initial camera matrix from 3D-2D point correspondences.

-.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.)
+.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.0 )

 .. ocv:pyfunction:: cv2.initCameraMatrix2D(objectPoints, imagePoints, imageSize[, aspectRatio]) -> retval

@@ -7,7 +7,7 @@
 // copy or use the software.
 //
 //
 // License Agreement
 // For Open Source Computer Vision Library
 //
 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
@@ -44,562 +44,184 @@
 #ifndef __OPENCV_CALIB3D_HPP__
 #define __OPENCV_CALIB3D_HPP__

-#ifdef __cplusplus
-# include "opencv2/core.hpp"
-#endif
-#include "opencv2/core/core_c.h"
+#include "opencv2/core.hpp"
 #include "opencv2/features2d.hpp"

-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/****************************************************************************************\
-* Camera Calibration, Pose Estimation and Stereo *
-\****************************************************************************************/
-
-typedef struct CvPOSITObject CvPOSITObject;
-
-/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
-CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
-
-
-/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
-an object given its model and projection in a weak-perspective case */
-CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
-double focal_length, CvTermCriteria criteria,
-float* rotation_matrix, float* translation_vector);
-
-/* Releases CvPOSITObject structure */
-CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
-
-/* updates the number of RANSAC iterations */
-CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
-int model_points, int max_iters );
-
-CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
-
-/* Calculates fundamental matrix given a set of corresponding points */
-#define CV_FM_7POINT 1
-#define CV_FM_8POINT 2
-
-#define CV_LMEDS 4
-#define CV_RANSAC 8
-
-#define CV_FM_LMEDS_ONLY CV_LMEDS
-#define CV_FM_RANSAC_ONLY CV_RANSAC
-#define CV_FM_LMEDS CV_LMEDS
-#define CV_FM_RANSAC CV_RANSAC
-
-enum
-{
-CV_ITERATIVE = 0,
-CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
-CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
-};
-
-CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
-CvMat* fundamental_matrix,
-int method CV_DEFAULT(CV_FM_RANSAC),
-double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
-CvMat* status CV_DEFAULT(NULL) );
-
-/* For each input point on one of images
-computes parameters of the corresponding
-epipolar line on the other image */
-CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
-int which_image,
-const CvMat* fundamental_matrix,
-CvMat* correspondent_lines );
-
-/* Triangulation functions */
-
-CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
-CvMat* projPoints1, CvMat* projPoints2,
-CvMat* points4D);
-
-CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
-CvMat* new_points1, CvMat* new_points2);
-
-
-/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
-alpha=0 - only valid pixels will be retained in the undistorted image
-alpha=1 - all the source image pixels will be retained in the undistorted image
-*/
-CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
-const CvMat* dist_coeffs,
-CvSize image_size, double alpha,
-CvMat* new_camera_matrix,
-CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
-CvRect* valid_pixel_ROI CV_DEFAULT(0),
-int center_principal_point CV_DEFAULT(0));
-
-/* Converts rotation vector to rotation matrix or vice versa */
-CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
-CvMat* jacobian CV_DEFAULT(0) );
-
-/* Finds perspective transformation between the object plane and image (view) plane */
-CVAPI(int) cvFindHomography( const CvMat* src_points,
-const CvMat* dst_points,
-CvMat* homography,
-int method CV_DEFAULT(0),
-double ransacReprojThreshold CV_DEFAULT(3),
-CvMat* mask CV_DEFAULT(0));
-
-/* Computes RQ decomposition for 3x3 matrices */
-CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
-CvMat *matrixQx CV_DEFAULT(NULL),
-CvMat *matrixQy CV_DEFAULT(NULL),
-CvMat *matrixQz CV_DEFAULT(NULL),
-CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
-
-/* Computes projection matrix decomposition */
-CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
-CvMat *rotMatr, CvMat *posVect,
-CvMat *rotMatrX CV_DEFAULT(NULL),
-CvMat *rotMatrY CV_DEFAULT(NULL),
-CvMat *rotMatrZ CV_DEFAULT(NULL),
-CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
-
-/* Computes d(AB)/dA and d(AB)/dB */
-CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
-
-/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
-t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
-CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
-const CvMat* _rvec2, const CvMat* _tvec2,
-CvMat* _rvec3, CvMat* _tvec3,
-CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
-CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
-CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
-CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
-
-/* Projects object points to the view plane using
-the specified extrinsic and intrinsic camera parameters */
-CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
-const CvMat* translation_vector, const CvMat* camera_matrix,
-const CvMat* distortion_coeffs, CvMat* image_points,
-CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
-CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
-CvMat* dpddist CV_DEFAULT(NULL),
-double aspect_ratio CV_DEFAULT(0));
-
-/* Finds extrinsic camera parameters from
-a few known corresponding point pairs and intrinsic parameters */
-CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
-const CvMat* image_points,
-const CvMat* camera_matrix,
-const CvMat* distortion_coeffs,
-CvMat* rotation_vector,
-CvMat* translation_vector,
-int use_extrinsic_guess CV_DEFAULT(0) );
-
-/* Computes initial estimate of the intrinsic camera parameters
-in case of planar calibration target (e.g. chessboard) */
-CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
-const CvMat* image_points,
-const CvMat* npoints, CvSize image_size,
-CvMat* camera_matrix,
-double aspect_ratio CV_DEFAULT(1.) );
-
-#define CV_CALIB_CB_ADAPTIVE_THRESH 1
-#define CV_CALIB_CB_NORMALIZE_IMAGE 2
-#define CV_CALIB_CB_FILTER_QUADS 4
-#define CV_CALIB_CB_FAST_CHECK 8
-
-// Performs a fast check if a chessboard is in the input image. This is a workaround to
-// a problem of cvFindChessboardCorners being slow on images with no chessboard
-// - src: input image
-// - size: chessboard size
-// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
-// 0 if there is no chessboard, -1 in case of error
-CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
-
-/* Detects corners on a chessboard calibration pattern */
-CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
-CvPoint2D32f* corners,
-int* corner_count CV_DEFAULT(NULL),
-int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
-
-/* Draws individual chessboard corners or the whole chessboard detected */
-CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
-CvPoint2D32f* corners,
-int count, int pattern_was_found );
-
-#define CV_CALIB_USE_INTRINSIC_GUESS 1
-#define CV_CALIB_FIX_ASPECT_RATIO 2
-#define CV_CALIB_FIX_PRINCIPAL_POINT 4
-#define CV_CALIB_ZERO_TANGENT_DIST 8
-#define CV_CALIB_FIX_FOCAL_LENGTH 16
-#define CV_CALIB_FIX_K1 32
-#define CV_CALIB_FIX_K2 64
-#define CV_CALIB_FIX_K3 128
-#define CV_CALIB_FIX_K4 2048
-#define CV_CALIB_FIX_K5 4096
-#define CV_CALIB_FIX_K6 8192
-#define CV_CALIB_RATIONAL_MODEL 16384
-#define CV_CALIB_THIN_PRISM_MODEL 32768
-#define CV_CALIB_FIX_S1_S2_S3_S4 65536
-
-
-/* Finds intrinsic and extrinsic camera parameters
-from a few views of known calibration pattern */
-CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
-const CvMat* image_points,
-const CvMat* point_counts,
-CvSize image_size,
-CvMat* camera_matrix,
-CvMat* distortion_coeffs,
-CvMat* rotation_vectors CV_DEFAULT(NULL),
-CvMat* translation_vectors CV_DEFAULT(NULL),
-int flags CV_DEFAULT(0),
-CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
-CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
-
-/* Computes various useful characteristics of the camera from the data computed by
-cvCalibrateCamera2 */
-CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
-CvSize image_size,
-double aperture_width CV_DEFAULT(0),
-double aperture_height CV_DEFAULT(0),
-double *fovx CV_DEFAULT(NULL),
-double *fovy CV_DEFAULT(NULL),
-double *focal_length CV_DEFAULT(NULL),
-CvPoint2D64f *principal_point CV_DEFAULT(NULL),
-double *pixel_aspect_ratio CV_DEFAULT(NULL));
-
-#define CV_CALIB_FIX_INTRINSIC 256
-#define CV_CALIB_SAME_FOCAL_LENGTH 512
-
-/* Computes the transformation from one camera coordinate system to another one
-from a few correspondent views of the same calibration target. Optionally, calibrates
-both cameras */
-CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
-const CvMat* image_points2, const CvMat* npoints,
-CvMat* camera_matrix1, CvMat* dist_coeffs1,
-CvMat* camera_matrix2, CvMat* dist_coeffs2,
-CvSize image_size, CvMat* R, CvMat* T,
-CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
-CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
-CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
-int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
-
-#define CV_CALIB_ZERO_DISPARITY 1024
-
-/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
-views parallel (=> to make all the epipolar lines horizontal or vertical) */
-CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
-const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
-CvSize image_size, const CvMat* R, const CvMat* T,
-CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
-CvMat* Q CV_DEFAULT(0),
-int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
-double alpha CV_DEFAULT(-1),
-CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
-CvRect* valid_pix_ROI1 CV_DEFAULT(0),
-CvRect* valid_pix_ROI2 CV_DEFAULT(0));
-
-/* Computes rectification transformations for uncalibrated pair of images using a set
-of point correspondences */
-CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
-const CvMat* F, CvSize img_size,
-CvMat* H1, CvMat* H2,
-double threshold CV_DEFAULT(5));
-
-
-
-/* stereo correspondence parameters and functions */
-
-#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
-#define CV_STEREO_BM_XSOBEL 1
-
-/* Block matching algorithm structure */
-typedef struct CvStereoBMState
-{
-// pre-filtering (normalization of input images)
-int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
-int preFilterSize; // averaging window size: ~5x5..21x21
-int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
-
-// correspondence using Sum of Absolute Difference (SAD)
-int SADWindowSize; // ~5x5..21x21
-int minDisparity; // minimum disparity (can be negative)
-int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
-
-// post-filtering
-int textureThreshold; // the disparity is only computed for pixels
-// with textured enough neighborhood
-int uniquenessRatio; // accept the computed disparity d* only if
-// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
-// for any d != d*+/-1 within the search range.
-int speckleWindowSize; // disparity variation window
-int speckleRange; // acceptable range of variation in window
-
-int trySmallerWindows; // if 1, the results may be more accurate,
-// at the expense of slower processing
-CvRect roi1, roi2;
-int disp12MaxDiff;
-
-// temporary buffers
-CvMat* preFilteredImg0;
-CvMat* preFilteredImg1;
-CvMat* slidingSumBuf;
-CvMat* cost;
-CvMat* disp;
-} CvStereoBMState;
-
-#define CV_STEREO_BM_BASIC 0
-#define CV_STEREO_BM_FISH_EYE 1
-#define CV_STEREO_BM_NARROW 2
-
-CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
-int numberOfDisparities CV_DEFAULT(0));
-
-CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
-
-CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
-CvArr* disparity, CvStereoBMState* state );
-
-CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
-int numberOfDisparities, int SADWindowSize );
-
-CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
-int minDisparity, int numberOfDisparities,
-int disp12MaxDiff CV_DEFAULT(1) );
-
-/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
-CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
-CvArr* _3dImage, const CvMat* Q,
-int handleMissingValues CV_DEFAULT(0) );
-
-
-#ifdef __cplusplus
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////
-class CV_EXPORTS CvLevMarq
-{
-public:
-CvLevMarq();
-CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
-cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
-bool completeSymmFlag=false );
-~CvLevMarq();
-void init( int nparams, int nerrs, CvTermCriteria criteria=
-cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
-bool completeSymmFlag=false );
-bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
-bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
-
-void clear();
-void step();
-enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
-
-cv::Ptr<CvMat> mask;
-cv::Ptr<CvMat> prevParam;
-cv::Ptr<CvMat> param;
-cv::Ptr<CvMat> J;
-cv::Ptr<CvMat> err;
-cv::Ptr<CvMat> JtJ;
-cv::Ptr<CvMat> JtJN;
-cv::Ptr<CvMat> JtErr;
-cv::Ptr<CvMat> JtJV;
-cv::Ptr<CvMat> JtJW;
-double prevErrNorm, errNorm;
-int lambdaLg10;
-CvTermCriteria criteria;
-int state;
-int iters;
-bool completeSymmFlag;
-};

 namespace cv
 {
-//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
-CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());

 //! type of the robust estimation algorithm
-enum
-{
-LMEDS=CV_LMEDS, //!< least-median algorithm
-RANSAC=CV_RANSAC //!< RANSAC algorithm
-};
+enum { LMEDS = 4, //!< least-median algorithm
+RANSAC = 8 //!< RANSAC algorithm
+};
+
+enum { ITERATIVE = 0,
+EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
+P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
+};
+
+enum { CALIB_CB_ADAPTIVE_THRESH = 1,
+CALIB_CB_NORMALIZE_IMAGE = 2,
+CALIB_CB_FILTER_QUADS = 4,
+CALIB_CB_FAST_CHECK = 8
+};
+
+enum { CALIB_CB_SYMMETRIC_GRID = 1,
+CALIB_CB_ASYMMETRIC_GRID = 2,
+CALIB_CB_CLUSTERING = 4
+};
+
+enum { CALIB_USE_INTRINSIC_GUESS = 0x00001,
+CALIB_FIX_ASPECT_RATIO = 0x00002,
+CALIB_FIX_PRINCIPAL_POINT = 0x00004,
+CALIB_ZERO_TANGENT_DIST = 0x00008,
+CALIB_FIX_FOCAL_LENGTH = 0x00010,
+CALIB_FIX_K1 = 0x00020,
+CALIB_FIX_K2 = 0x00040,
+CALIB_FIX_K3 = 0x00080,
+CALIB_FIX_K4 = 0x00800,
+CALIB_FIX_K5 = 0x01000,
+CALIB_FIX_K6 = 0x02000,
+CALIB_RATIONAL_MODEL = 0x04000,
+CALIB_THIN_PRISM_MODEL = 0x08000,
+CALIB_FIX_S1_S2_S3_S4 = 0x10000,
+// only for stereo
+CALIB_FIX_INTRINSIC = 0x00100,
+CALIB_SAME_FOCAL_LENGTH = 0x00200,
+// for stereo rectification
+CALIB_ZERO_DISPARITY = 0x00400
+};
+
+//! the algorithm for finding fundamental matrix
+enum { FM_7POINT = 1, //!< 7-point algorithm
+FM_8POINT = 2, //!< 8-point algorithm
+FM_LMEDS = 4, //!< least-median algorithm
+FM_RANSAC = 8 //!< RANSAC algorithm
+};
+
+//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
+CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );

 //! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
 CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
-int method=0, double ransacReprojThreshold=3,
+int method = 0, double ransacReprojThreshold = 3,
 OutputArray mask=noArray());

 //! variant of findHomography for backward compatibility
 CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
-OutputArray mask, int method=0, double ransacReprojThreshold=3);
+OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );

 //! Computes RQ decomposition of 3x3 matrix
 CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
-OutputArray Qx=noArray(),
-OutputArray Qy=noArray(),
-OutputArray Qz=noArray());
+OutputArray Qx = noArray(),
+OutputArray Qy = noArray(),
+OutputArray Qz = noArray());

 //! Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector
 CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
 OutputArray rotMatrix, OutputArray transVect,
-OutputArray rotMatrixX=noArray(),
-OutputArray rotMatrixY=noArray(),
-OutputArray rotMatrixZ=noArray(),
-OutputArray eulerAngles=noArray() );
+OutputArray rotMatrixX = noArray(),
+OutputArray rotMatrixY = noArray(),
+OutputArray rotMatrixZ = noArray(),
+OutputArray eulerAngles =noArray() );

 //! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
-CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
-OutputArray dABdA,
-OutputArray dABdB );
+CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );

 //! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
 CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
 InputArray rvec2, InputArray tvec2,
 OutputArray rvec3, OutputArray tvec3,
-OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
-OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
-OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
-OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
+OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
+OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
+OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
+OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );

 //! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
 CV_EXPORTS_W void projectPoints( InputArray objectPoints,
 InputArray rvec, InputArray tvec,
 InputArray cameraMatrix, InputArray distCoeffs,
 OutputArray imagePoints,
-OutputArray jacobian=noArray(),
-double aspectRatio=0 );
+OutputArray jacobian = noArray(),
+double aspectRatio = 0 );

 //! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
-enum
-{
-ITERATIVE=CV_ITERATIVE,
-EPNP=CV_EPNP,
-P3P=CV_P3P
-};
 CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
 InputArray cameraMatrix, InputArray distCoeffs,
 OutputArray rvec, OutputArray tvec,
-bool useExtrinsicGuess=false, int flags=ITERATIVE);
+bool useExtrinsicGuess = false, int flags = ITERATIVE );

 //! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
-CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
-InputArray imagePoints,
-InputArray cameraMatrix,
-InputArray distCoeffs,
-OutputArray rvec,
-OutputArray tvec,
-bool useExtrinsicGuess = false,
-int iterationsCount = 100,
-float reprojectionError = 8.0,
-int minInliersCount = 100,
-OutputArray inliers = noArray(),
-int flags = ITERATIVE);
+CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
+InputArray cameraMatrix, InputArray distCoeffs,
+OutputArray rvec, OutputArray tvec,
+bool useExtrinsicGuess = false, int iterationsCount = 100,
+float reprojectionError = 8.0, int minInliersCount = 100,
+OutputArray inliers = noArray(), int flags = ITERATIVE );

 //! initializes camera matrix from a few 3D points and the corresponding projections.
 CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
 InputArrayOfArrays imagePoints,
-Size imageSize, double aspectRatio=1. );
+Size imageSize, double aspectRatio = 1.0 );

-enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
-CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
-
 //! finds checkerboard pattern of the specified size in the image
-CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
-OutputArray corners,
-int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
+CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
+int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );

 //! finds subpixel-accurate positions of the chessboard corners
-CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
+CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );

 //! draws the checkerboard pattern (found or partly found) in the image
 CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
 InputArray corners, bool patternWasFound );

-enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
-CALIB_CB_CLUSTERING = 4 };
-
 //! finds circles' grid pattern of the specified size in the image
 CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
-OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
+OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
 const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());

-//! the deprecated function. Use findCirclesGrid() instead of it.
-CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
-OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
-enum
-{
-CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
-CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
-CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
-CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
-CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
-CALIB_FIX_K1 = CV_CALIB_FIX_K1,
-CALIB_FIX_K2 = CV_CALIB_FIX_K2,
-CALIB_FIX_K3 = CV_CALIB_FIX_K3,
-CALIB_FIX_K4 = CV_CALIB_FIX_K4,
-CALIB_FIX_K5 = CV_CALIB_FIX_K5,
-CALIB_FIX_K6 = CV_CALIB_FIX_K6,
-CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
-CALIB_THIN_PRISM_MODEL = CV_CALIB_THIN_PRISM_MODEL,
-CALIB_FIX_S1_S2_S3_S4=CV_CALIB_FIX_S1_S2_S3_S4,
-// only for stereo
-CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
-CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
-// for stereo rectification
-CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
-};

 //! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern.
 CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
-InputArrayOfArrays imagePoints,
-Size imageSize,
-InputOutputArray cameraMatrix,
-InputOutputArray distCoeffs,
+InputArrayOfArrays imagePoints, Size imageSize,
+InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
 OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
-int flags=0, TermCriteria criteria = TermCriteria(
-TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
+int flags = 0, TermCriteria criteria = TermCriteria(
+TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );

 //! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
-CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
-Size imageSize,
-double apertureWidth,
-double apertureHeight,
-CV_OUT double& fovx,
-CV_OUT double& fovy,
-CV_OUT double& focalLength,
-CV_OUT Point2d& principalPoint,
-CV_OUT double& aspectRatio );
+CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
+double apertureWidth, double apertureHeight,
+CV_OUT double& fovx, CV_OUT double& fovy,
+CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
+CV_OUT double& aspectRatio );

 //! finds intrinsic and extrinsic parameters of a stereo camera
 CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
-InputArrayOfArrays imagePoints1,
-InputArrayOfArrays imagePoints2,
-InputOutputArray cameraMatrix1,
-InputOutputArray distCoeffs1,
-InputOutputArray cameraMatrix2,
-InputOutputArray distCoeffs2,
-Size imageSize, OutputArray R,
-OutputArray T, OutputArray E, OutputArray F,
+InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
+InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
+InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
+Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
 TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
-int flags=CALIB_FIX_INTRINSIC );
+int flags = CALIB_FIX_INTRINSIC );


 //! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
 CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
 InputArray cameraMatrix2, InputArray distCoeffs2,
 Size imageSize, InputArray R, InputArray T,
 OutputArray R1, OutputArray R2,
 OutputArray P1, OutputArray P2,
-OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
-double alpha=-1, Size newImageSize=Size(),
-CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
+OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
+double alpha = -1, Size newImageSize = Size(),
+CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );

 //! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
 CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
 InputArray F, Size imgSize,
 OutputArray H1, OutputArray H2,
-double threshold=5 );
+double threshold = 5 );

 //! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
 CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
@@ -615,8 +237,9 @@ CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distC

 //! returns the optimal new camera matrix
 CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
-Size imageSize, double alpha, Size newImgSize=Size(),
-CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
+Size imageSize, double alpha, Size newImgSize = Size(),
+CV_OUT Rect* validPixROI = 0,
+bool centerPrincipalPoint = false);

 //! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
 CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
@@ -627,44 +250,36 @@ CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst
 //! for backward compatibility
 CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );

-//! the algorithm for finding fundamental matrix
-enum
-{
-FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
-FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
-FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
-FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
-};
-
 //! finds fundamental matrix from a set of corresponding 2D points
 CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
-int method=FM_RANSAC,
-double param1=3., double param2=0.99,
-OutputArray mask=noArray());
+int method = FM_RANSAC,
+double param1 = 3., double param2 = 0.99,
+OutputArray mask = noArray() );

 //! variant of findFundamentalMat for backward compatibility
 CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
-OutputArray mask, int method=FM_RANSAC,
-double param1=3., double param2=0.99);
+OutputArray mask, int method = FM_RANSAC,
+double param1 = 3., double param2 = 0.99 );

 //! finds essential matrix from a set of corresponding 2D points using five-point algorithm
-CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2, double focal = 1.0, Point2d pp = Point2d(0, 0),
-int method = CV_RANSAC,
-double prob = 0.999, double threshold = 1.0, OutputArray mask = noArray() );
+CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2,
+double focal = 1.0, Point2d pp = Point2d(0, 0),
+int method = RANSAC, double prob = 0.999,
+double threshold = 1.0, OutputArray mask = noArray() );

 //! decompose essential matrix to possible rotation matrix and one translation vector
 CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );

 //! recover relative camera pose from a set of corresponding 2D points
-CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t,
+CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2,
+OutputArray R, OutputArray t,
 double focal = 1.0, Point2d pp = Point2d(0, 0),
-InputOutputArray mask = noArray());
+InputOutputArray mask = noArray() );


 //! finds coordinates of epipolar lines corresponding the specified points
-CV_EXPORTS void computeCorrespondEpilines( InputArray points,
-int whichImage, InputArray F,
-OutputArray lines );
+CV_EXPORTS void computeCorrespondEpilines( InputArray points, int whichImage,
+InputArray F, OutputArray lines );

 CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
 InputArray projPoints1, InputArray projPoints2,
@@ -673,13 +288,39 @@ CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
 CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
 OutputArray newPoints1, OutputArray newPoints2 );

+//! filters off speckles (small regions of incorrectly computed disparity)
+CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
+int maxSpeckleSize, double maxDiff,
+InputOutputArray buf = noArray() );
+
+//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
+CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
+int minDisparity, int numberOfDisparities,
+int SADWindowSize );
+
+//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
+CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
+int minDisparity, int numberOfDisparities,
+int disp12MaxDisp = 1 );
+
+//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
+CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
+OutputArray _3dImage, InputArray Q,
+bool handleMissingValues = false,
+int ddepth = -1 );
+
+CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
+OutputArray out, OutputArray inliers,
+double ransacThreshold = 3, double confidence = 0.99);
+

-template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
-
 class CV_EXPORTS_W StereoMatcher : public Algorithm
 {
 public:
-enum { DISP_SHIFT=4, DISP_SCALE=(1 << DISP_SHIFT) };
+enum { DISP_SHIFT = 4,
+DISP_SCALE = (1 << DISP_SHIFT)
+};

 CV_WRAP virtual void compute( InputArray left, InputArray right,
 OutputArray disparity ) = 0;
@@ -704,10 +345,13 @@ public:
 };


 class CV_EXPORTS_W StereoBM : public StereoMatcher
 {
 public:
-enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1 };
+enum { PREFILTER_NORMALIZED_RESPONSE = 0,
+PREFILTER_XSOBEL = 1
+};

 CV_WRAP virtual int getPreFilterType() const = 0;
 CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
@@ -734,13 +378,15 @@ public:
 CV_WRAP virtual void setROI2(Rect roi2) = 0;
 };

-CV_EXPORTS_W Ptr<StereoBM> createStereoBM(int numDisparities=0, int blockSize=21);
+CV_EXPORTS_W Ptr<StereoBM> createStereoBM(int numDisparities = 0, int blockSize = 21);


 class CV_EXPORTS_W StereoSGBM : public StereoMatcher
 {
 public:
-enum { MODE_SGBM=0, MODE_HH=1 };
+enum { MODE_SGBM = 0,
+MODE_HH = 1
+};

 CV_WRAP virtual int getPreFilterCap() const = 0;
 CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
@@ -760,38 +406,11 @@ public:


 CV_EXPORTS_W Ptr<StereoSGBM> createStereoSGBM(int minDisparity, int numDisparities, int blockSize,
-int P1=0, int P2=0, int disp12MaxDiff=0,
-int preFilterCap=0, int uniquenessRatio=0,
-int speckleWindowSize=0, int speckleRange=0,
-int mode=StereoSGBM::MODE_SGBM);
+int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
+int preFilterCap = 0, int uniquenessRatio = 0,
+int speckleWindowSize = 0, int speckleRange = 0,
+int mode = StereoSGBM::MODE_SGBM);

-//! filters off speckles (small regions of incorrectly computed disparity)
-CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
-int maxSpeckleSize, double maxDiff,
-InputOutputArray buf=noArray() );
-
-//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
-CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
-int minDisparity, int numberOfDisparities,
-int SADWindowSize );
-
-//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
-CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
-int minDisparity, int numberOfDisparities,
-int disp12MaxDisp=1 );
-
-//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
-CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
-OutputArray _3dImage, InputArray Q,
-bool handleMissingValues=false,
-int ddepth=-1 );
-
-CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
-OutputArray out, OutputArray inliers,
-double ransacThreshold=3, double confidence=0.99);
-
-}
-
-#endif
-
+} // cv

 #endif
|
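For reference, a minimal sketch of how the reworked C++ stereo interface above might be driven. This is a sketch only, assuming the opencv2/calib3d.hpp and opencv2/highgui.hpp header layout of this branch; the image file names are placeholders.

#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    // Load a rectified stereo pair as 8-bit grayscale images (file names are placeholders).
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

    // Block matching: 64 disparity levels, 21x21 SAD window (the defaults declared above).
    cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64, 21);
    cv::Mat dispBM;
    bm->compute(left, right, dispBM);   // fixed-point disparity, scaled by StereoMatcher::DISP_SCALE

    // Semi-global matching with explicit smoothness penalties P1 < P2.
    cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(0, 64, 9, 8*9*9, 32*9*9);
    cv::Mat dispSGBM;
    sgbm->compute(left, right, dispSGBM);
    return 0;
}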
modules/calib3d/include/opencv2/calib3d/calib3d_c.h (new file, 413 lines)
@@ -0,0 +1,413 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_CALIB3D_C_H__
#define __OPENCV_CALIB3D_C_H__

#include "opencv2/core/core_c.h"

#ifdef __cplusplus
extern "C" {
#endif

/****************************************************************************************\
*                      Camera Calibration, Pose Estimation and Stereo                    *
\****************************************************************************************/

typedef struct CvPOSITObject CvPOSITObject;

/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*)  cvCreatePOSITObject( CvPoint3D32f* points, int point_count );

/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
   an object given its model and projection in a weak-perspective case */
CVAPI(void)  cvPOSIT(  CvPOSITObject* posit_object, CvPoint2D32f* image_points,
                       double focal_length, CvTermCriteria criteria,
                       float* rotation_matrix, float* translation_vector);

/* Releases CvPOSITObject structure */
CVAPI(void)  cvReleasePOSITObject( CvPOSITObject**  posit_object );

/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
                                   int model_points, int max_iters );

CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );

/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2

#define CV_LMEDS 4
#define CV_RANSAC 8

#define CV_FM_LMEDS_ONLY  CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC

enum
{
    CV_ITERATIVE = 0,
    CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
    CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};

CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
                                 CvMat* fundamental_matrix,
                                 int method CV_DEFAULT(CV_FM_RANSAC),
                                 double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
                                 CvMat* status CV_DEFAULT(NULL) );

/* For each input point on one of images
   computes parameters of the corresponding
   epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
                                         int which_image,
                                         const CvMat* fundamental_matrix,
                                         CvMat* correspondent_lines );

/* Triangulation functions */

CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
                                CvMat* projPoints1, CvMat* projPoints2,
                                CvMat* points4D);

CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
                             CvMat* new_points1, CvMat* new_points2);

/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
   alpha=0 - only valid pixels will be retained in the undistorted image
   alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
                                         const CvMat* dist_coeffs,
                                         CvSize image_size, double alpha,
                                         CvMat* new_camera_matrix,
                                         CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
                                         CvRect* valid_pixel_ROI CV_DEFAULT(0),
                                         int center_principal_point CV_DEFAULT(0));

/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
                         CvMat* jacobian CV_DEFAULT(0) );

/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
                             const CvMat* dst_points,
                             CvMat* homography,
                             int method CV_DEFAULT(0),
                             double ransacReprojThreshold CV_DEFAULT(3),
                             CvMat* mask CV_DEFAULT(0));

/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
                           CvMat *matrixQx CV_DEFAULT(NULL),
                           CvMat *matrixQy CV_DEFAULT(NULL),
                           CvMat *matrixQz CV_DEFAULT(NULL),
                           CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));

/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
                                         CvMat *rotMatr, CvMat *posVect,
                                         CvMat *rotMatrX CV_DEFAULT(NULL),
                                         CvMat *rotMatrY CV_DEFAULT(NULL),
                                         CvMat *rotMatrZ CV_DEFAULT(NULL),
                                         CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));

/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );

/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
   t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
                         const CvMat* _rvec2, const CvMat* _tvec2,
                         CvMat* _rvec3, CvMat* _tvec3,
                         CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
                         CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
                         CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
                         CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );

/* Projects object points to the view plane using
   the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
                              const CvMat* translation_vector, const CvMat* camera_matrix,
                              const CvMat* distortion_coeffs, CvMat* image_points,
                              CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
                              CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
                              CvMat* dpddist CV_DEFAULT(NULL),
                              double aspect_ratio CV_DEFAULT(0));

/* Finds extrinsic camera parameters from
   a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
                                          const CvMat* image_points,
                                          const CvMat* camera_matrix,
                                          const CvMat* distortion_coeffs,
                                          CvMat* rotation_vector,
                                          CvMat* translation_vector,
                                          int use_extrinsic_guess CV_DEFAULT(0) );

/* Computes initial estimate of the intrinsic camera parameters
   in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
                                     const CvMat* image_points,
                                     const CvMat* npoints, CvSize image_size,
                                     CvMat* camera_matrix,
                                     double aspect_ratio CV_DEFAULT(1.) );

#define CV_CALIB_CB_ADAPTIVE_THRESH  1
#define CV_CALIB_CB_NORMALIZE_IMAGE  2
#define CV_CALIB_CB_FILTER_QUADS     4
#define CV_CALIB_CB_FAST_CHECK       8

// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);

/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
                                    CvPoint2D32f* corners,
                                    int* corner_count CV_DEFAULT(NULL),
                                    int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );

/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
                                     CvPoint2D32f* corners,
                                     int count, int pattern_was_found );

#define CV_CALIB_USE_INTRINSIC_GUESS  1
#define CV_CALIB_FIX_ASPECT_RATIO     2
#define CV_CALIB_FIX_PRINCIPAL_POINT  4
#define CV_CALIB_ZERO_TANGENT_DIST    8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1  32
#define CV_CALIB_FIX_K2  64
#define CV_CALIB_FIX_K3  128
#define CV_CALIB_FIX_K4  2048
#define CV_CALIB_FIX_K5  4096
#define CV_CALIB_FIX_K6  8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4  65536

/* Finds intrinsic and extrinsic camera parameters
   from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
                                  const CvMat* image_points,
                                  const CvMat* point_counts,
                                  CvSize image_size,
                                  CvMat* camera_matrix,
                                  CvMat* distortion_coeffs,
                                  CvMat* rotation_vectors CV_DEFAULT(NULL),
                                  CvMat* translation_vectors CV_DEFAULT(NULL),
                                  int flags CV_DEFAULT(0),
                                  CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
                                      CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );

/* Computes various useful characteristics of the camera from the data computed by
   cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
                                       CvSize image_size,
                                       double aperture_width CV_DEFAULT(0),
                                       double aperture_height CV_DEFAULT(0),
                                       double *fovx CV_DEFAULT(NULL),
                                       double *fovy CV_DEFAULT(NULL),
                                       double *focal_length CV_DEFAULT(NULL),
                                       CvPoint2D64f *principal_point CV_DEFAULT(NULL),
                                       double *pixel_aspect_ratio CV_DEFAULT(NULL));

#define CV_CALIB_FIX_INTRINSIC  256
#define CV_CALIB_SAME_FOCAL_LENGTH 512

/* Computes the transformation from one camera coordinate system to another one
   from a few correspondent views of the same calibration target. Optionally, calibrates
   both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
                                 const CvMat* image_points2, const CvMat* npoints,
                                 CvMat* camera_matrix1, CvMat* dist_coeffs1,
                                 CvMat* camera_matrix2, CvMat* dist_coeffs2,
                                 CvSize image_size, CvMat* R, CvMat* T,
                                 CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
                                 CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
                                     CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
                                 int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));

#define CV_CALIB_ZERO_DISPARITY 1024

/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
   views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
                             const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
                             CvSize image_size, const CvMat* R, const CvMat* T,
                             CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
                             CvMat* Q CV_DEFAULT(0),
                             int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
                             double alpha CV_DEFAULT(-1),
                             CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
                             CvRect* valid_pix_ROI1 CV_DEFAULT(0),
                             CvRect* valid_pix_ROI2 CV_DEFAULT(0));

/* Computes rectification transformations for uncalibrated pair of images using a set
   of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
                                        const CvMat* F, CvSize img_size,
                                        CvMat* H1, CvMat* H2,
                                        double threshold CV_DEFAULT(5));


/* stereo correspondence parameters and functions */

#define CV_STEREO_BM_NORMALIZED_RESPONSE  0
#define CV_STEREO_BM_XSOBEL               1

/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
    // pre-filtering (normalization of input images)
    int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
    int preFilterSize; // averaging window size: ~5x5..21x21
    int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]

    // correspondence using Sum of Absolute Difference (SAD)
    int SADWindowSize; // ~5x5..21x21
    int minDisparity;  // minimum disparity (can be negative)
    int numberOfDisparities; // maximum disparity - minimum disparity (> 0)

    // post-filtering
    int textureThreshold;  // the disparity is only computed for pixels
                           // with textured enough neighborhood
    int uniquenessRatio;   // accept the computed disparity d* only if
                           // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
                           // for any d != d*+/-1 within the search range.
    int speckleWindowSize; // disparity variation window
    int speckleRange; // acceptable range of variation in window

    int trySmallerWindows; // if 1, the results may be more accurate,
                           // at the expense of slower processing
    CvRect roi1, roi2;
    int disp12MaxDiff;

    // temporary buffers
    CvMat* preFilteredImg0;
    CvMat* preFilteredImg1;
    CvMat* slidingSumBuf;
    CvMat* cost;
    CvMat* disp;
} CvStereoBMState;

#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2

CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
                                              int numberOfDisparities CV_DEFAULT(0));

CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );

CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
                                          CvArr* disparity, CvStereoBMState* state );

CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
                                      int numberOfDisparities, int SADWindowSize );

CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
                                 int minDisparity, int numberOfDisparities,
                                 int disp12MaxDiff CV_DEFAULT(1) );

/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void)  cvReprojectImageTo3D( const CvArr* disparityImage,
                                   CvArr* _3dImage, const CvMat* Q,
                                   int handleMissingValues CV_DEFAULT(0) );

#ifdef __cplusplus
} // extern "C"

//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
    CvLevMarq();
    CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
              cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
              bool completeSymmFlag=false );
    ~CvLevMarq();
    void init( int nparams, int nerrs, CvTermCriteria criteria=
              cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
              bool completeSymmFlag=false );
    bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
    bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );

    void clear();
    void step();
    enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };

    cv::Ptr<CvMat> mask;
    cv::Ptr<CvMat> prevParam;
    cv::Ptr<CvMat> param;
    cv::Ptr<CvMat> J;
    cv::Ptr<CvMat> err;
    cv::Ptr<CvMat> JtJ;
    cv::Ptr<CvMat> JtJN;
    cv::Ptr<CvMat> JtErr;
    cv::Ptr<CvMat> JtJV;
    cv::Ptr<CvMat> JtJW;
    double prevErrNorm, errNorm;
    int lambdaLg10;
    CvTermCriteria criteria;
    int state;
    int iters;
    bool completeSymmFlag;
};

#endif

#endif /* __OPENCV_CALIB3D_C_H__ */
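A small usage sketch for the relocated C API, assuming only the declarations in the new header above; the point coordinates are made up, and cvMat()/CV_64F come from the core C API that the header already includes.

#include "opencv2/calib3d/calib3d_c.h"
#include <stdio.h>

int main()
{
    /* Eight point correspondences, one (x, y) point per row; the values are made up. */
    double pts1[8][2] = { {10,10},{20,15},{30,40},{45,50},{60,20},{70,70},{80,30},{90,90} };
    double pts2[8][2] = { {12,11},{22,16},{31,42},{46,52},{62,21},{71,72},{82,31},{92,91} };
    double f[9];

    CvMat points1 = cvMat(8, 2, CV_64F, pts1);
    CvMat points2 = cvMat(8, 2, CV_64F, pts2);
    CvMat fundamental = cvMat(3, 3, CV_64F, f);

    /* RANSAC with a 3-pixel threshold and 0.99 confidence (the defaults declared above). */
    int found = cvFindFundamentalMat(&points1, &points2, &fundamental,
                                     CV_FM_RANSAC, 3., 0.99, NULL);
    printf("fundamental matrices found: %d\n", found);
    return 0;
}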
@@ -10,7 +10,7 @@ using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;

-CV_ENUM(pnpAlgo, CV_ITERATIVE, CV_EPNP /*, CV_P3P*/)
+CV_ENUM(pnpAlgo, ITERATIVE, EPNP /*, P3P*/)

typedef std::tr1::tuple<int, pnpAlgo> PointsNum_Algo_t;
typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
@@ -20,7 +20,7 @@ typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
            testing::Combine(
                testing::Values(/*4,*/ 3*9, 7*13), //TODO: find why results on 4 points are too unstable
-                testing::Values((int)CV_ITERATIVE, (int)CV_EPNP)
+                testing::Values((int)ITERATIVE, (int)EPNP)
                )
            )
{
@@ -93,7 +93,7 @@ PERF_TEST(PointsNum_Algo, solveP3P)

    TEST_CYCLE_N(1000)
    {
-        solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, CV_P3P);
+        solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, P3P);
    }

    SANITY_CHECK(rvec, 1e-6);
@@ -61,6 +61,7 @@

#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/calib3d/calib3d_c.h"
#include "circlesgrid.hpp"
#include <stdarg.h>

@@ -42,6 +42,7 @@

#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/calib3d/calib3d_c.h"
#include <stdio.h>
#include <iterator>

@@ -825,7 +826,7 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
                dpdk_p[dpdk_step+7] = fy*y*cdist*(-icdist2)*icdist2*r6;
                if( _dpdk->cols > 8 )
                {
                    dpdk_p[8] = fx*r2; //s1
                    dpdk_p[9] = fx*r4; //s2
                    dpdk_p[10] = 0;//s3
                    dpdk_p[11] = 0;//s4
@@ -1255,7 +1256,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
    //when the thin prism model is used the distortion coefficients matrix must have 12 parameters
    if((flags & CV_CALIB_THIN_PRISM_MODEL) && (distCoeffs->cols*distCoeffs->rows != 12))
        CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );

    nimages = npoints->rows*npoints->cols;
    npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);

@@ -41,6 +41,7 @@

#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/calib3d/calib3d_c.h"

#include <vector>
#include <algorithm>

@@ -202,12 +202,12 @@ void CirclesGridClusterFinder::findCorners(const std::vector<cv::Point2f> &hull2
  //corners are the most sharp angles (6)
  Mat anglesMat = Mat(angles);
  Mat sortedIndices;
-  sortIdx(anglesMat, sortedIndices, CV_SORT_EVERY_COLUMN + CV_SORT_DESCENDING);
+  sortIdx(anglesMat, sortedIndices, SORT_EVERY_COLUMN + SORT_DESCENDING);
  CV_Assert(sortedIndices.type() == CV_32SC1);
  CV_Assert(sortedIndices.cols == 1);
  const int cornersCount = isAsymmetricGrid ? 6 : 4;
  Mat cornersIndices;
-  cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, CV_SORT_EVERY_COLUMN + CV_SORT_ASCENDING);
+  cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, SORT_EVERY_COLUMN + SORT_ASCENDING);
  corners.clear();
  for(int i=0; i<cornersCount; i++)
  {
@@ -438,15 +438,15 @@ bool Graph::doesVertexExist(size_t id) const

void Graph::addVertex(size_t id)
{
-  assert( !doesVertexExist( id ) );
+  CV_Assert( !doesVertexExist( id ) );

  vertices.insert(std::pair<size_t, Vertex> (id, Vertex()));
}

void Graph::addEdge(size_t id1, size_t id2)
{
-  assert( doesVertexExist( id1 ) );
-  assert( doesVertexExist( id2 ) );
+  CV_Assert( doesVertexExist( id1 ) );
+  CV_Assert( doesVertexExist( id2 ) );

  vertices[id1].neighbors.insert(id2);
  vertices[id2].neighbors.insert(id1);
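The motivation for the assert to CV_Assert swap in these hunks is that CV_Assert stays active in release builds and reports through cv::Exception rather than aborting. A hedged illustration; checkSquare is a hypothetical helper, not part of the patch.

#include "opencv2/core.hpp"

// CV_Assert throws cv::Exception with the failed expression, file and line,
// even when NDEBUG would compile a plain assert() away.
static void checkSquare(const cv::Mat& m)
{
    CV_Assert( m.rows == m.cols );
}

int main()
{
    try
    {
        checkSquare(cv::Mat::zeros(2, 3, CV_8U)); // violates the condition
    }
    catch (const cv::Exception& e)
    {
        // e.what() describes the failed check; execution can continue or report it.
    }
    return 0;
}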
@@ -454,8 +454,8 @@ void Graph::addEdge(size_t id1, size_t id2)

void Graph::removeEdge(size_t id1, size_t id2)
{
-  assert( doesVertexExist( id1 ) );
-  assert( doesVertexExist( id2 ) );
+  CV_Assert( doesVertexExist( id1 ) );
+  CV_Assert( doesVertexExist( id2 ) );

  vertices[id1].neighbors.erase(id2);
  vertices[id2].neighbors.erase(id1);
@@ -463,8 +463,8 @@ void Graph::removeEdge(size_t id1, size_t id2)

bool Graph::areVerticesAdjacent(size_t id1, size_t id2) const
{
-  assert( doesVertexExist( id1 ) );
-  assert( doesVertexExist( id2 ) );
+  CV_Assert( doesVertexExist( id1 ) );
+  CV_Assert( doesVertexExist( id2 ) );

  Vertices::const_iterator it = vertices.find(id1);
  return it->second.neighbors.find(id2) != it->second.neighbors.end();
@@ -477,7 +477,7 @@ size_t Graph::getVerticesCount() const

size_t Graph::getDegree(size_t id) const
{
-  assert( doesVertexExist(id) );
+  CV_Assert( doesVertexExist(id) );

  Vertices::const_iterator it = vertices.find(id);
  return it->second.neighbors.size();
@@ -495,7 +495,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const
    distanceMatrix.at<int> ((int)it1->first, (int)it1->first) = 0;
    for (Neighbors::const_iterator it2 = it1->second.neighbors.begin(); it2 != it1->second.neighbors.end(); it2++)
    {
-      assert( it1->first != *it2 );
+      CV_Assert( it1->first != *it2 );
      distanceMatrix.at<int> ((int)it1->first, (int)*it2) = edgeWeight;
    }
  }
@@ -524,7 +524,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const

const Graph::Neighbors& Graph::getNeighbors(size_t id) const
{
-  assert( doesVertexExist(id) );
+  CV_Assert( doesVertexExist(id) );

  Vertices::const_iterator it = vertices.find(id);
  return it->second.neighbors;
@@ -604,7 +604,7 @@ bool CirclesGridFinder::findHoles()
    }

    default:
-      CV_Error(CV_StsBadArg, "Unkown pattern type");
+      CV_Error(Error::StsBadArg, "Unkown pattern type");
  }
  return (isDetectionCorrect());
  //CV_Error( 0, "Detection is not correct" );
@@ -813,7 +813,7 @@ void CirclesGridFinder::findMCS(const std::vector<Point2f> &basis, std::vector<G
Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const std::vector<Point2f>& centers,
                                   const std::vector<Point2f> &keypoints, std::vector<Point2f> &warpedKeypoints)
{
-  assert( !centers.empty() );
+  CV_Assert( !centers.empty() );
  const float edgeLength = 30;
  const Point2f offset(150, 150);

@@ -832,7 +832,7 @@ Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const std::vector<Poin
    }
  }

-  Mat H = findHomography(Mat(centers), Mat(dstPoints), CV_RANSAC);
+  Mat H = findHomography(Mat(centers), Mat(dstPoints), RANSAC);
  //Mat H = findHomography( Mat( corners ), Mat( dstPoints ) );

  std::vector<Point2f> srcKeypoints;
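A minimal sketch of the renamed RANSAC constant as seen from user code; the correspondences below are made up and are unrelated to the circles-grid code above.

#include "opencv2/calib3d.hpp"
#include <vector>

int main()
{
    // Four or more correspondences are required; these values are made up.
    std::vector<cv::Point2f> src, dst;
    src.push_back(cv::Point2f(0,0)); dst.push_back(cv::Point2f(0,0));
    src.push_back(cv::Point2f(1,0)); dst.push_back(cv::Point2f(2,0));
    src.push_back(cv::Point2f(1,1)); dst.push_back(cv::Point2f(2,2));
    src.push_back(cv::Point2f(0,1)); dst.push_back(cv::Point2f(0,2));
    src.push_back(cv::Point2f(2,2)); dst.push_back(cv::Point2f(4,4));

    // cv::RANSAC replaces the old CV_RANSAC define; 3.0 is the reprojection threshold in pixels.
    cv::Mat inlierMask;
    cv::Mat H = cv::findHomography(src, dst, cv::RANSAC, 3.0, inlierMask);
    return H.empty() ? 1 : 0;
}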
@@ -912,7 +912,7 @@ void CirclesGridFinder::findCandidateLine(std::vector<size_t> &line, size_t seed
    }
  }

-  assert( line.size() == seeds.size() );
+  CV_Assert( line.size() == seeds.size() );
}

void CirclesGridFinder::findCandidateHoles(std::vector<size_t> &above, std::vector<size_t> &below, bool addRow, Point2f basisVec,
@@ -927,9 +927,9 @@ void CirclesGridFinder::findCandidateHoles(std::vector<size_t> &above, std::vect
  size_t lastIdx = addRow ? holes.size() - 1 : holes[0].size() - 1;
  findCandidateLine(below, lastIdx, addRow, basisVec, belowSeeds);

-  assert( below.size() == above.size() );
-  assert( belowSeeds.size() == aboveSeeds.size() );
-  assert( below.size() == belowSeeds.size() );
+  CV_Assert( below.size() == above.size() );
+  CV_Assert( belowSeeds.size() == aboveSeeds.size() );
+  CV_Assert( below.size() == belowSeeds.size() );
}

bool CirclesGridFinder::areCentersNew(const std::vector<size_t> &newCenters, const std::vector<std::vector<size_t> > &holes)
@@ -1000,10 +1000,10 @@ void CirclesGridFinder::insertWinner(float aboveConfidence, float belowConfidenc
float CirclesGridFinder::computeGraphConfidence(const std::vector<Graph> &basisGraphs, bool addRow,
                                                const std::vector<size_t> &points, const std::vector<size_t> &seeds)
{
-  assert( points.size() == seeds.size() );
+  CV_Assert( points.size() == seeds.size() );
  float confidence = 0;
  const size_t vCount = basisGraphs[0].getVerticesCount();
-  assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );
+  CV_Assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );

  for (size_t i = 0; i < seeds.size(); i++)
  {
@@ -1087,7 +1087,7 @@ void CirclesGridFinder::findBasis(const std::vector<Point2f> &samples, std::vect
  const int clustersCount = 4;
  kmeans(Mat(samples).reshape(1, 0), clustersCount, bestLabels, termCriteria, parameters.kmeansAttempts,
         KMEANS_RANDOM_CENTERS, centers);
-  assert( centers.type() == CV_32FC1 );
+  CV_Assert( centers.type() == CV_32FC1 );

  std::vector<int> basisIndices;
  //TODO: only remove duplicate
@@ -1204,7 +1204,7 @@ void CirclesGridFinder::computeRNG(Graph &rng, std::vector<cv::Point2f> &vectors

void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessorMatrix)
{
-  assert( dm.type() == CV_32SC1 );
+  CV_Assert( dm.type() == CV_32SC1 );
  predecessorMatrix.create(verticesCount, verticesCount, CV_32SC1);
  predecessorMatrix = -1;
  for (int i = 0; i < predecessorMatrix.rows; i++)
@@ -1253,7 +1253,6 @@ size_t CirclesGridFinder::findLongestPath(std::vector<Graph> &basisGraphs, Path

  double maxVal;
  Point maxLoc;
-  assert (infinity < 0);
  minMaxLoc(distanceMatrix, 0, &maxVal, 0, &maxLoc);

  if (maxVal > longestPaths[0].length)
@@ -1594,9 +1593,3 @@ size_t CirclesGridFinder::getFirstCorner(std::vector<Point> &largeCornerIndices,

  return cornerIdx;
}
-
-bool cv::findCirclesGridDefault( InputArray image, Size patternSize,
-                                 OutputArray centers, int flags )
-{
-    return findCirclesGrid(image, patternSize, centers, flags);
-}
@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

/************************************************************************************\
       Some backward compatibility stuff, to be moved to legacy or compat module
@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

CvStereoBMState* cvCreateStereoBMState( int /*preset*/, int numberOfDisparities )
{
@@ -83,10 +84,6 @@ void cvReleaseStereoBMState( CvStereoBMState** state )
    cvFree( state );
}

-template<> void cv::Ptr<CvStereoBMState>::delete_obj()
-{ cvReleaseStereoBMState(&obj); }
-

void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
                                   CvArr* disparr, CvStereoBMState* state )
{
@@ -2,6 +2,7 @@
#define epnp_h

#include "precomp.hpp"
+#include "opencv2/core/core_c.h"

class epnp {
public:
@@ -435,7 +435,7 @@ cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, double f
    threshold /= focal;

    Mat E;
-    if( method == CV_RANSAC )
+    if( method == RANSAC )
        createRANSACPointSetRegistrator(new EMEstimatorCallback, 5, threshold, prob)->run(points1, points2, E, _mask);
    else
        createLMeDSPointSetRegistrator(new EMEstimatorCallback, 5, prob)->run(points1, points2, E, _mask);
@@ -181,12 +181,12 @@ public:
                    LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
            }
        completeSymm( _LtL );

        eigen( _LtL, matW, matV );
        _Htemp = _invHnorm*_H0;
        _H0 = _Htemp*_Hnorm2;
        _H0.convertTo(_model, _H0.type(), 1./_H0.at<double>(2,2) );

        return 1;
    }

@@ -292,7 +292,7 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
    {
        npoints = p.checkVector(3, -1, false);
        if( npoints < 0 )
-            CV_Error(CV_StsBadArg, "The input arrays should be 2D or 3D point sets");
+            CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
        if( npoints == 0 )
            return Mat();
        convertPointsFromHomogeneous(p, p);
@@ -317,7 +317,7 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
    else if( method == LMEDS )
        result = createLMeDSPointSetRegistrator(cb, 4, confidence, maxIters)->run(src, dst, H, tempMask);
    else
-        CV_Error(CV_StsBadArg, "Unknown estimation method");
+        CV_Error(Error::StsBadArg, "Unknown estimation method");

    if( result && npoints > 4 )
    {
@@ -475,7 +475,7 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
    return n;
}


static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
{
    double a[9*9], w[9], v[9*9];
@@ -585,11 +585,11 @@ static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
    gemm( T2, F0, 1., 0, 0., TF, GEMM_1_T );
    F0 = Mat(3, 3, CV_64F, fmatrix);
    gemm( TF, T1, 1., 0, 0., F0, 0 );

    // make F(3,3) = 1
    if( fabs(F0.at<double>(2,2)) > FLT_EPSILON )
        F0 *= 1./F0.at<double>(2,2);

    return 1;
}

@@ -671,7 +671,7 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
    {
        npoints = p.checkVector(3, -1, false);
        if( npoints < 0 )
-            CV_Error(CV_StsBadArg, "The input arrays should be 2D or 3D point sets");
+            CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
        if( npoints == 0 )
            return Mat();
        convertPointsFromHomogeneous(p, p);
@@ -739,7 +739,7 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
    {
        npoints = points.checkVector(3);
        if( npoints < 0 )
-            CV_Error( CV_StsBadArg, "The input should be a 2D or 3D point set");
+            CV_Error( Error::StsBadArg, "The input should be a 2D or 3D point set");
        Mat temp;
        convertPointsFromHomogeneous(points, temp);
        points = temp;
@@ -893,7 +893,7 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
        }
    }
    else
-        CV_Error(CV_StsUnsupportedFormat, "");
+        CV_Error(Error::StsUnsupportedFormat, "");
}

@@ -974,7 +974,7 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
        }
    }
    else
-        CV_Error(CV_StsUnsupportedFormat, "");
+        CV_Error(Error::StsUnsupportedFormat, "");
}

@@ -39,6 +39,7 @@
//
//M*/
#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

/* POSIT structure */
struct CvPOSITObject
@@ -53,7 +53,7 @@ namespace cv
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
    if( modelPoints <= 0 )
-        CV_Error( CV_StsOutOfRange, "the number of model points should be positive" );
+        CV_Error( Error::StsOutOfRange, "the number of model points should be positive" );

    p = MAX(p, 0.);
    p = MIN(p, 1.);
@@ -108,7 +108,7 @@ static void findCorner(const std::vector<Point2f>& contour, Point2f point, Point
            min_idx = (int)i;
        }
    }
-    assert(min_idx >= 0);
+    CV_Assert(min_idx >= 0);

    // temporary solution, have to make something more precise
    corner = contour[min_idx];
@@ -43,6 +43,8 @@
#include "precomp.hpp"
#include "epnp.h"
#include "p3p.h"
+#include "opencv2/calib3d/calib3d_c.h"

#include <iostream>
using namespace cv;

@@ -57,7 +59,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
    _tvec.create(3, 1, CV_64F);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

-    if (flags == CV_EPNP)
+    if (flags == EPNP)
    {
        cv::Mat undistortedPoints;
        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
@@ -68,7 +70,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
        cv::Rodrigues(R, rvec);
        return true;
    }
-    else if (flags == CV_P3P)
+    else if (flags == P3P)
    {
        CV_Assert( npoints == 4);
        cv::Mat undistortedPoints;
@@ -81,7 +83,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
        cv::Rodrigues(R, rvec);
        return result;
    }
-    else if (flags == CV_ITERATIVE)
+    else if (flags == ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
@@ -342,7 +344,7 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,

    if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
    {
-        if (flags != CV_P3P)
+        if (flags != P3P)
        {
            int i, pointsCount = (int)localInliers.size();
            Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
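A short sketch of cv::solvePnP called with the unprefixed flag names used in the hunks above; the camera matrix and correspondences below are made up.

#include "opencv2/calib3d.hpp"
#include <vector>

int main()
{
    // Made-up 3D-2D correspondences of a planar square.
    std::vector<cv::Point3f> objectPoints;
    std::vector<cv::Point2f> imagePoints;
    objectPoints.push_back(cv::Point3f(0,0,0)); imagePoints.push_back(cv::Point2f(320,240));
    objectPoints.push_back(cv::Point3f(1,0,0)); imagePoints.push_back(cv::Point2f(420,240));
    objectPoints.push_back(cv::Point3f(1,1,0)); imagePoints.push_back(cv::Point2f(420,340));
    objectPoints.push_back(cv::Point3f(0,1,0)); imagePoints.push_back(cv::Point2f(320,340));

    cv::Mat K = (cv::Mat_<double>(3,3) << 500, 0, 320,  0, 500, 240,  0, 0, 1);
    cv::Mat rvec, tvec;

    // cv::ITERATIVE replaces the old CV_ITERATIVE define (likewise EPNP / P3P).
    cv::solvePnP(objectPoints, imagePoints, K, cv::noArray(), rvec, tvec, false, cv::ITERATIVE);
    return 0;
}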
@@ -84,7 +84,7 @@ struct StereoBMParams
    int disp12MaxDiff;
    int dispType;
};


static void prefilterNorm( const Mat& src, Mat& dst, int winsize, int ftzero, uchar* buf )
{
@@ -783,46 +783,46 @@ public:
    {
        params = StereoBMParams(_numDisparities, _SADWindowSize);
    }

    void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
    {
        Mat left0 = leftarr.getMat(), right0 = rightarr.getMat();
        int dtype = disparr.fixedType() ? disparr.type() : params.dispType;

        if (left0.size() != right0.size())
-            CV_Error( CV_StsUnmatchedSizes, "All the images must have the same size" );
+            CV_Error( Error::StsUnmatchedSizes, "All the images must have the same size" );

        if (left0.type() != CV_8UC1 || right0.type() != CV_8UC1)
-            CV_Error( CV_StsUnsupportedFormat, "Both input images must have CV_8UC1" );
+            CV_Error( Error::StsUnsupportedFormat, "Both input images must have CV_8UC1" );

        if (dtype != CV_16SC1 && dtype != CV_32FC1)
-            CV_Error( CV_StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
+            CV_Error( Error::StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );

        disparr.create(left0.size(), dtype);
        Mat disp0 = disparr.getMat();

        if( params.preFilterType != PREFILTER_NORMALIZED_RESPONSE &&
            params.preFilterType != PREFILTER_XSOBEL )
-            CV_Error( CV_StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );
+            CV_Error( Error::StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );

        if( params.preFilterSize < 5 || params.preFilterSize > 255 || params.preFilterSize % 2 == 0 )
-            CV_Error( CV_StsOutOfRange, "preFilterSize must be odd and be within 5..255" );
+            CV_Error( Error::StsOutOfRange, "preFilterSize must be odd and be within 5..255" );

        if( params.preFilterCap < 1 || params.preFilterCap > 63 )
-            CV_Error( CV_StsOutOfRange, "preFilterCap must be within 1..63" );
+            CV_Error( Error::StsOutOfRange, "preFilterCap must be within 1..63" );

        if( params.SADWindowSize < 5 || params.SADWindowSize > 255 || params.SADWindowSize % 2 == 0 ||
            params.SADWindowSize >= std::min(left0.cols, left0.rows) )
-            CV_Error( CV_StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );
+            CV_Error( Error::StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );

        if( params.numDisparities <= 0 || params.numDisparities % 16 != 0 )
-            CV_Error( CV_StsOutOfRange, "numDisparities must be positive and divisble by 16" );
+            CV_Error( Error::StsOutOfRange, "numDisparities must be positive and divisble by 16" );

        if( params.textureThreshold < 0 )
-            CV_Error( CV_StsOutOfRange, "texture threshold must be non-negative" );
+            CV_Error( Error::StsOutOfRange, "texture threshold must be non-negative" );

        if( params.uniquenessRatio < 0 )
-            CV_Error( CV_StsOutOfRange, "uniqueness ratio must be non-negative" );
+            CV_Error( Error::StsOutOfRange, "uniqueness ratio must be non-negative" );

        preFilteredImg0.create( left0.size(), CV_8U );
        preFilteredImg1.create( left0.size(), CV_8U );
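For orientation, a configuration sketch that satisfies the parameter checks enforced in compute() above; it assumes the usual StereoBM setters (setPreFilterSize, setPreFilterCap, setTextureThreshold, setUniquenessRatio) exposed by the new abstract class, and uses blank images purely as stand-ins.

#include "opencv2/calib3d.hpp"

int main()
{
    cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64, 15); // numDisparities % 16 == 0, odd SAD window
    bm->setPreFilterType(cv::StereoBM::PREFILTER_XSOBEL);
    bm->setPreFilterSize(9);      // odd, within 5..255
    bm->setPreFilterCap(31);      // within 1..63
    bm->setTextureThreshold(10);  // non-negative
    bm->setUniquenessRatio(15);   // non-negative

    cv::Mat left  = cv::Mat::zeros(480, 640, CV_8UC1);
    cv::Mat right = cv::Mat::zeros(480, 640, CV_8UC1);
    cv::Mat disp;
    bm->compute(left, right, disp); // both inputs CV_8UC1 and equally sized, as required
    return 0;
}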
@@ -887,15 +887,15 @@ public:
                        R2.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
                        params.minDisparity, params.numDisparities,
                        params.SADWindowSize);

        parallel_for_(Range(0, nstripes),
                      FindStereoCorrespInvoker(left, right, disp, &params, nstripes,
                                               bufSize0, useShorts, validDisparityRect,
                                               slidingSumBuf, cost));

        if( params.speckleRange >= 0 && params.speckleWindowSize > 0 )
            filterSpeckles(disp, FILTERED, params.speckleWindowSize, params.speckleRange, slidingSumBuf);

        if (disp0.data != disp.data)
            disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
    }
@@ -963,7 +963,7 @@ public:
    void read(const FileNode& fn)
    {
        FileNode n = fn["name"];
-        CV_Assert( n.isString() && strcmp(n.node->data.str.ptr, name_) == 0 );
+        CV_Assert( n.isString() && String(n) == name_ );
        params.minDisparity = (int)fn["minDisparity"];
        params.numDisparities = (int)fn["numDisparities"];
        params.SADWindowSize = (int)fn["blockSize"];
@@ -919,7 +919,7 @@ public:
    void read(const FileNode& fn)
    {
        FileNode n = fn["name"];
-        CV_Assert( n.isString() && strcmp(n.node->data.str.ptr, name_) == 0 );
+        CV_Assert( n.isString() && String(n) == name_ );
        params.minDisparity = (int)fn["minDisparity"];
        params.numDisparities = (int)fn["numDisparities"];
        params.SADWindowSize = (int)fn["blockSize"];
@@ -40,6 +40,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

// cvCorrectMatches function is Copyright (C) 2009, Jostein Austvik Jacobsen.
// cvTriangulatePoints function is derived from icvReconstructPointsFor3View, originally by Valery Mosyagin.
@@ -40,6 +40,7 @@
//M*/

#include "test_precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

#include <limits>

@@ -327,7 +327,7 @@ protected:
    Mat camMat_est = Mat::eye(3, 3, CV_64F), distCoeffs_est = Mat::zeros(1, 5, CV_64F);
    vector<Mat> rvecs_est, tvecs_est;

-    int flags = /*CV_CALIB_FIX_K3|*/CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
+    int flags = /*CALIB_FIX_K3|*/CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, DBL_EPSILON);
    double rep_error = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs_est, tvecs_est, flags, criteria);
    rep_error /= brdsNum * cornersSize.area();
@@ -41,6 +41,7 @@

#include "test_precomp.hpp"
#include "test_chessboardgenerator.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

#include <iostream>

@ -161,15 +161,15 @@ Mat cv::ChessBoardGenerator::generateChessBoard(const Mat& bg, const Mat& camMat
|
|||||||
if (rendererResolutionMultiplier == 1)
|
if (rendererResolutionMultiplier == 1)
|
||||||
{
|
{
|
||||||
result = bg.clone();
|
result = bg.clone();
|
||||||
drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
|
drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
|
||||||
drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
|
drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Mat tmp;
|
Mat tmp;
|
||||||
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
|
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
|
||||||
drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
|
drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
|
||||||
drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
|
drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
|
||||||
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
|
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,14 +57,14 @@ void show_points( const Mat& gray, const Mat& u, const vector<Point2f>& v, Size
|
|||||||
merge(vector<Mat>(3, gray), rgb);
|
merge(vector<Mat>(3, gray), rgb);
|
||||||
|
|
||||||
for(size_t i = 0; i < v.size(); i++ )
|
for(size_t i = 0; i < v.size(); i++ )
|
||||||
circle( rgb, v[i], 3, CV_RGB(255, 0, 0), CV_FILLED);
|
circle( rgb, v[i], 3, Scalar(255, 0, 0), FILLED);
|
||||||
|
|
||||||
if( !u.empty() )
|
if( !u.empty() )
|
||||||
{
|
{
|
||||||
const Point2f* u_data = u.ptr<Point2f>();
|
const Point2f* u_data = u.ptr<Point2f>();
|
||||||
size_t count = u.cols * u.rows;
|
size_t count = u.cols * u.rows;
|
||||||
for(size_t i = 0; i < count; i++ )
|
for(size_t i = 0; i < count; i++ )
|
||||||
circle( rgb, u_data[i], 3, CV_RGB(0, 255, 0), CV_FILLED);
|
circle( rgb, u_data[i], 3, Scalar(0, 255, 0), FILLED);
|
||||||
}
|
}
|
||||||
if (!v.empty())
|
if (!v.empty())
|
||||||
{
|
{
|
||||||
@ -208,7 +208,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
|
|||||||
}
|
}
|
||||||
|
|
||||||
int progress = 0;
|
int progress = 0;
|
||||||
int max_idx = board_list.node->data.seq->total/2;
|
int max_idx = board_list.size()/2;
|
||||||
double sum_error = 0.0;
|
double sum_error = 0.0;
|
||||||
int count = 0;
|
int count = 0;
|
||||||
|
|
||||||
@ -244,7 +244,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
|
|||||||
switch( pattern )
|
switch( pattern )
|
||||||
{
|
{
|
||||||
case CHESSBOARD:
|
case CHESSBOARD:
|
||||||
result = findChessboardCorners(gray, pattern_size, v, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
|
result = findChessboardCorners(gray, pattern_size, v, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
|
||||||
break;
|
break;
|
||||||
case CIRCLES_GRID:
|
case CIRCLES_GRID:
|
||||||
result = findCirclesGrid(gray, pattern_size, v);
|
result = findCirclesGrid(gray, pattern_size, v);
|
||||||
@ -459,7 +459,7 @@ bool CV_ChessboardDetectorTest::checkByGenerator()
|
|||||||
vector<Point>& cnt = cnts[0];
|
vector<Point>& cnt = cnts[0];
|
||||||
cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
|
cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
|
||||||
cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
|
cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
|
||||||
cv::drawContours(cb, cnts, -1, Scalar::all(128), CV_FILLED);
|
cv::drawContours(cb, cnts, -1, Scalar::all(128), FILLED);
|
||||||
|
|
||||||
found = findChessboardCorners(cb, cbg.cornersSize(), corners_found);
|
found = findChessboardCorners(cb, cbg.cornersSize(), corners_found);
|
||||||
if (found)
|
if (found)
|
||||||
|
@ -41,6 +41,7 @@
|
|||||||
|
|
||||||
#include "test_precomp.hpp"
|
#include "test_precomp.hpp"
|
||||||
#include "test_chessboardgenerator.hpp"
|
#include "test_chessboardgenerator.hpp"
|
||||||
|
#include "opencv2/calib3d/calib3d_c.h"
|
||||||
|
|
||||||
#include <limits>
|
#include <limits>
|
||||||
|
|
||||||
|
@ -41,6 +41,7 @@
|
|||||||
|
|
||||||
#include "test_precomp.hpp"
|
#include "test_precomp.hpp"
|
||||||
#include "opencv2/imgproc/imgproc_c.h"
|
#include "opencv2/imgproc/imgproc_c.h"
|
||||||
|
#include "opencv2/calib3d/calib3d_c.h"
|
||||||
|
|
||||||
class CV_ChessboardDetectorTimingTest : public cvtest::BaseTest
|
class CV_ChessboardDetectorTimingTest : public cvtest::BaseTest
|
||||||
{
|
{
|
||||||
|
@ -40,6 +40,7 @@
|
|||||||
//M*/
|
//M*/
|
||||||
|
|
||||||
#include "test_precomp.hpp"
|
#include "test_precomp.hpp"
|
||||||
|
#include "opencv2/calib3d/calib3d_c.h"
|
||||||
|
|
||||||
using namespace cv;
|
using namespace cv;
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
@ -65,7 +65,7 @@
|
|||||||
#define METHODS_COUNT 3
|
#define METHODS_COUNT 3
|
||||||
|
|
||||||
int NORM_TYPE[COUNT_NORM_TYPES] = {cv::NORM_L1, cv::NORM_L2, cv::NORM_INF};
|
int NORM_TYPE[COUNT_NORM_TYPES] = {cv::NORM_L1, cv::NORM_L2, cv::NORM_INF};
|
||||||
int METHOD[METHODS_COUNT] = {0, CV_RANSAC, CV_LMEDS};
|
int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS};
|
||||||
|
|
||||||
using namespace cv;
|
using namespace cv;
|
||||||
using namespace std;
|
using namespace std;
|
||||||
@ -309,7 +309,7 @@ void CV_HomographyTest::run(int)
|
|||||||
switch (method)
|
switch (method)
|
||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
case CV_LMEDS:
|
case LMEDS:
|
||||||
{
|
{
|
||||||
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method),
|
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method),
|
||||||
cv::findHomography(src_mat_2f, dst_vec, method),
|
cv::findHomography(src_mat_2f, dst_vec, method),
|
||||||
@ -339,14 +339,14 @@ void CV_HomographyTest::run(int)
|
|||||||
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
case CV_RANSAC:
|
case RANSAC:
|
||||||
{
|
{
|
||||||
cv::Mat mask [4]; double diff;
|
cv::Mat mask [4]; double diff;
|
||||||
|
|
||||||
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[0]),
|
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask[0]),
|
||||||
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask[1]),
|
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask[1]),
|
||||||
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[2]),
|
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask[2]),
|
||||||
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask[3]) };
|
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask[3]) };
|
||||||
|
|
||||||
for (int j = 0; j < 4; ++j)
|
for (int j = 0; j < 4; ++j)
|
||||||
{
|
{
|
||||||
@ -411,7 +411,7 @@ void CV_HomographyTest::run(int)
|
|||||||
switch (method)
|
switch (method)
|
||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
case CV_LMEDS:
|
case LMEDS:
|
||||||
{
|
{
|
||||||
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f),
|
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f),
|
||||||
cv::findHomography(src_mat_2f, dst_vec),
|
cv::findHomography(src_mat_2f, dst_vec),
|
||||||
@ -466,14 +466,14 @@ void CV_HomographyTest::run(int)
|
|||||||
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
case CV_RANSAC:
|
case RANSAC:
|
||||||
{
|
{
|
||||||
cv::Mat mask_res [4];
|
cv::Mat mask_res [4];
|
||||||
|
|
||||||
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[0]),
|
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask_res[0]),
|
||||||
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask_res[1]),
|
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask_res[1]),
|
||||||
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[2]),
|
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask_res[2]),
|
||||||
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask_res[3]) };
|
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask_res[3]) };
|
||||||
|
|
||||||
for (int j = 0; j < 4; ++j)
|
for (int j = 0; j < 4; ++j)
|
||||||
{
|
{
|
||||||
@@ -40,6 +40,7 @@
//M*/

#include "test_precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

using namespace cv;
using namespace std;

@@ -41,6 +41,7 @@
//M*/

#include "test_precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"
#include <string>
#include <limits>

@@ -54,9 +54,9 @@ class CV_solvePnPRansac_Test : public cvtest::BaseTest
public:
CV_solvePnPRansac_Test()
{
-eps[CV_ITERATIVE] = 1.0e-2;
+eps[ITERATIVE] = 1.0e-2;
-eps[CV_EPNP] = 1.0e-2;
+eps[EPNP] = 1.0e-2;
-eps[CV_P3P] = 1.0e-2;
+eps[P3P] = 1.0e-2;
totalTestsCount = 10;
}
~CV_solvePnPRansac_Test() {}

@@ -193,9 +193,9 @@ class CV_solvePnP_Test : public CV_solvePnPRansac_Test
public:
CV_solvePnP_Test()
{
-eps[CV_ITERATIVE] = 1.0e-6;
+eps[ITERATIVE] = 1.0e-6;
-eps[CV_EPNP] = 1.0e-6;
+eps[EPNP] = 1.0e-6;
-eps[CV_P3P] = 1.0e-4;
+eps[P3P] = 1.0e-4;
totalTestsCount = 1000;
}

@@ -75,7 +75,7 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
if( !texturelessMask && !texturedMask )
return;
if( _img.empty() )
-CV_Error( CV_StsBadArg, "img is empty" );
+CV_Error( Error::StsBadArg, "img is empty" );

Mat img = _img;
if( _img.channels() > 1)

@@ -95,21 +95,21 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
void checkTypeAndSizeOfDisp( const Mat& dispMap, const Size* sz )
{
if( dispMap.empty() )
-CV_Error( CV_StsBadArg, "dispMap is empty" );
+CV_Error( Error::StsBadArg, "dispMap is empty" );
if( dispMap.type() != CV_32FC1 )
-CV_Error( CV_StsBadArg, "dispMap must have CV_32FC1 type" );
+CV_Error( Error::StsBadArg, "dispMap must have CV_32FC1 type" );
if( sz && (dispMap.rows != sz->height || dispMap.cols != sz->width) )
-CV_Error( CV_StsBadArg, "dispMap has incorrect size" );
+CV_Error( Error::StsBadArg, "dispMap has incorrect size" );
}

void checkTypeAndSizeOfMask( const Mat& mask, Size sz )
{
if( mask.empty() )
-CV_Error( CV_StsBadArg, "mask is empty" );
+CV_Error( Error::StsBadArg, "mask is empty" );
if( mask.type() != CV_8UC1 )
-CV_Error( CV_StsBadArg, "mask must have CV_8UC1 type" );
+CV_Error( Error::StsBadArg, "mask must have CV_8UC1 type" );
if( mask.rows != sz.height || mask.cols != sz.width )
-CV_Error( CV_StsBadArg, "mask has incorrect size" );
+CV_Error( Error::StsBadArg, "mask has incorrect size" );
}

void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDispMap,

@@ -143,7 +143,7 @@ void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDisp
minMaxLoc( rightDispMap, &rightMinVal, 0, 0, 0, ~rightUnknDispMask );
}
if( leftMinVal < 0 || rightMinVal < 0)
-CV_Error( CV_StsBadArg, "known disparity values must be positive" );
+CV_Error( Error::StsBadArg, "known disparity values must be positive" );
}

/*

@@ -163,7 +163,7 @@ void computeOcclusionBasedMasks( const Mat& leftDisp, const Mat& _rightDisp,
if( _rightDisp.empty() )
{
if( !rightUnknDispMask.empty() )
-CV_Error( CV_StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
+CV_Error( Error::StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
rightDisp.create(leftDisp.size(), CV_32FC1);
rightDisp.setTo(Scalar::all(0) );
for( int leftY = 0; leftY < leftDisp.rows; leftY++ )

@@ -230,9 +230,9 @@ void computeDepthDiscontMask( const Mat& disp, Mat& depthDiscontMask, const Mat&
float dispGap = EVAL_DISP_GAP, int discontWidth = EVAL_DISCONT_WIDTH )
{
if( disp.empty() )
-CV_Error( CV_StsBadArg, "disp is empty" );
+CV_Error( Error::StsBadArg, "disp is empty" );
if( disp.type() != CV_32FC1 )
-CV_Error( CV_StsBadArg, "disp must have CV_32FC1 type" );
+CV_Error( Error::StsBadArg, "disp must have CV_32FC1 type" );
if( !unknDispMask.empty() )
checkTypeAndSizeOfMask( unknDispMask, disp.size() );

@@ -571,9 +571,9 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
if( isWrite )
{
fs << caseNames[caseIdx] << "{";
-cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
+//cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
writeErrors( RMS_STR, rmss, &fs );
-cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
+//cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
writeErrors( BAD_PXLS_FRACTION_STR, badPxlsFractions, &fs );
fs << "}"; // datasetName
}
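Note: the test hunks above swap the C macro error codes (CV_StsBadArg) for the scoped cv::Error constants. A hedged sketch of an argument check in the new style; the helper name is illustrative only:

#include "opencv2/core.hpp"

// Illustrative validator in the post-split style: CV_Error throws cv::Exception.
static void checkDisparityInput(const cv::Mat& disp)
{
    if (disp.empty())
        CV_Error(cv::Error::StsBadArg, "disp is empty");
    if (disp.type() != CV_32FC1)
        CV_Error(cv::Error::StsBadArg, "disp must have CV_32FC1 type");
}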
@@ -48,6 +48,8 @@
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"

+#include "opencv2/core/core_c.h"

#include <ostream>

#ifdef __cplusplus

@@ -41,6 +41,7 @@

#include "precomp.hpp"
#include "opencv2/calib3d.hpp"
+#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>

using namespace cv;

@@ -60,7 +60,7 @@ void CvMeanShiftTracker::newTrackingWindow(Mat image, Rect selection)
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};

-cvtColor(image, hsv, CV_BGR2HSV);
+cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);

hue.create(hsv.size(), CV_8UC2);

@@ -83,7 +83,7 @@ RotatedRect CvMeanShiftTracker::updateTrackingWindow(Mat image)
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};

-cvtColor(image, hsv, CV_BGR2HSV);
+cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);
hue.create(hsv.size(), CV_8UC2);
mixChannels(&hsv, 1, &hue, 1, channels, 2);

@@ -80,7 +80,7 @@ CvFeatureTracker::~CvFeatureTracker()
void CvFeatureTracker::newTrackingWindow(Mat image, Rect selection)
{
image.copyTo(prev_image);
-cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);
+cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);
prev_trackwindow = selection;
prev_center.x = selection.x;
prev_center.y = selection.y;

@@ -131,7 +131,7 @@ Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
curr_keys.push_back(curr_keypoints[matches[i].trainIdx].pt);
}

-Mat T = findHomography(prev_keys, curr_keys, CV_LMEDS);
+Mat T = findHomography(prev_keys, curr_keys, LMEDS);

prev_trackwindow.x += cvRound(T.at<double> (0, 2));
prev_trackwindow.y += cvRound(T.at<double> (1, 2));

@@ -148,12 +148,12 @@ Rect CvFeatureTracker::updateTrackingWindowWithFlow(Mat image)
ittr++;
Size subPixWinSize(10,10), winSize(31,31);
Mat image_bw;
-TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
+TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);
std::vector<uchar> status;
std::vector<float> err;

-cvtColor(image, image_bw, CV_BGR2GRAY);
+cvtColor(image, image_bw, COLOR_BGR2GRAY);
-cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);
+cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);

if (ittr == 1)
{
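Note: the tracker hunks above move to the C++ names for color-conversion codes and termination criteria. A minimal standalone sketch of the same pattern around a pyramidal LK call; the function and variable names are illustrative assumptions:

#include "opencv2/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include <vector>

void trackPoints(const cv::Mat& prevBgr, const cv::Mat& currBgr,
                 std::vector<cv::Point2f>& prevPts, std::vector<cv::Point2f>& currPts)
{
    cv::Mat prevGray, currGray;
    cv::cvtColor(prevBgr, prevGray, cv::COLOR_BGR2GRAY);   // was CV_BGR2GRAY
    cv::cvtColor(currBgr, currGray, cv::COLOR_BGR2GRAY);

    // was CV_TERMCRIT_ITER | CV_TERMCRIT_EPS
    cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prevGray, currGray, prevPts, currPts,
                             status, err, cv::Size(31, 31), 3, termcrit);
}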
@@ -109,6 +109,52 @@ public:
CV_EXPORTS void error( const Exception& exc );


+enum { SORT_EVERY_ROW = 0,
+SORT_EVERY_COLUMN = 1,
+SORT_ASCENDING = 0,
+SORT_DESCENDING = 16
+};

+enum { COVAR_SCRAMBLED = 0,
+COVAR_NORMAL = 1,
+COVAR_USE_AVG = 2,
+COVAR_SCALE = 4,
+COVAR_ROWS = 8,
+COVAR_COLS = 16
+};

+/*!
+k-Means flags
+*/
+enum { KMEANS_RANDOM_CENTERS = 0, // Chooses random centers for k-Means initialization
+KMEANS_PP_CENTERS = 2, // Uses k-Means++ algorithm for initialization
+KMEANS_USE_INITIAL_LABELS = 1 // Uses the user-provided labels for K-Means initialization
+};

+enum { FILLED = -1,
+LINE_4 = 4,
+LINE_8 = 8,
+LINE_AA = 16
+};

+enum { FONT_HERSHEY_SIMPLEX = 0,
+FONT_HERSHEY_PLAIN = 1,
+FONT_HERSHEY_DUPLEX = 2,
+FONT_HERSHEY_COMPLEX = 3,
+FONT_HERSHEY_TRIPLEX = 4,
+FONT_HERSHEY_COMPLEX_SMALL = 5,
+FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
+FONT_HERSHEY_SCRIPT_COMPLEX = 7,
+FONT_ITALIC = 16
+};

+enum { REDUCE_SUM = 0,
+REDUCE_AVG = 1,
+REDUCE_MAX = 2,
+REDUCE_MIN = 3
+};


//! swaps two matrices
CV_EXPORTS void swap(Mat& a, Mat& b);

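Note: this hunk consolidates the sort, covariance, k-means, drawing, font and reduce constants as anonymous enums in core.hpp. A hedged sketch of the relocated drawing and font constants in use (the canvas and text are placeholders, not from the patch):

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

void annotate(cv::Mat& canvas)
{
    // FILLED and LINE_AA now live directly in the cv namespace (core.hpp).
    cv::circle(canvas, cv::Point(40, 40), 10, cv::Scalar(0, 0, 255), cv::FILLED, cv::LINE_AA);
    cv::putText(canvas, "demo", cv::Point(10, 90), cv::FONT_HERSHEY_SIMPLEX,
                1.0, cv::Scalar(255, 255, 255), 2, cv::LINE_AA);
}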
@@ -371,14 +417,6 @@ CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_L
CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
OutputArray dst, int flags = DECOMP_LU);

-enum
-{
-SORT_EVERY_ROW = 0,
-SORT_EVERY_COLUMN = 1,
-SORT_ASCENDING = 0,
-SORT_DESCENDING = 16
-};

//! sorts independently each matrix row or each matrix column
CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);

@@ -395,16 +433,6 @@ CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters
CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,
OutputArray eigenvectors = noArray());

-enum
-{
-COVAR_SCRAMBLED = 0,
-COVAR_NORMAL = 1,
-COVAR_USE_AVG = 2,
-COVAR_SCALE = 4,
-COVAR_ROWS = 8,
-COVAR_COLS = 16
-};

//! computes covariation matrix of a set of samples
CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean,
int flags, int ctype = CV_64F); //TODO: InputArrayOfArrays

@@ -454,16 +482,6 @@ CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c,
//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently
CV_EXPORTS_W int getOptimalDFTSize(int vecsize);

-/*!
-k-Means flags
-*/
-enum
-{
-KMEANS_RANDOM_CENTERS = 0, // Chooses random centers for k-Means initialization
-KMEANS_PP_CENTERS = 2, // Uses k-Means++ algorithm for initialization
-KMEANS_USE_INITIAL_LABELS = 1 // Uses the user-provided labels for K-Means initialization
-};

//! clusters the input data using k-Means algorithm
CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,
TermCriteria criteria, int attempts,

@@ -481,12 +499,6 @@ CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev
//! shuffles the input array elements
CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);

-enum { FILLED = -1,
-LINE_4 = 4,
-LINE_8 = 8,
-LINE_AA = 16
-};

//! draws the line segment (pt1, pt2) in the image
CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
int thickness = 1, int lineType = LINE_8, int shift = 0);

@@ -562,19 +574,6 @@ CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,
int arcStart, int arcEnd, int delta,
CV_OUT std::vector<Point>& pts );

-enum
-{
-FONT_HERSHEY_SIMPLEX = 0,
-FONT_HERSHEY_PLAIN = 1,
-FONT_HERSHEY_DUPLEX = 2,
-FONT_HERSHEY_COMPLEX = 3,
-FONT_HERSHEY_TRIPLEX = 4,
-FONT_HERSHEY_COMPLEX_SMALL = 5,
-FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
-FONT_HERSHEY_SCRIPT_COMPLEX = 7,
-FONT_ITALIC = 16
-};

//! renders text string in the image
CV_EXPORTS_W void putText( Mat& img, const String& text, Point org,
int fontFace, double fontScale, Scalar color,

@@ -694,7 +693,10 @@ public:
class CV_EXPORTS SVD
{
public:
-enum { MODIFY_A = 1, NO_UV = 2, FULL_UV = 4 };
+enum { MODIFY_A = 1,
+NO_UV = 2,
+FULL_UV = 4
+};

//! the default constructor
SVD();

@@ -861,7 +863,9 @@ public:
class CV_EXPORTS RNG
{
public:
-enum { UNIFORM = 0, NORMAL = 1 };
+enum { UNIFORM = 0,
+NORMAL = 1
+};

RNG();
RNG(uint64 state);

@@ -109,7 +109,8 @@ enum {
GpuNotSupported= -216,
GpuApiCallError= -217,
OpenGlNotSupported= -218,
-OpenGlApiCallError= -219
+OpenGlApiCallError= -219,
+OpenCLApiCallError= -220
};
} //Error

@@ -43,10 +43,8 @@
#ifndef __OPENCV_FEATURES_2D_HPP__
#define __OPENCV_FEATURES_2D_HPP__

-#ifdef __cplusplus
#include "opencv2/core.hpp"
#include "opencv2/flann/miniflann.hpp"
-#include <limits>

namespace cv
{

@@ -1521,8 +1519,4 @@ protected:

} /* namespace cv */

-#endif /* __cplusplus */

#endif

-/* End of file. */

@@ -241,7 +241,7 @@ TWeight GCGraph<TWeight>::maxFlow()

// find the minimum edge weight along the path
minWeight = edgePtr[e0].weight;
-assert( minWeight > 0 );
+CV_Assert( minWeight > 0 );
// k = 1: source tree, k = 0: destination tree
for( int k = 1; k >= 0; k-- )
{

@@ -251,11 +251,11 @@ TWeight GCGraph<TWeight>::maxFlow()
break;
weight = edgePtr[ei^k].weight;
minWeight = MIN(minWeight, weight);
-assert( minWeight > 0 );
+CV_Assert( minWeight > 0 );
}
weight = fabs(v->weight);
minWeight = MIN(minWeight, weight);
-assert( minWeight > 0 );
+CV_Assert( minWeight > 0 );
}

// modify weights of the edges along the path and collect orphans

@@ -188,13 +188,13 @@ public class Calib3dTest extends OpenCVTestCase {
assertTrue(!corners.empty());
}

-public void testFindCirclesGridDefaultMatSizeMat() {
+public void testFindCirclesGridMatSizeMat() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();

-assertFalse(Calib3d.findCirclesGridDefault(img, new Size(5, 5), centers));
+assertFalse(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));

for (int i = 0; i < 5; i++)
for (int j = 0; j < 5; j++) {

@@ -202,20 +202,20 @@ public class Calib3dTest extends OpenCVTestCase {
Core.circle(img, pt, 10, new Scalar(0), -1);
}

-assertTrue(Calib3d.findCirclesGridDefault(img, new Size(5, 5), centers));
+assertTrue(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));

assertEquals(25, centers.rows());
assertEquals(1, centers.cols());
assertEquals(CvType.CV_32FC2, centers.type());
}

-public void testFindCirclesGridDefaultMatSizeMatInt() {
+public void testFindCirclesGridMatSizeMatInt() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();

-assertFalse(Calib3d.findCirclesGridDefault(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
+assertFalse(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));

int step = size * 2 / 15;

@@ -227,7 +227,7 @@ public class Calib3dTest extends OpenCVTestCase {
Core.circle(img, pt, 10, new Scalar(0), -1);
}

-assertTrue(Calib3d.findCirclesGridDefault(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
+assertTrue(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));

assertEquals(15, centers.rows());
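Note: the Java test hunks above drop the findCirclesGridDefault alias in favour of the single findCirclesGrid entry point. A hedged sketch of the equivalent C++ call; the 4x11 pattern size is only an example (the Java tests use 3x5 and 5x5 grids):

#include "opencv2/calib3d.hpp"
#include <vector>

bool detectCircleGrid(const cv::Mat& gray, std::vector<cv::Point2f>& centers)
{
    // Assumed asymmetric circle pattern; flags mirror the ones used in the test above.
    cv::Size patternSize(4, 11);
    return cv::findCirclesGrid(gray, patternSize, centers,
                               cv::CALIB_CB_ASYMMETRIC_GRID | cv::CALIB_CB_CLUSTERING);
}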
@@ -43,11 +43,11 @@
#define __OPENCV_LEGACY_HPP__

#include "opencv2/imgproc/imgproc_c.h"
-#include "opencv2/features2d.hpp"
+#include "opencv2/calib3d/calib3d_c.h"
-#include "opencv2/calib3d.hpp"
#include "opencv2/ml.hpp"

#ifdef __cplusplus
+#include "opencv2/features2d.hpp"
extern "C" {
#endif

@@ -40,6 +40,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

#include <stdio.h>
#include <map>

@@ -39,6 +39,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/video/tracking_c.h"

/*======================= KALMAN FILTER =========================*/
/* State vector is (x,y,w,h,dx,dy,dw,dh). */

@@ -39,6 +39,7 @@
//
//M*/
#include "precomp.hpp"
+#include "opencv2/video/tracking_c.h"

CvCamShiftTracker::CvCamShiftTracker()
{

@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

CvMat cvMatArray( int rows, int cols, int type,
int count, void* data)

@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d.hpp"
#include <stdio.h>

namespace cv

@@ -40,6 +40,7 @@
//M*/

#include "precomp.hpp"
+#include "opencv2/calib3d/calib3d_c.h"

//#include "cvtypes.h"
#include <float.h>

@@ -42,6 +42,7 @@

#include "test_precomp.hpp"
#include "opencv2/video/tracking.hpp"
+#include "opencv2/video/tracking_c.h"

#include <string>
#include <iostream>

@@ -3,5 +3,4 @@ if(BUILD_ANDROID_PACKAGE)
endif()

set(the_description "Functionality with possible limitations on the use")
-ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_gpu opencv_ocl)

@@ -45,8 +45,6 @@

#include "opencv2/features2d.hpp"

-#ifdef __cplusplus

namespace cv
{

@@ -58,9 +56,9 @@ namespace cv
class CV_EXPORTS_W SIFT : public Feature2D
{
public:
-CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3,
+CV_WRAP explicit SIFT( int nfeatures = 0, int nOctaveLayers = 3,
-double contrastThreshold=0.04, double edgeThreshold=10,
+double contrastThreshold = 0.04, double edgeThreshold = 10,
-double sigma=1.6);
+double sigma = 1.6);

//! returns the descriptor size in floats (128)
CV_WRAP int descriptorSize() const;

@@ -76,7 +74,7 @@ public:
void operator()(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
-bool useProvidedKeypoints=false) const;
+bool useProvidedKeypoints = false) const;

AlgorithmInfo* info() const;

@@ -86,7 +84,7 @@ public:
std::vector<KeyPoint>& keypoints ) const;

protected:
-void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask = Mat() ) const;
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;

CV_PROP_RW int nfeatures;

@@ -111,8 +109,8 @@ public:
CV_WRAP SURF();
//! the full constructor taking all the necessary parameters
explicit CV_WRAP SURF(double hessianThreshold,
-int nOctaves=4, int nOctaveLayers=2,
+int nOctaves = 4, int nOctaveLayers = 2,
-bool extended=true, bool upright=false);
+bool extended = true, bool upright = false);

//! returns the descriptor size in float's (64 or 128)
CV_WRAP int descriptorSize() const;

@@ -127,7 +125,7 @@ public:
void operator()(InputArray img, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
-bool useProvidedKeypoints=false) const;
+bool useProvidedKeypoints = false) const;

AlgorithmInfo* info() const;

@@ -139,7 +137,7 @@ public:

protected:

-void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+void detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask = Mat() ) const;
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
};

@@ -148,8 +146,4 @@ typedef SURF SurfDescriptorExtractor;

} /* namespace cv */

-#endif /* __cplusplus */

#endif

-/* End of file. */

@@ -121,4 +121,4 @@ namespace cv
}
}

#endif //__OPENCV_NONFREE_OCL_HPP__

@@ -182,7 +182,7 @@ public:

if (use_mask)
{
-CV_Error(CV_StsBadFunc, "Masked SURF detector is not implemented yet");
+CV_Error(Error::StsBadFunc, "Masked SURF detector is not implemented yet");
//!FIXME
// temp fix for missing min overload
//oclMat temp(mask.size(), mask.type());

@@ -275,12 +275,16 @@ static Mat readMatFromBin( const string& filename )
size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);

-uchar* data = (uchar*)cvAlloc(dataSize);
+size_t step = dataSize / rows / CV_ELEM_SIZE(type);
-size_t elements_read = fread( (void*)data, 1, dataSize, f );
+CV_Assert(step >= (size_t)cols);

+Mat m = Mat( rows, step, type).colRange(0, cols);

+size_t elements_read = fread( m.ptr(), 1, dataSize, f );
CV_Assert(elements_read == (size_t)(dataSize));
fclose(f);

-return Mat( rows, cols, type, data );
+return m;
}
return Mat();
}

@@ -402,7 +406,7 @@ protected:
double t = (double)getTickCount();
dextractor->compute( img, keypoints, calcDescriptors );
t = getTickCount() - t;
-ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)cvGetTickFrequency()*1000.)/calcDescriptors.rows );
+ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)getTickFrequency()*1000.)/calcDescriptors.rows );

if( calcDescriptors.rows != (int)keypoints.size() )
{

@@ -7,11 +7,12 @@
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -43,248 +44,10 @@
#ifndef __OPENCV_OBJDETECT_HPP__
#define __OPENCV_OBJDETECT_HPP__

-#ifdef __cplusplus
+#include "opencv2/core.hpp"
-# include "opencv2/core.hpp"
-#endif
-#include "opencv2/core/core_c.h"

-#ifdef __cplusplus
+typedef struct CvLatentSvmDetector CvLatentSvmDetector;
-#include <map>
+typedef struct CvHaarClassifierCascade CvHaarClassifierCascade;
-#include <deque>

-extern "C" {
-#endif

-/****************************************************************************************\
-* Haar-like Object Detection functions *
-\****************************************************************************************/

-#define CV_HAAR_MAGIC_VAL 0x42500000
-#define CV_TYPE_NAME_HAAR "opencv-haar-classifier"

-#define CV_IS_HAAR_CLASSIFIER( haar ) \
-((haar) != NULL && \
-(((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL)

-#define CV_HAAR_FEATURE_MAX 3

-typedef struct CvHaarFeature
-{
-int tilted;
-struct
-{
-CvRect r;
-float weight;
-} rect[CV_HAAR_FEATURE_MAX];
-} CvHaarFeature;

-typedef struct CvHaarClassifier
-{
-int count;
-CvHaarFeature* haar_feature;
-float* threshold;
-int* left;
-int* right;
-float* alpha;
-} CvHaarClassifier;

-typedef struct CvHaarStageClassifier
-{
-int count;
-float threshold;
-CvHaarClassifier* classifier;

-int next;
-int child;
-int parent;
-} CvHaarStageClassifier;

-typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;

-typedef struct CvHaarClassifierCascade
-{
-int flags;
-int count;
-CvSize orig_window_size;
-CvSize real_window_size;
-double scale;
-CvHaarStageClassifier* stage_classifier;
-CvHidHaarClassifierCascade* hid_cascade;
-} CvHaarClassifierCascade;

-typedef struct CvAvgComp
-{
-CvRect rect;
-int neighbors;
-} CvAvgComp;

-/* Loads haar classifier cascade from a directory.
-It is obsolete: convert your cascade to xml and use cvLoad instead */
-CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade(
-const char* directory, CvSize orig_window_size);

-CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );

-#define CV_HAAR_DO_CANNY_PRUNING 1
-#define CV_HAAR_SCALE_IMAGE 2
-#define CV_HAAR_FIND_BIGGEST_OBJECT 4
-#define CV_HAAR_DO_ROUGH_SEARCH 8

-//CVAPI(CvSeq*) cvHaarDetectObjectsForROC( const CvArr* image,
-// CvHaarClassifierCascade* cascade, CvMemStorage* storage,
-// CvSeq** rejectLevels, CvSeq** levelWeightds,
-// double scale_factor CV_DEFAULT(1.1),
-// int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
-// CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)),
-// bool outputRejectLevels = false );


-CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image,
-CvHaarClassifierCascade* cascade, CvMemStorage* storage,
-double scale_factor CV_DEFAULT(1.1),
-int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
-CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)));

-/* sets images for haar classifier cascade */
-CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade,
-const CvArr* sum, const CvArr* sqsum,
-const CvArr* tilted_sum, double scale );

-/* runs the cascade on the specified window */
-CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
-CvPoint pt, int start_stage CV_DEFAULT(0));


-/****************************************************************************************\
-* Latent SVM Object Detection functions *
-\****************************************************************************************/

-// DataType: STRUCT position
-// Structure describes the position of the filter in the feature pyramid
-// l - level in the feature pyramid
-// (x, y) - coordinate in level l
-typedef struct CvLSVMFilterPosition
-{
-int x;
-int y;
-int l;
-} CvLSVMFilterPosition;

-// DataType: STRUCT filterObject
-// Description of the filter, which corresponds to the part of the object
-// V - ideal (penalty = 0) position of the partial filter
-// from the root filter position (V_i in the paper)
-// penaltyFunction - vector describes penalty function (d_i in the paper)
-// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2
-// FILTER DESCRIPTION
-// Rectangular map (sizeX x sizeY),
-// every cell stores feature vector (dimension = p)
-// H - matrix of feature vectors
-// to set and get feature vectors (i,j)
-// used formula H[(j * sizeX + i) * p + k], where
-// k - component of feature vector in cell (i, j)
-// END OF FILTER DESCRIPTION
-typedef struct CvLSVMFilterObject{
-CvLSVMFilterPosition V;
-float fineFunction[4];
-int sizeX;
-int sizeY;
-int numFeatures;
-float *H;
-} CvLSVMFilterObject;

-// data type: STRUCT CvLatentSvmDetector
-// structure contains internal representation of trained Latent SVM detector
-// num_filters - total number of filters (root plus part) in model
-// num_components - number of components in model
-// num_part_filters - array containing number of part filters for each component
-// filters - root and part filters for all model components
-// b - biases for all model components
-// score_threshold - confidence level threshold
-typedef struct CvLatentSvmDetector
-{
-int num_filters;
-int num_components;
-int* num_part_filters;
-CvLSVMFilterObject** filters;
-float* b;
-float score_threshold;
-}
-CvLatentSvmDetector;

-// data type: STRUCT CvObjectDetection
-// structure contains the bounding box and confidence level for detected object
-// rect - bounding box for a detected object
-// score - confidence level
-typedef struct CvObjectDetection
-{
-CvRect rect;
-float score;
-} CvObjectDetection;

-//////////////// Object Detection using Latent SVM //////////////


-/*
-// load trained detector from a file
-//
-// API
-// CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename);
-// INPUT
-// filename - path to the file containing the parameters of
-- trained Latent SVM detector
-// OUTPUT
-// trained Latent SVM detector in internal representation
-*/
-CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename);

-/*
-// release memory allocated for CvLatentSvmDetector structure
-//
-// API
-// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
-// INPUT
-// detector - CvLatentSvmDetector structure to be released
-// OUTPUT
-*/
-CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);

-/*
-// find rectangular regions in the given image that are likely
-// to contain objects and corresponding confidence levels
-//
-// API
-// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
-// CvLatentSvmDetector* detector,
-// CvMemStorage* storage,
-// float overlap_threshold = 0.5f,
-// int numThreads = -1);
-// INPUT
-// image - image to detect objects in
-// detector - Latent SVM detector in internal representation
-// storage - memory storage to store the resultant sequence
-// of the object candidate rectangles
-// overlap_threshold - threshold for the non-maximum suppression algorithm
-= 0.5f [here will be the reference to original paper]
-// OUTPUT
-// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
-*/
-CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image,
-CvLatentSvmDetector* detector,
-CvMemStorage* storage,
-float overlap_threshold CV_DEFAULT(0.5f),
-int numThreads CV_DEFAULT(-1));

-#ifdef __cplusplus
-}

-CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,
-CvHaarClassifierCascade* cascade, CvMemStorage* storage,
-std::vector<int>& rejectLevels, std::vector<double>& levelWeightds,
-double scale_factor CV_DEFAULT(1.1),
-int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
-CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)),
-bool outputRejectLevels = false );

namespace cv
{

@@ -303,24 +66,24 @@ public:
struct CV_EXPORTS ObjectDetection
{
ObjectDetection();
-ObjectDetection( const Rect& rect, float score, int classID=-1 );
+ObjectDetection( const Rect& rect, float score, int classID = -1 );
Rect rect;
float score;
int classID;
};

LatentSvmDetector();
-LatentSvmDetector( const std::vector<String>& filenames, const std::vector<String>& classNames=std::vector<String>() );
+LatentSvmDetector( const std::vector<String>& filenames, const std::vector<String>& classNames = std::vector<String>() );
virtual ~LatentSvmDetector();

virtual void clear();
virtual bool empty() const;
-bool load( const std::vector<String>& filenames, const std::vector<String>& classNames=std::vector<String>() );
+bool load( const std::vector<String>& filenames, const std::vector<String>& classNames = std::vector<String>() );

virtual void detect( const Mat& image,
std::vector<ObjectDetection>& objectDetections,
-float overlapThreshold=0.5f,
+float overlapThreshold = 0.5f,
-int numThreads=-1 );
+int numThreads = -1 );

const std::vector<String>& getClassNames() const;
size_t getClassCount() const;

@@ -330,19 +93,22 @@ private:
std::vector<String> classNames;
};

-CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT std::vector<Rect>& rectList, int groupThreshold, double eps=0.2);
+CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps = 0.2);
-CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights, int groupThreshold, double eps=0.2);
+CV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights, int groupThreshold, double eps = 0.2);
-CV_EXPORTS void groupRectangles( std::vector<Rect>& rectList, int groupThreshold, double eps, std::vector<int>* weights, std::vector<double>* levelWeights );
+CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps, std::vector<int>* weights, std::vector<double>* levelWeights );
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, std::vector<int>& rejectLevels,
-std::vector<double>& levelWeights, int groupThreshold, double eps=0.2);
+std::vector<double>& levelWeights, int groupThreshold, double eps = 0.2);
CV_EXPORTS void groupRectangles_meanshift(std::vector<Rect>& rectList, std::vector<double>& foundWeights, std::vector<double>& foundScales,
double detectThreshold = 0.0, Size winDetSize = Size(64, 128));


class CV_EXPORTS FeatureEvaluator
{
public:
-enum { HAAR = 0, LBP = 1, HOG = 2 };
+enum { HAAR = 0,
+LBP = 1,
+HOG = 2
+};

virtual ~FeatureEvaluator();

virtual bool read(const FileNode& node);

@@ -360,13 +126,11 @@ public:

template<> CV_EXPORTS void Ptr<CvHaarClassifierCascade>::delete_obj();

-enum
+enum { CASCADE_DO_CANNY_PRUNING = 1,
-{
+CASCADE_SCALE_IMAGE = 2,
-CASCADE_DO_CANNY_PRUNING=1,
+CASCADE_FIND_BIGGEST_OBJECT = 4,
-CASCADE_SCALE_IMAGE=2,
+CASCADE_DO_ROUGH_SEARCH = 8
-CASCADE_FIND_BIGGEST_OBJECT=4,
+};
-CASCADE_DO_ROUGH_SEARCH=8
-};

class CV_EXPORTS_W CascadeClassifier
{

@@ -380,20 +144,20 @@ public:
virtual bool read( const FileNode& node );
CV_WRAP virtual void detectMultiScale( const Mat& image,
CV_OUT std::vector<Rect>& objects,
-double scaleFactor=1.1,
+double scaleFactor = 1.1,
-int minNeighbors=3, int flags=0,
+int minNeighbors = 3, int flags = 0,
-Size minSize=Size(),
+Size minSize = Size(),
-Size maxSize=Size() );
+Size maxSize = Size() );

CV_WRAP virtual void detectMultiScale( const Mat& image,
CV_OUT std::vector<Rect>& objects,
CV_OUT std::vector<int>& rejectLevels,
CV_OUT std::vector<double>& levelWeights,
-double scaleFactor=1.1,
+double scaleFactor = 1.1,
-int minNeighbors=3, int flags=0,
+int minNeighbors = 3, int flags = 0,
-Size minSize=Size(),
+Size minSize = Size(),
-Size maxSize=Size(),
+Size maxSize = Size(),
-bool outputRejectLevels=false );
+bool outputRejectLevels = false );


bool isOldFormatCascade() const;
@ -402,17 +166,18 @@ public:
|
|||||||
bool setImage( const Mat& );
|
bool setImage( const Mat& );
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
//virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
|
|
||||||
// int stripSize, int yStep, double factor, std::vector<Rect>& candidates );
|
|
||||||
|
|
||||||
virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
|
virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
|
||||||
int stripSize, int yStep, double factor, std::vector<Rect>& candidates,
|
int stripSize, int yStep, double factor, std::vector<Rect>& candidates,
|
||||||
std::vector<int>& rejectLevels, std::vector<double>& levelWeights, bool outputRejectLevels=false);
|
std::vector<int>& rejectLevels, std::vector<double>& levelWeights, bool outputRejectLevels = false);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
enum { BOOST = 0 };
|
enum { BOOST = 0
|
||||||
enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2,
|
};
|
||||||
FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 };
|
enum { DO_CANNY_PRUNING = CASCADE_DO_CANNY_PRUNING,
|
||||||
|
SCALE_IMAGE = CASCADE_SCALE_IMAGE,
|
||||||
|
FIND_BIGGEST_OBJECT = CASCADE_FIND_BIGGEST_OBJECT,
|
||||||
|
DO_ROUGH_SEARCH = CASCADE_DO_ROUGH_SEARCH
|
||||||
|
};
|
||||||
|
|
||||||
friend class CascadeClassifierInvoker;
|
friend class CascadeClassifierInvoker;
|
||||||
|
|
||||||
@ -507,8 +272,10 @@ struct DetectionROI
|
|||||||
struct CV_EXPORTS_W HOGDescriptor
|
struct CV_EXPORTS_W HOGDescriptor
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
enum { L2Hys=0 };
|
enum { L2Hys = 0
|
||||||
enum { DEFAULT_NLEVELS=64 };
|
};
|
||||||
|
enum { DEFAULT_NLEVELS = 64
|
||||||
|
};
|
||||||
|
|
||||||
CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
|
CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
|
||||||
cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
|
cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
|
||||||
@ -548,38 +315,38 @@ public:
|
|||||||
virtual bool read(FileNode& fn);
|
virtual bool read(FileNode& fn);
|
||||||
virtual void write(FileStorage& fs, const String& objname) const;
|
virtual void write(FileStorage& fs, const String& objname) const;
|
||||||
|
|
||||||
CV_WRAP virtual bool load(const String& filename, const String& objname=String());
|
CV_WRAP virtual bool load(const String& filename, const String& objname = String());
|
||||||
CV_WRAP virtual void save(const String& filename, const String& objname=String()) const;
|
CV_WRAP virtual void save(const String& filename, const String& objname = String()) const;
|
||||||
virtual void copyTo(HOGDescriptor& c) const;
|
virtual void copyTo(HOGDescriptor& c) const;
|
||||||
|
|
||||||
CV_WRAP virtual void compute(const Mat& img,
|
CV_WRAP virtual void compute(const Mat& img,
|
||||||
CV_OUT std::vector<float>& descriptors,
|
CV_OUT std::vector<float>& descriptors,
|
||||||
Size winStride=Size(), Size padding=Size(),
|
Size winStride = Size(), Size padding = Size(),
|
||||||
const std::vector<Point>& locations=std::vector<Point>()) const;
|
const std::vector<Point>& locations = std::vector<Point>()) const;
|
||||||
//with found weights output
|
//with found weights output
|
||||||
CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
|
CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
|
||||||
CV_OUT std::vector<double>& weights,
|
CV_OUT std::vector<double>& weights,
|
||||||
double hitThreshold=0, Size winStride=Size(),
|
double hitThreshold = 0, Size winStride = Size(),
|
||||||
Size padding=Size(),
|
Size padding = Size(),
|
||||||
const std::vector<Point>& searchLocations=std::vector<Point>()) const;
|
const std::vector<Point>& searchLocations = std::vector<Point>()) const;
|
||||||
//without found weights output
|
//without found weights output
|
||||||
virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
|
virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
|
||||||
double hitThreshold=0, Size winStride=Size(),
|
double hitThreshold = 0, Size winStride = Size(),
|
||||||
Size padding=Size(),
|
Size padding = Size(),
|
||||||
const std::vector<Point>& searchLocations=std::vector<Point>()) const;
|
const std::vector<Point>& searchLocations=std::vector<Point>()) const;
|
||||||
//with result weights output
|
//with result weights output
|
||||||
CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT std::vector<Rect>& foundLocations,
|
CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT std::vector<Rect>& foundLocations,
|
||||||
CV_OUT std::vector<double>& foundWeights, double hitThreshold=0,
|
CV_OUT std::vector<double>& foundWeights, double hitThreshold = 0,
|
||||||
Size winStride=Size(), Size padding=Size(), double scale=1.05,
|
Size winStride = Size(), Size padding = Size(), double scale = 1.05,
|
||||||
double finalThreshold=2.0,bool useMeanshiftGrouping = false) const;
|
double finalThreshold = 2.0,bool useMeanshiftGrouping = false) const;
|
||||||
//without found weights output
|
//without found weights output
|
||||||
virtual void detectMultiScale(const Mat& img, CV_OUT std::vector<Rect>& foundLocations,
|
virtual void detectMultiScale(const Mat& img, CV_OUT std::vector<Rect>& foundLocations,
|
||||||
double hitThreshold=0, Size winStride=Size(),
|
double hitThreshold = 0, Size winStride = Size(),
|
||||||
Size padding=Size(), double scale=1.05,
|
Size padding = Size(), double scale = 1.05,
|
||||||
double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;
|
double finalThreshold = 2.0, bool useMeanshiftGrouping = false) const;
|
||||||
|
|
||||||
CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
|
CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
|
||||||
Size paddingTL=Size(), Size paddingBR=Size()) const;
|
Size paddingTL = Size(), Size paddingBR = Size()) const;
|
||||||
|
|
||||||
CV_WRAP static std::vector<float> getDefaultPeopleDetector();
|
CV_WRAP static std::vector<float> getDefaultPeopleDetector();
|
||||||
CV_WRAP static std::vector<float> getDaimlerPeopleDetector();
|
CV_WRAP static std::vector<float> getDaimlerPeopleDetector();
|
||||||
@ -618,430 +385,14 @@ public:
|
|||||||
|
|
||||||
CV_EXPORTS_W void findDataMatrix(InputArray image,
|
CV_EXPORTS_W void findDataMatrix(InputArray image,
|
||||||
CV_OUT std::vector<String>& codes,
|
CV_OUT std::vector<String>& codes,
|
||||||
OutputArray corners=noArray(),
|
OutputArray corners = noArray(),
|
||||||
OutputArrayOfArrays dmtx=noArray());
|
OutputArrayOfArrays dmtx = noArray());
|
||||||
|
|
||||||
CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image,
|
CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image,
|
||||||
const std::vector<String>& codes,
|
const std::vector<String>& codes,
|
||||||
InputArray corners);
|
InputArray corners);
|
||||||
}
|
}
|
||||||
|
|
||||||
+#include "opencv2/objdetect/linemod.hpp"

[removed block: the Datamatrix C declarations (CvDataMatrixCode, cvFindDataMatrix) and the
 entire cv::linemod API, reproduced verbatim below in the new files objdetect_c.h and linemod.hpp]

#endif
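The hunks above only reshape the C++ face-detection interface, so a short usage sketch may help; it is not part of the commit. A minimal example of CascadeClassifier::detectMultiScale with the CASCADE_* flags shown above, where the cascade XML and the input image name are placeholders (imread/cvtColor live in highgui/imgproc in this source tree):

#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <cstdio>

int main()
{
    cv::CascadeClassifier face_cascade;
    if (!face_cascade.load("haarcascade_frontalface_alt.xml"))   // placeholder path
        return 1;

    cv::Mat img = cv::imread("people.jpg"), gray;                // placeholder image
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale(gray, faces,
                                  1.1,                       // scaleFactor
                                  3,                         // minNeighbors
                                  cv::CASCADE_SCALE_IMAGE,   // flags
                                  cv::Size(30, 30));         // minSize
    std::printf("%d face(s) found\n", (int)faces.size());
    return 0;
}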
455   modules/objdetect/include/opencv2/objdetect/linemod.hpp   (new file)
@ -0,0 +1,455 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_OBJDETECT_LINEMOD_HPP__
#define __OPENCV_OBJDETECT_LINEMOD_HPP__

#include "opencv2/core.hpp"
#include <map>

/****************************************************************************************\
*                                      LINE-MOD                                          *
\****************************************************************************************/

namespace cv {
namespace linemod {

/// @todo Convert doxy comments to rst

/**
 * \brief Discriminant feature described by its location and label.
 */
struct CV_EXPORTS Feature
{
  int x; ///< x offset
  int y; ///< y offset
  int label; ///< Quantization

  Feature() : x(0), y(0), label(0) {}
  Feature(int x, int y, int label);

  void read(const FileNode& fn);
  void write(FileStorage& fs) const;
};

inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}

struct CV_EXPORTS Template
{
  int width;
  int height;
  int pyramid_level;
  std::vector<Feature> features;

  void read(const FileNode& fn);
  void write(FileStorage& fs) const;
};

/**
 * \brief Represents a modality operating over an image pyramid.
 */
class QuantizedPyramid
{
public:
  // Virtual destructor
  virtual ~QuantizedPyramid() {}

  /**
   * \brief Compute quantized image at current pyramid level for online detection.
   *
   * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set,
   *                 representing its classification.
   */
  virtual void quantize(Mat& dst) const =0;

  /**
   * \brief Extract most discriminant features at current pyramid level to form a new template.
   *
   * \param[out] templ The new template.
   */
  virtual bool extractTemplate(Template& templ) const =0;

  /**
   * \brief Go to the next pyramid level.
   *
   * \todo Allow pyramid scale factor other than 2
   */
  virtual void pyrDown() =0;

protected:
  /// Candidate feature with a score
  struct Candidate
  {
    Candidate(int x, int y, int label, float score);

    /// Sort candidates with high score to the front
    bool operator<(const Candidate& rhs) const
    {
      return score > rhs.score;
    }

    Feature f;
    float score;
  };

  /**
   * \brief Choose candidate features so that they are not bunched together.
   *
   * \param[in]  candidates   Candidate features sorted by score.
   * \param[out] features     Destination vector of selected features.
   * \param[in]  num_features Number of candidates to select.
   * \param[in]  distance     Hint for desired distance between features.
   */
  static void selectScatteredFeatures(const std::vector<Candidate>& candidates,
                                      std::vector<Feature>& features,
                                      size_t num_features, float distance);
};

inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}

/**
 * \brief Interface for modalities that plug into the LINE template matching representation.
 *
 * \todo Max response, to allow optimization of summing (255/MAX) features as uint8
 */
class CV_EXPORTS Modality
{
public:
  // Virtual destructor
  virtual ~Modality() {}

  /**
   * \brief Form a quantized image pyramid from a source image.
   *
   * \param[in] src  The source image. Type depends on the modality.
   * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero
   *                 in quantized image and cannot be extracted as features.
   */
  Ptr<QuantizedPyramid> process(const Mat& src,
                                const Mat& mask = Mat()) const
  {
    return processImpl(src, mask);
  }

  virtual String name() const =0;

  virtual void read(const FileNode& fn) =0;
  virtual void write(FileStorage& fs) const =0;

  /**
   * \brief Create modality by name.
   *
   * The following modality types are supported:
   * - "ColorGradient"
   * - "DepthNormal"
   */
  static Ptr<Modality> create(const String& modality_type);

  /**
   * \brief Load a modality from file.
   */
  static Ptr<Modality> create(const FileNode& fn);

protected:
  // Indirection is because process() has a default parameter.
  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
                                            const Mat& mask) const =0;
};

/**
 * \brief Modality that computes quantized gradient orientations from a color image.
 */
class CV_EXPORTS ColorGradient : public Modality
{
public:
  /**
   * \brief Default constructor. Uses reasonable default parameter values.
   */
  ColorGradient();

  /**
   * \brief Constructor.
   *
   * \param weak_threshold   When quantizing, discard gradients with magnitude less than this.
   * \param num_features     How many features a template must contain.
   * \param strong_threshold Consider as candidate features only gradients whose norms are
   *                         larger than this.
   */
  ColorGradient(float weak_threshold, size_t num_features, float strong_threshold);

  virtual String name() const;

  virtual void read(const FileNode& fn);
  virtual void write(FileStorage& fs) const;

  float weak_threshold;
  size_t num_features;
  float strong_threshold;

protected:
  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
                                            const Mat& mask) const;
};

/**
 * \brief Modality that computes quantized surface normals from a dense depth map.
 */
class CV_EXPORTS DepthNormal : public Modality
{
public:
  /**
   * \brief Default constructor. Uses reasonable default parameter values.
   */
  DepthNormal();

  /**
   * \brief Constructor.
   *
   * \param distance_threshold   Ignore pixels beyond this distance.
   * \param difference_threshold When computing normals, ignore contributions of pixels whose
   *                             depth difference with the central pixel is above this threshold.
   * \param num_features         How many features a template must contain.
   * \param extract_threshold    Consider as candidate feature only if there are no differing
   *                             orientations within a distance of extract_threshold.
   */
  DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
              int extract_threshold);

  virtual String name() const;

  virtual void read(const FileNode& fn);
  virtual void write(FileStorage& fs) const;

  int distance_threshold;
  int difference_threshold;
  size_t num_features;
  int extract_threshold;

protected:
  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
                                            const Mat& mask) const;
};

/**
 * \brief Debug function to colormap a quantized image for viewing.
 */
void colormap(const Mat& quantized, Mat& dst);

/**
 * \brief Represents a successful template match.
 */
struct CV_EXPORTS Match
{
  Match()
  {
  }

  Match(int x, int y, float similarity, const String& class_id, int template_id);

  /// Sort matches with high similarity to the front
  bool operator<(const Match& rhs) const
  {
    // Secondarily sort on template_id for the sake of duplicate removal
    if (similarity != rhs.similarity)
      return similarity > rhs.similarity;
    else
      return template_id < rhs.template_id;
  }

  bool operator==(const Match& rhs) const
  {
    return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id;
  }

  int x;
  int y;
  float similarity;
  String class_id;
  int template_id;
};

inline
Match::Match(int _x, int _y, float _similarity, const String& _class_id, int _template_id)
    : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)
{}

/**
 * \brief Object detector using the LINE template matching algorithm with any set of
 * modalities.
 */
class CV_EXPORTS Detector
{
public:
  /**
   * \brief Empty constructor, initialize with read().
   */
  Detector();

  /**
   * \brief Constructor.
   *
   * \param modalities       Modalities to use (color gradients, depth normals, ...).
   * \param T_pyramid        Value of the sampling step T at each pyramid level. The
   *                         number of pyramid levels is T_pyramid.size().
   */
  Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);

  /**
   * \brief Detect objects by template matching.
   *
   * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
   *
   * \param      sources   Source images, one for each modality.
   * \param      threshold Similarity threshold, a percentage between 0 and 100.
   * \param[out] matches   Template matches, sorted by similarity score.
   * \param      class_ids If non-empty, only search for the desired object classes.
   * \param[out] quantized_images Optionally return vector<Mat> of quantized images.
   * \param      masks     The masks for consideration during matching. The masks should be CV_8UC1
   *                       where 255 represents a valid pixel. If non-empty, the vector must be
   *                       the same size as sources. Each element must be
   *                       empty or the same size as its corresponding source.
   */
  void match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,
             const std::vector<String>& class_ids = std::vector<String>(),
             OutputArrayOfArrays quantized_images = noArray(),
             const std::vector<Mat>& masks = std::vector<Mat>()) const;

  /**
   * \brief Add new object template.
   *
   * \param      sources      Source images, one for each modality.
   * \param      class_id     Object class ID.
   * \param      object_mask  Mask separating object from background.
   * \param[out] bounding_box Optionally return bounding box of the extracted features.
   *
   * \return Template ID, or -1 if failed to extract a valid template.
   */
  int addTemplate(const std::vector<Mat>& sources, const String& class_id,
                  const Mat& object_mask, Rect* bounding_box = NULL);

  /**
   * \brief Add a new object template computed by external means.
   */
  int addSyntheticTemplate(const std::vector<Template>& templates, const String& class_id);

  /**
   * \brief Get the modalities used by this detector.
   *
   * You are not permitted to add/remove modalities, but you may dynamic_cast them to
   * tweak parameters.
   */
  const std::vector< Ptr<Modality> >& getModalities() const { return modalities; }

  /**
   * \brief Get sampling step T at pyramid_level.
   */
  int getT(int pyramid_level) const { return T_at_level[pyramid_level]; }

  /**
   * \brief Get number of pyramid levels used by this detector.
   */
  int pyramidLevels() const { return pyramid_levels; }

  /**
   * \brief Get the template pyramid identified by template_id.
   *
   * For example, with 2 modalities (Gradient, Normal) and two pyramid levels
   * (L0, L1), the order is (GradientL0, NormalL0, GradientL1, NormalL1).
   */
  const std::vector<Template>& getTemplates(const String& class_id, int template_id) const;

  int numTemplates() const;
  int numTemplates(const String& class_id) const;
  int numClasses() const { return static_cast<int>(class_templates.size()); }

  std::vector<String> classIds() const;

  void read(const FileNode& fn);
  void write(FileStorage& fs) const;

  String readClass(const FileNode& fn, const String &class_id_override = "");
  void writeClass(const String& class_id, FileStorage& fs) const;

  void readClasses(const std::vector<String>& class_ids,
                   const String& format = "templates_%s.yml.gz");
  void writeClasses(const String& format = "templates_%s.yml.gz") const;

protected:
  std::vector< Ptr<Modality> > modalities;
  int pyramid_levels;
  std::vector<int> T_at_level;

  typedef std::vector<Template> TemplatePyramid;
  typedef std::map<String, std::vector<TemplatePyramid> > TemplatesMap;
  TemplatesMap class_templates;

  typedef std::vector<Mat> LinearMemories;
  // Indexed as [pyramid level][modality][quantized label]
  typedef std::vector< std::vector<LinearMemories> > LinearMemoryPyramid;

  void matchClass(const LinearMemoryPyramid& lm_pyramid,
                  const std::vector<Size>& sizes,
                  float threshold, std::vector<Match>& matches,
                  const String& class_id,
                  const std::vector<TemplatePyramid>& template_pyramids) const;
};

/**
 * \brief Factory function for detector using LINE algorithm with color gradients.
 *
 * Default parameter settings suitable for VGA images.
 */
CV_EXPORTS Ptr<Detector> getDefaultLINE();

/**
 * \brief Factory function for detector using LINE-MOD algorithm with color gradients
 * and depth normals.
 *
 * Default parameter settings suitable for VGA images.
 */
CV_EXPORTS Ptr<Detector> getDefaultLINEMOD();

} // namespace linemod
} // namespace cv

#endif // __OPENCV_OBJDETECT_LINEMOD_HPP__
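The new header declares the LINE/LINE-MOD template-matching API, but the commit ships no sample, so the following is an illustrative sketch only. It assumes a single color-gradient modality (getDefaultLINE), placeholder image files, and an arbitrary similarity threshold of 80:

#include "opencv2/objdetect/linemod.hpp"
#include "opencv2/highgui.hpp"
#include <vector>

int main()
{
    cv::Ptr<cv::linemod::Detector> detector = cv::linemod::getDefaultLINE();  // color gradients only

    cv::Mat object = cv::imread("object.png");                   // placeholder training view
    cv::Mat mask(object.size(), CV_8UC1, cv::Scalar(255));       // whole image treated as foreground

    std::vector<cv::Mat> sources(1, object);
    int template_id = detector->addTemplate(sources, "my_class", mask);
    if (template_id < 0)
        return 1;  // not enough discriminant features extracted

    cv::Mat frame = cv::imread("scene.png");                     // placeholder test frame
    std::vector<cv::Mat> test_sources(1, frame);
    std::vector<cv::linemod::Match> matches;
    detector->match(test_sources, 80.0f, matches);               // threshold is a percentage (0..100)

    // matches are sorted by similarity; matches[0].x/y locate the best hit, if any
    return 0;
}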
289   modules/objdetect/include/opencv2/objdetect/objdetect_c.h   (new file)
@ -0,0 +1,289 @@
/* (standard OpenCV BSD license header, identical to the one at the top of linemod.hpp above) */
|
#ifndef __OPENCV_OBJDETECT_C_H__
|
||||||
|
#define __OPENCV_OBJDETECT_C_H__
|
||||||
|
|
||||||
|
#include "opencv2/core/core_c.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
#include <deque>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/****************************************************************************************\
|
||||||
|
* Haar-like Object Detection functions *
|
||||||
|
\****************************************************************************************/
|
||||||
|
|
||||||
|
#define CV_HAAR_MAGIC_VAL 0x42500000
|
||||||
|
#define CV_TYPE_NAME_HAAR "opencv-haar-classifier"
|
||||||
|
|
||||||
|
#define CV_IS_HAAR_CLASSIFIER( haar ) \
|
||||||
|
((haar) != NULL && \
|
||||||
|
(((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL)
|
||||||
|
|
||||||
|
#define CV_HAAR_FEATURE_MAX 3
|
||||||
|
|
||||||
|
typedef struct CvHaarFeature
|
||||||
|
{
|
||||||
|
int tilted;
|
||||||
|
struct
|
||||||
|
{
|
||||||
|
CvRect r;
|
||||||
|
float weight;
|
||||||
|
} rect[CV_HAAR_FEATURE_MAX];
|
||||||
|
} CvHaarFeature;
|
||||||
|
|
||||||
|
typedef struct CvHaarClassifier
|
||||||
|
{
|
||||||
|
int count;
|
||||||
|
CvHaarFeature* haar_feature;
|
||||||
|
float* threshold;
|
||||||
|
int* left;
|
||||||
|
int* right;
|
||||||
|
float* alpha;
|
||||||
|
} CvHaarClassifier;
|
||||||
|
|
||||||
|
typedef struct CvHaarStageClassifier
|
||||||
|
{
|
||||||
|
int count;
|
||||||
|
float threshold;
|
||||||
|
CvHaarClassifier* classifier;
|
||||||
|
|
||||||
|
int next;
|
||||||
|
int child;
|
||||||
|
int parent;
|
||||||
|
} CvHaarStageClassifier;
|
||||||
|
|
||||||
|
typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;
|
||||||
|
|
||||||
|
typedef struct CvHaarClassifierCascade
|
||||||
|
{
|
||||||
|
int flags;
|
||||||
|
int count;
|
||||||
|
CvSize orig_window_size;
|
||||||
|
CvSize real_window_size;
|
||||||
|
double scale;
|
||||||
|
CvHaarStageClassifier* stage_classifier;
|
||||||
|
CvHidHaarClassifierCascade* hid_cascade;
|
||||||
|
} CvHaarClassifierCascade;
|
||||||
|
|
||||||
|
typedef struct CvAvgComp
|
||||||
|
{
|
||||||
|
CvRect rect;
|
||||||
|
int neighbors;
|
||||||
|
} CvAvgComp;
|
||||||
|
|
||||||
|
/* Loads haar classifier cascade from a directory.
|
||||||
|
It is obsolete: convert your cascade to xml and use cvLoad instead */
|
||||||
|
CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade(
|
||||||
|
const char* directory, CvSize orig_window_size);
|
||||||
|
|
||||||
|
CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );
|
||||||
|
|
||||||
|
#define CV_HAAR_DO_CANNY_PRUNING 1
|
||||||
|
#define CV_HAAR_SCALE_IMAGE 2
|
||||||
|
#define CV_HAAR_FIND_BIGGEST_OBJECT 4
|
||||||
|
#define CV_HAAR_DO_ROUGH_SEARCH 8
|
||||||
|
|
||||||
|
CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image,
|
||||||
|
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
|
||||||
|
double scale_factor CV_DEFAULT(1.1),
|
||||||
|
int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
|
||||||
|
CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)));
|
||||||
|
|
||||||
|
/* sets images for haar classifier cascade */
|
||||||
|
CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade,
|
||||||
|
const CvArr* sum, const CvArr* sqsum,
|
||||||
|
const CvArr* tilted_sum, double scale );
|
||||||
|
|
||||||
|
/* runs the cascade on the specified window */
|
||||||
|
CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
|
||||||
|
CvPoint pt, int start_stage CV_DEFAULT(0));
|
||||||
|
|
||||||
|
|
||||||
|
/****************************************************************************************\
|
||||||
|
* Latent SVM Object Detection functions *
|
||||||
|
\****************************************************************************************/
|
||||||
|
|
||||||
|
// DataType: STRUCT position
|
||||||
|
// Structure describes the position of the filter in the feature pyramid
|
||||||
|
// l - level in the feature pyramid
|
||||||
|
// (x, y) - coordinate in level l
|
||||||
|
typedef struct CvLSVMFilterPosition
|
||||||
|
{
|
||||||
|
int x;
|
||||||
|
int y;
|
||||||
|
int l;
|
||||||
|
} CvLSVMFilterPosition;
|
||||||
|
|
||||||
|
// DataType: STRUCT filterObject
|
||||||
|
// Description of the filter, which corresponds to the part of the object
|
||||||
|
// V - ideal (penalty = 0) position of the partial filter
|
||||||
|
// from the root filter position (V_i in the paper)
|
||||||
|
// penaltyFunction - vector describes penalty function (d_i in the paper)
|
||||||
|
// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2
|
||||||
|
// FILTER DESCRIPTION
|
||||||
|
// Rectangular map (sizeX x sizeY),
|
||||||
|
// every cell stores feature vector (dimension = p)
|
||||||
|
// H - matrix of feature vectors
//     to set and get feature vectors (i,j)
//     used formula H[(j * sizeX + i) * p + k], where
//     k - component of feature vector in cell (i, j)
// END OF FILTER DESCRIPTION
typedef struct CvLSVMFilterObject{
    CvLSVMFilterPosition V;
    float fineFunction[4];
    int sizeX;
    int sizeY;
    int numFeatures;
    float *H;
} CvLSVMFilterObject;

// data type: STRUCT CvLatentSvmDetector
// structure contains internal representation of trained Latent SVM detector
// num_filters - total number of filters (root plus part) in model
// num_components - number of components in model
// num_part_filters - array containing number of part filters for each component
// filters - root and part filters for all model components
// b - biases for all model components
// score_threshold - confidence level threshold
typedef struct CvLatentSvmDetector
{
    int num_filters;
    int num_components;
    int* num_part_filters;
    CvLSVMFilterObject** filters;
    float* b;
    float score_threshold;
} CvLatentSvmDetector;

// data type: STRUCT CvObjectDetection
// structure contains the bounding box and confidence level for detected object
// rect - bounding box for a detected object
// score - confidence level
typedef struct CvObjectDetection
{
    CvRect rect;
    float score;
} CvObjectDetection;

//////////////// Object Detection using Latent SVM //////////////

/*
// load trained detector from a file
//
// API
// CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename);
// INPUT
// filename - path to the file containing the parameters of
            - trained Latent SVM detector
// OUTPUT
// trained Latent SVM detector in internal representation
*/
CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename);

/*
// release memory allocated for CvLatentSvmDetector structure
//
// API
// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
// INPUT
// detector - CvLatentSvmDetector structure to be released
// OUTPUT
*/
CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);

/*
// find rectangular regions in the given image that are likely
// to contain objects and corresponding confidence levels
//
// API
// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
//                                 CvLatentSvmDetector* detector,
//                                 CvMemStorage* storage,
//                                 float overlap_threshold = 0.5f,
//                                 int numThreads = -1);
// INPUT
// image - image to detect objects in
// detector - Latent SVM detector in internal representation
// storage - memory storage to store the resultant sequence
//           of the object candidate rectangles
// overlap_threshold - threshold for the non-maximum suppression algorithm
                       = 0.5f [here will be the reference to original paper]
// OUTPUT
// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
*/
CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image,
                                       CvLatentSvmDetector* detector,
                                       CvMemStorage* storage,
                                       float overlap_threshold CV_DEFAULT(0.5f),
                                       int numThreads CV_DEFAULT(-1));

#ifdef __cplusplus
}

CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,
                     CvHaarClassifierCascade* cascade, CvMemStorage* storage,
                     std::vector<int>& rejectLevels, std::vector<double>& levelWeightds,
                     double scale_factor = 1.1,
                     int min_neighbors = 3, int flags = 0,
                     CvSize min_size = cvSize(0, 0), CvSize max_size = cvSize(0, 0),
                     bool outputRejectLevels = false );

struct CvDataMatrixCode
{
    char msg[4];
    CvMat* original;
    CvMat* corners;
};

CV_EXPORTS std::deque<CvDataMatrixCode> cvFindDataMatrix(CvMat *im);

#endif

#endif /* __OPENCV_OBJDETECT_C_H__ */
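For orientation, here is a minimal usage sketch of the C API declared above. It is illustrative only: the model and image paths are placeholders, and cvLoadImage/cvCreateMemStorage come from the highgui and core C headers rather than from this file.

#include <cstdio>
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/highgui/highgui_c.h"

int main()
{
    /* Placeholder file names -- substitute a real model and image. */
    CvLatentSvmDetector* detector = cvLoadLatentSvmDetector("model.xml");
    IplImage* image = cvLoadImage("scene.png");
    CvMemStorage* storage = cvCreateMemStorage(0);

    /* Default overlap threshold (0.5f) and automatic thread count (-1). */
    CvSeq* detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, -1);
    for (int i = 0; i < detections->total; i++)
    {
        CvObjectDetection det = *(CvObjectDetection*)cvGetSeqElem(detections, i);
        printf("box (%d,%d,%d,%d) score %f\n",
               det.rect.x, det.rect.y, det.rect.width, det.rect.height, det.score);
    }

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&image);
    cvReleaseLatentSvmDetector(&detector);
    return 0;
}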
@ -1,6 +1,8 @@
 #ifndef _LSVM_ROUTINE_H_
 #define _LSVM_ROUTINE_H_

+#include "opencv2/objdetect/objdetect_c.h"
+
 #include "_lsvm_types.h"
 #include "_lsvm_error.h"

@ -1,5 +1,6 @@
 #ifndef LSVM_PARSER
 #define LSVM_PARSER
+#include "opencv2/objdetect/objdetect_c.h"

 #include "_lsvm_types.h"

@ -43,6 +43,7 @@
 #include <cstdio>

 #include "cascadedetect.hpp"
+#include "opencv2/objdetect/objdetect_c.h"

 #if defined (LOG_CASCADE_STATISTIC)
 struct Logger
@ -1,7 +1,7 @@
 #include "precomp.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/objdetect/objdetect_c.h"

-#include <deque>
 #include <algorithm>

 class Sampler {
@ -43,6 +43,7 @@

 #include "precomp.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
 #include <stdio.h>

 #if CV_SSE2
@ -41,6 +41,7 @@
 //M*/

 #include "precomp.hpp"
+#include "opencv2/core/core_c.h"

 #include <cstdio>
 #include <iterator>
@ -2862,7 +2863,7 @@ void HOGDescriptor::readALTModel(String modelfile)
        String eerr("file not exist");
        String efile(__FILE__);
        String efunc(__FUNCTION__);
-       throw Exception(CV_StsError, eerr, efile, efunc, __LINE__);
+       throw Exception(Error::StsError, eerr, efile, efunc, __LINE__);
    }
    char version_buffer[10];
    if (!fread (&version_buffer,sizeof(char),10,modelfl))
@ -2870,13 +2871,13 @@ void HOGDescriptor::readALTModel(String modelfile)
        String eerr("version?");
        String efile(__FILE__);
        String efunc(__FUNCTION__);
-       throw Exception(CV_StsError, eerr, efile, efunc, __LINE__);
+       throw Exception(Error::StsError, eerr, efile, efunc, __LINE__);
    }
    if(strcmp(version_buffer,"V6.01")) {
        String eerr("version doesnot match");
        String efile(__FILE__);
        String efunc(__FUNCTION__);
-       throw Exception(CV_StsError, eerr, efile, efunc, __LINE__);
+       throw Exception(Error::StsError, eerr, efile, efunc, __LINE__);
    }
    /* read version number */
    int version = 0;
@ -1,5 +1,6 @@
 #include "precomp.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
 #include "_lsvmparser.h"
 #include "_lsvm_matching.h"

@ -66,7 +66,7 @@ static inline int getLabel(int quantized)
    case 64:  return 6;
    case 128: return 7;
    default:
-       CV_Error(CV_StsBadArg, "Invalid value of quantized parameter");
+       CV_Error(Error::StsBadArg, "Invalid value of quantized parameter");
        return -1; //avoid warning
    }
 }
@ -1398,17 +1398,17 @@ void Detector::match(const std::vector<Mat>& sources, float threshold, std::vect
    if (quantized_images.needed())
        quantized_images.create(1, static_cast<int>(pyramid_levels * modalities.size()), CV_8U);

-   assert(sources.size() == modalities.size());
+   CV_Assert(sources.size() == modalities.size());
    // Initialize each modality with our sources
    std::vector< Ptr<QuantizedPyramid> > quantizers;
    for (int i = 0; i < (int)modalities.size(); ++i){
        Mat mask, source;
        source = sources[i];
        if(!masks.empty()){
-           assert(masks.size() == modalities.size());
+           CV_Assert(masks.size() == modalities.size());
            mask = masks[i];
        }
-       assert(mask.empty() || mask.size() == source.size());
+       CV_Assert(mask.empty() || mask.size() == source.size());
        quantizers.push_back(modalities[i]->process(source, mask));
    }
    // pyramid level -> modality -> quantization
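The assert-to-CV_Assert change above is behavioral as well as cosmetic: CV_Assert stays active in release builds and raises cv::Exception instead of aborting. A small self-contained sketch (variable names are made up for illustration):

#include <iostream>
#include <vector>
#include "opencv2/core.hpp"

int main()
{
    std::vector<int> sources(3), masks(2);          // deliberately mismatched sizes
    try
    {
        CV_Assert(sources.size() == masks.size());  // checked even with NDEBUG defined
    }
    catch (const cv::Exception& e)
    {
        std::cerr << e.what() << std::endl;         // catchable, unlike a plain assert()
    }
    return 0;
}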
@ -1,4 +1,5 @@
 #include "precomp.hpp"
+#include "opencv2/objdetect/objdetect_c.h"
 #include "_lsvm_matching.h"
 #include <stdio.h>

@ -41,6 +41,7 @@

 #include "test_precomp.hpp"
 #include "opencv2/imgproc.hpp"
+#include "opencv2/objdetect/objdetect_c.h"

 using namespace cv;
 using namespace std;
@ -117,7 +118,7 @@ int CV_DetectorTest::prepareData( FileStorage& _fs )
    // fn[TOTAL_NO_PAIR_E] >> eps.totalNoPair;

    // read detectors
-   if( fn[DETECTOR_NAMES].node->data.seq != 0 )
+   if( fn[DETECTOR_NAMES].size() != 0 )
    {
        FileNodeIterator it = fn[DETECTOR_NAMES].begin();
        for( ; it != fn[DETECTOR_NAMES].end(); )
@ -132,7 +133,7 @@ int CV_DetectorTest::prepareData( FileStorage& _fs )

    // read images filenames and images
    string dataPath = ts->get_data_path();
-   if( fn[IMAGE_FILENAMES].node->data.seq != 0 )
+   if( fn[IMAGE_FILENAMES].size() != 0 )
    {
        for( FileNodeIterator it = fn[IMAGE_FILENAMES].begin(); it != fn[IMAGE_FILENAMES].end(); )
        {
@ -210,7 +211,7 @@ void CV_DetectorTest::run( int )
        {
            char buf[10];
            sprintf( buf, "%s%d", "img_", ii );
-           cvWriteComment( validationFS.fs, buf, 0 );
+           //cvWriteComment( validationFS.fs, buf, 0 );
            validationFS << *it;
        }
        validationFS << "]"; // IMAGE_FILENAMES
@ -316,7 +317,7 @@ int CV_DetectorTest::validate( int detectorIdx, vector<vector<Rect> >& objects )
    string imageIdxStr = buf;
    FileNode node = validationFS.getFirstTopLevelNode()[VALIDATION][detectorNames[detectorIdx]][imageIdxStr];
    vector<Rect> valRects;
-   if( node.node->data.seq != 0 )
+   if( node.size() != 0 )
    {
        for( FileNodeIterator it2 = node.begin(); it2 != node.end(); )
        {
@ -410,12 +411,12 @@ void CV_CascadeDetectorTest::readDetector( const FileNode& fn )
    if( flag )
        flags.push_back( 0 );
    else
-       flags.push_back( CV_HAAR_SCALE_IMAGE );
+       flags.push_back( CASCADE_SCALE_IMAGE );
 }

 void CV_CascadeDetectorTest::writeDetector( FileStorage& fs, int di )
 {
-   int sc = flags[di] & CV_HAAR_SCALE_IMAGE ? 0 : 1;
+   int sc = flags[di] & CASCADE_SCALE_IMAGE ? 0 : 1;
    fs << FILENAME << detectorFilenames[di];
    fs << C_SCALE_CASCADE << sc;
 }
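The CASCADE_SCALE_IMAGE flag used above is the C++ enum that replaces the old CV_HAAR_SCALE_IMAGE macro. For reference, a minimal detection call with the new name looks roughly like this (the cascade and image paths are placeholders):

#include <vector>
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::CascadeClassifier cascade("haarcascade_frontalface_alt.xml");  // placeholder path
    cv::Mat gray = cv::imread("people.png", 0);                        // 0 = load as grayscale

    std::vector<cv::Rect> objects;
    // CASCADE_SCALE_IMAGE replaces CV_HAAR_SCALE_IMAGE from the C API.
    cascade.detectMultiScale(gray, objects, 1.1, 3, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
    return (int)objects.size();
}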
@ -439,7 +440,7 @@ int CV_CascadeDetectorTest::detectMultiScale_C( const string& filename,

    CvMat c_gray = grayImg;
    CvSeq* rs = cvHaarDetectObjects(&c_gray, c_cascade, storage, 1.1, 3, flags[di] );

    objects.clear();
    for( int i = 0; i < rs->total; i++ )
    {
@ -494,7 +495,7 @@ CV_HOGDetectorTest::CV_HOGDetectorTest()
 void CV_HOGDetectorTest::readDetector( const FileNode& fn )
 {
    String filename;
-   if( fn[FILENAME].node->data.seq != 0 )
+   if( fn[FILENAME].size() != 0 )
        fn[FILENAME] >> filename;
    detectorFilenames.push_back( filename);
 }
@ -1085,7 +1086,7 @@ void HOGDescriptorTester::detect(const Mat& img,
    }

    const double eps = 0.0;
-   double diff_norm = norm(Mat(actual_weights) - Mat(weights), CV_L2);
+   double diff_norm = norm(Mat(actual_weights) - Mat(weights), NORM_L2);
    if (diff_norm > eps)
    {
        ts->printf(cvtest::TS::SUMMARY, "Weights for found locations aren't equal.\n"
@ -1164,7 +1165,7 @@ void HOGDescriptorTester::compute(const Mat& img, vector<float>& descriptors,
    std::vector<float> actual_descriptors;
    actual_hog->compute(img, actual_descriptors, winStride, padding, locations);

-   double diff_norm = cv::norm(Mat(actual_descriptors) - Mat(descriptors), CV_L2);
+   double diff_norm = cv::norm(Mat(actual_descriptors) - Mat(descriptors), NORM_L2);
    const double eps = 0.0;
    if (diff_norm > eps)
    {
@ -1314,7 +1315,7 @@ void HOGDescriptorTester::computeGradient(const Mat& img, Mat& grad, Mat& qangle
    const double eps = 0.0;
    for (i = 0; i < 2; ++i)
    {
-       double diff_norm = norm(reference_mats[i] - actual_mats[i], CV_L2);
+       double diff_norm = norm(reference_mats[i] - actual_mats[i], NORM_L2);
        if (diff_norm > eps)
        {
            ts->printf(cvtest::TS::LOG, "%s matrices are not equal\n"
@ -41,6 +41,7 @@
 //M*/

 #include "test_precomp.hpp"
+#include "opencv2/objdetect/objdetect_c.h"
 #include <string>

 #ifdef HAVE_TBB
@ -131,9 +131,6 @@ namespace cv
        //getDevice also need to be called before this function
        CV_EXPORTS void setDeviceEx(Info &oclinfo, void *ctx, void *qu, int devnum = 0);

-       //////////////////////////////// Error handling ////////////////////////
-       CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
-
        //////////////////////////////// OpenCL context ////////////////////////
        //This is a global singleton class used to represent a OpenCL context.
        class CV_EXPORTS Context
@ -811,7 +808,8 @@ namespace cv
        ///////////////////////////////////////////CascadeClassifier//////////////////////////////////////////////////////////////////
        ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

-       class CV_EXPORTS_W OclCascadeClassifier : public cv::CascadeClassifier
+#if 0
+       class CV_EXPORTS OclCascadeClassifier : public cv::CascadeClassifier
        {
        public:
            OclCascadeClassifier() {};
@ -820,6 +818,7 @@ namespace cv
            CvSeq* oclHaarDetectObjects(oclMat &gimg, CvMemStorage *storage, double scaleFactor,
                                        int minNeighbors, int flags, CvSize minSize = cvSize(0, 0), CvSize maxSize = cvSize(0, 0));
        };
+#endif


@ -459,28 +459,28 @@ namespace cv
        inline uchar *oclMat::ptr(int y)
        {
            CV_DbgAssert( (unsigned)y < (unsigned)rows );
-           CV_Error(CV_GpuNotSupported, "This function hasn't been supported yet.\n");
+           CV_Error(Error::GpuNotSupported, "This function hasn't been supported yet.\n");
            return data + step * y;
        }

        inline const uchar *oclMat::ptr(int y) const
        {
            CV_DbgAssert( (unsigned)y < (unsigned)rows );
-           CV_Error(CV_GpuNotSupported, "This function hasn't been supported yet.\n");
+           CV_Error(Error::GpuNotSupported, "This function hasn't been supported yet.\n");
            return data + step * y;
        }

        template<typename _Tp> inline _Tp *oclMat::ptr(int y)
        {
            CV_DbgAssert( (unsigned)y < (unsigned)rows );
-           CV_Error(CV_GpuNotSupported, "This function hasn't been supported yet.\n");
+           CV_Error(Error::GpuNotSupported, "This function hasn't been supported yet.\n");
            return (_Tp *)(data + step * y);
        }

        template<typename _Tp> inline const _Tp *oclMat::ptr(int y) const
        {
            CV_DbgAssert( (unsigned)y < (unsigned)rows );
-           CV_Error(CV_GpuNotSupported, "This function hasn't been supported yet.\n");
+           CV_Error(Error::GpuNotSupported, "This function hasn't been supported yet.\n");
            return (const _Tp *)(data + step * y);
        }

@ -63,8 +63,6 @@ int main(int argc, const char *argv[])
        }
    }

-   redirectError(cvErrorCallback);
-
    const char *keys =
        "{ h help | false | print help message }"
        "{ f filter | | filter for test }"
@ -44,6 +44,8 @@
 //M*/
 #include "precomp.hpp"

+#if 0
+
 ///////////// Haar ////////////////////////
 namespace cv
 {
@ -135,4 +137,6 @@ TEST(Haar)
        faceCascade.detectMultiScale(d_img, faces,
                                     1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        GPU_FULL_OFF;
 }
+
+#endif
@ -746,12 +746,12 @@ void meanShiftProc_(const Mat &src_roi, Mat &dst_roi, Mat &dstCoor_roi, int sp,

    if (src_roi.empty())
    {
-       CV_Error(CV_StsBadArg, "The input image is empty");
+       CV_Error(Error::StsBadArg, "The input image is empty");
    }

    if (src_roi.depth() != CV_8U || src_roi.channels() != 4)
    {
-       CV_Error(CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported");
+       CV_Error(Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported");
    }

    CV_Assert((src_roi.cols == dst_roi.cols) && (src_roi.rows == dst_roi.rows) &&
@ -349,14 +349,3 @@ string abspath(const string &relpath)
 {
    return TestSystem::instance().workingDir() + relpath;
 }
-
-
-int CV_CDECL cvErrorCallback(int /*status*/, const char * /*func_name*/,
-                             const char *err_msg, const char * /*file_name*/,
-                             int /*line*/, void * /*userdata*/)
-{
-    TestSystem::instance().printError(err_msg);
-    return 0;
-}
-
-
@ -55,6 +55,8 @@
 #include "opencv2/features2d.hpp"
 #include "opencv2/ocl.hpp"

+#include "opencv2/core/utility.hpp"
+
 #define Min_Size 1000
 #define Max_Size 4000
 #define Multiple 2
@ -65,7 +67,7 @@ using namespace cv;

 void gen(Mat &mat, int rows, int cols, int type, Scalar low, Scalar high);
 string abspath(const string &relpath);
-int CV_CDECL cvErrorCallback(int, const char *, const char *, const char *, int, void *);
 typedef struct
 {
    short x;
@ -133,7 +133,7 @@ void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, String
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -196,7 +196,7 @@ static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -290,7 +290,7 @@ void arithmetic_scalar_run(const oclMat &src1, const Scalar &src2, oclMat &dst,
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -362,7 +362,7 @@ static void arithmetic_scalar_run(const oclMat &src, oclMat &dst, String kernelN
    {
        if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -433,7 +433,7 @@ static void arithmetic_scalar(const oclMat &src1, const Scalar &src2, oclMat &ds
    };
    ArithmeticFuncS func = tab[src1.depth()];
    if(func == 0)
-       cv::ocl::error("Unsupported arithmetic operation", __FILE__, __LINE__);
+       cv::error(Error::StsBadArg, "Unsupported arithmetic operation", "", __FILE__, __LINE__);
    func(src1, src2, dst, mask, kernelName, kernelString, isMatSubScalar);
 }
 static void arithmetic_scalar(const oclMat &src1, const Scalar &src2, oclMat &dst, const oclMat &mask, String kernelName, const char **kernelString)
@ -465,7 +465,7 @@ void cv::ocl::divide(double scalar, const oclMat &src, oclMat &dst)
    {
        if(!src.clCxt->supportsFeature(Context::CL_DOUBLE))
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -557,7 +557,7 @@ void cv::ocl::compare(const oclMat &src1, const oclMat &src2, oclMat &dst , int
        kernelString = &arithm_compare_ne;
        break;
    default:
-       CV_Error(CV_StsBadArg, "Unknown comparison method");
+       CV_Error(Error::StsBadArg, "Unknown comparison method");
    }
    compare_run(src1, src2, dst, kernelName, kernelString);
 }
@ -628,7 +628,7 @@ Scalar cv::ocl::sum(const oclMat &src)
 {
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    static sumFunc functab[2] =
    {
@ -645,7 +645,7 @@ Scalar cv::ocl::absSum(const oclMat &src)
 {
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    static sumFunc functab[2] =
    {
@ -662,7 +662,7 @@ Scalar cv::ocl::sqrSum(const oclMat &src)
 {
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    static sumFunc functab[2] =
    {
@ -811,7 +811,7 @@ void cv::ocl::minMax(const oclMat &src, double *minVal, double *maxVal, const oc
    CV_Assert(src.oclchannels() == 1);
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    static minMaxFunc functab[8] =
    {
@ -895,7 +895,7 @@ static void arithmetic_flip_rows_run(const oclMat &src, oclMat &dst, String kern
    {
        if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -944,7 +944,7 @@ static void arithmetic_flip_cols_run(const oclMat &src, oclMat &dst, String kern
    {
        if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -1124,7 +1124,7 @@ static void arithmetic_exp_log_run(const oclMat &src, oclMat &dst, String kernel
    Context *clCxt = src.clCxt;
    if(!clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+       CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
        return;
    }
    //int channels = dst.oclchannels();
@ -1165,7 +1165,7 @@ static void arithmetic_magnitude_phase_run(const oclMat &src1, const oclMat &src
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -1213,7 +1213,7 @@ static void arithmetic_phase_run(const oclMat &src1, const oclMat &src2, oclMat
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -1277,7 +1277,7 @@ static void arithmetic_cartToPolar_run(const oclMat &src1, const oclMat &src2, o
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -1332,7 +1332,7 @@ static void arithmetic_ptc_run(const oclMat &src1, const oclMat &src2, oclMat &d
    {
        if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -1514,7 +1514,7 @@ void cv::ocl::minMaxLoc(const oclMat &src, double *minVal, double *maxVal,
 {
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    static minMaxLocFunc functab[2] =
    {
@ -1561,7 +1561,7 @@ int cv::ocl::countNonZero(const oclMat &src)
    size_t groupnum = src.clCxt->computeUnits();
    if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "select device don't support double");
+       CV_Error(Error::GpuNotSupported, "select device don't support double");
    }
    CV_Assert(groupnum != 0);
    groupnum = groupnum * 2;
@ -1834,7 +1834,7 @@ static void bitwise_scalar(const oclMat &src1, const Scalar &src2, oclMat &dst,
    };
    BitwiseFuncS func = tab[src1.depth()];
    if(func == 0)
-       cv::ocl::error("Unsupported arithmetic operation", __FILE__, __LINE__);
+       cv::error(Error::StsBadArg, "Unsupported arithmetic operation", "", __FILE__, __LINE__);
    func(src1, src2, dst, mask, kernelName, kernelString, isMatSubScalar);
 }
 static void bitwise_scalar(const oclMat &src1, const Scalar &src2, oclMat &dst, const oclMat &mask, String kernelName, const char **kernelString)
@ -2037,7 +2037,7 @@ static void transpose_run(const oclMat &src, oclMat &dst, String kernelName)
    {
        if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
        {
-           CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+           CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
            return;
        }

@ -62,7 +62,7 @@ void cv::ocl::blendLinear(const oclMat &img1, const oclMat &img2, const oclMat &
                          oclMat &result)
 {
    cv::ocl::Context *ctx = img1.clCxt;
-   assert(ctx == img2.clCxt && ctx == weights1.clCxt && ctx == weights2.clCxt);
+   CV_Assert(ctx == img2.clCxt && ctx == weights1.clCxt && ctx == weights2.clCxt);
    int channels = img1.oclchannels();
    int depth = img1.depth();
    int rows = img1.rows;
@ -64,7 +64,7 @@ template < int BLOCK_SIZE, int MAX_DESC_LEN/*, typename Mask*/ >
 void matchUnrolledCached(const oclMat &query, const oclMat &train, const oclMat &/*mask*/,
                          const oclMat &trainIdx, const oclMat &distance, int distType)
 {
-   assert(query.type() == CV_32F);
+   CV_Assert(query.type() == CV_32F);
    cv::ocl::Context *ctx = query.clCxt;
    size_t globalSize[] = {(query.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, BLOCK_SIZE, 1};
    size_t localSize[] = {BLOCK_SIZE, BLOCK_SIZE, 1};
@ -106,7 +106,7 @@ template < int BLOCK_SIZE/*, typename Mask*/ >
 void match(const oclMat &query, const oclMat &train, const oclMat &/*mask*/,
            const oclMat &trainIdx, const oclMat &distance, int distType)
 {
-   assert(query.type() == CV_32F);
+   CV_Assert(query.type() == CV_32F);
    cv::ocl::Context *ctx = query.clCxt;
    size_t globalSize[] = {(query.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, BLOCK_SIZE, 1};
    size_t localSize[] = {BLOCK_SIZE, BLOCK_SIZE, 1};
@ -147,7 +147,7 @@ template < int BLOCK_SIZE, int MAX_DESC_LEN/*, typename Mask*/ >
 void matchUnrolledCached(const oclMat &query, const oclMat &train, float maxDistance, const oclMat &/*mask*/,
                          const oclMat &trainIdx, const oclMat &distance, const oclMat &nMatches, int distType)
 {
-   assert(query.type() == CV_32F);
+   CV_Assert(query.type() == CV_32F);
    cv::ocl::Context *ctx = query.clCxt;
    size_t globalSize[] = {(train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, (query.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, 1};
    size_t localSize[] = {BLOCK_SIZE, BLOCK_SIZE, 1};
@ -188,7 +188,7 @@ template < int BLOCK_SIZE/*, typename Mask*/ >
 void radius_match(const oclMat &query, const oclMat &train, float maxDistance, const oclMat &/*mask*/,
                   const oclMat &trainIdx, const oclMat &distance, const oclMat &nMatches, int distType)
 {
-   assert(query.type() == CV_32F);
+   CV_Assert(query.type() == CV_32F);
    cv::ocl::Context *ctx = query.clCxt;
    size_t globalSize[] = {(train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, (query.rows + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE, 1};
    size_t localSize[] = {BLOCK_SIZE, BLOCK_SIZE, 1};
@ -533,14 +533,13 @@ void cv::ocl::BruteForceMatcher_OCL_base::matchSingle(const oclMat &query, const

    // match1 doesn't support signed char type, match2 only support float, hamming support uchar, ushort and int
    int callType = query.depth();
-   char cvFuncName[] = "singleMatch";
    if (callType != 5)
-       CV_ERROR(CV_UNSUPPORTED_FORMAT_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::StsUnsupportedFormat, "BruteForceMatch OpenCL only support float type query!\n");

    if ((distType == 0 && callType == 1 ) || (distType == 1 && callType != 5) || (distType == 2 && (callType != 0
        || callType != 2 || callType != 4)))
    {
-       CV_ERROR(CV_UNSUPPORTED_DEPTH_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::BadDepth, "BruteForceMatch OpenCL only support float type query!\n");
    }

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
@ -550,8 +549,6 @@ void cv::ocl::BruteForceMatcher_OCL_base::matchSingle(const oclMat &query, const
    distance.create(1, query.rows, CV_32F);

    matchDispatcher(query, train, mask, trainIdx, distance, distType);
-exit:
-   return;
 }

 void cv::ocl::BruteForceMatcher_OCL_base::matchDownload(const oclMat &trainIdx, const oclMat &distance, std::vector<DMatch> &matches)
@ -597,7 +594,7 @@ void cv::ocl::BruteForceMatcher_OCL_base::matchConvert(const Mat &trainIdx, cons

 void cv::ocl::BruteForceMatcher_OCL_base::match(const oclMat &query, const oclMat &train, std::vector<DMatch> &matches, const oclMat &mask)
 {
-   assert(mask.empty()); // mask is not supported at the moment
+   CV_Assert(mask.empty()); // mask is not supported at the moment
    oclMat trainIdx, distance;
    matchSingle(query, train, trainIdx, distance, mask);
    matchDownload(trainIdx, distance, matches);
@ -655,14 +652,13 @@ void cv::ocl::BruteForceMatcher_OCL_base::matchCollection(const oclMat &query, c

    // match1 doesn't support signed char type, match2 only support float, hamming support uchar, ushort and int
    int callType = query.depth();
-   char cvFuncName[] = "matchCollection";
    if (callType != 5)
-       CV_ERROR(CV_UNSUPPORTED_FORMAT_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::StsUnsupportedFormat, "BruteForceMatch OpenCL only support float type query!\n");

    if ((distType == 0 && callType == 1 ) || (distType == 1 && callType != 5) || (distType == 2 && (callType != 0
        || callType != 2 || callType != 4)))
    {
-       CV_ERROR(CV_UNSUPPORTED_DEPTH_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::BadDepth, "BruteForceMatch OpenCL only support float type query!\n");
    }

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
@ -672,8 +668,6 @@ void cv::ocl::BruteForceMatcher_OCL_base::matchCollection(const oclMat &query, c
    distance.create(1, query.rows, CV_32F);

    matchDispatcher(query, (const oclMat *)trainCollection.ptr(), trainCollection.cols, masks, trainIdx, imgIdx, distance, distType);
-exit:
-   return;
 }

 void cv::ocl::BruteForceMatcher_OCL_base::matchDownload(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance, std::vector<DMatch> &matches)
@ -745,14 +739,13 @@ void cv::ocl::BruteForceMatcher_OCL_base::knnMatchSingle(const oclMat &query, co
    // match1 doesn't support signed char type, match2 only support float, hamming support uchar, ushort and int
    int callType = query.depth();

-   char cvFuncName[] = "knnMatchSingle";
    if (callType != 5)
-       CV_ERROR(CV_UNSUPPORTED_FORMAT_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::StsUnsupportedFormat, "BruteForceMatch OpenCL only support float type query!\n");

    if ((distType == 0 && callType == 1 ) || (distType == 1 && callType != 5) || (distType == 2 && (callType != 0
        || callType != 2 || callType != 4)))
    {
-       CV_ERROR(CV_UNSUPPORTED_DEPTH_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::BadDepth, "BruteForceMatch OpenCL only support float type query!\n");
    }

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
@ -773,8 +766,6 @@ void cv::ocl::BruteForceMatcher_OCL_base::knnMatchSingle(const oclMat &query, co
    trainIdx.setTo(Scalar::all(-1));

    kmatchDispatcher(query, train, k, mask, trainIdx, distance, allDist, distType);
-exit:
-   return;
 }

 void cv::ocl::BruteForceMatcher_OCL_base::knnMatchDownload(const oclMat &trainIdx, const oclMat &distance, std::vector< std::vector<DMatch> > &matches, bool compactResult)
@ -1020,14 +1011,13 @@ void cv::ocl::BruteForceMatcher_OCL_base::radiusMatchSingle(const oclMat &query,

    // match1 doesn't support signed char type, match2 only support float, hamming support uchar, ushort and int
    int callType = query.depth();
-   char cvFuncName[] = "radiusMatchSingle";
    if (callType != 5)
-       CV_ERROR(CV_UNSUPPORTED_FORMAT_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::StsUnsupportedFormat, "BruteForceMatch OpenCL only support float type query!\n");

    if ((distType == 0 && callType == 1 ) || (distType == 1 && callType != 5) || (distType == 2 && (callType != 0
        || callType != 2 || callType != 4)))
    {
-       CV_ERROR(CV_UNSUPPORTED_DEPTH_ERR, "BruteForceMatch OpenCL only support float type query!\n");
+       CV_Error(Error::BadDepth, "BruteForceMatch OpenCL only support float type query!\n");
    }

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
@ -1044,8 +1034,6 @@ void cv::ocl::BruteForceMatcher_OCL_base::radiusMatchSingle(const oclMat &query,
    nMatches.setTo(Scalar::all(0));

    matchDispatcher(query, train, maxDistance, mask, trainIdx, distance, nMatches, distType);
-exit:
-   return;
 }

 void cv::ocl::BruteForceMatcher_OCL_base::radiusMatchDownload(const oclMat &trainIdx, const oclMat &distance, const oclMat &nMatches,
@ -268,7 +268,7 @@ void cvtColor_caller(const oclMat &src, oclMat &dst, int code, int dcn)
    case COLOR_HLS2BGR: case COLOR_HLS2RGB: case COLOR_HLS2BGR_FULL: case COLOR_HLS2RGB_FULL:
    */
    default:
-       CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
+       CV_Error(Error::StsBadFlag, "Unknown/unsupported color conversion code" );
    }
 }
 }
@ -170,21 +170,5 @@ namespace cv
            sprintf(buf, "%d", err);
            return buf;
        }
-
-       void error(const char *error_string, const char *file, const int line, const char *func)
-       {
-           int code = CV_GpuApiCallError;
-
-           if (std::uncaught_exception())
-           {
-               const char *errorStr = cvErrorStr(code);
-               const char *function = func ? func : "unknown function";
-
-               std::cerr << "OpenCV Error: " << errorStr << "(" << error_string << ") in " << function << ", file " << file << ", line " << line;
-               std::cerr.flush();
-           }
-           else
-               cv::error( cv::Exception(code, error_string, func, file, line) );
-       }
    }
 }
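With the module-local cv::ocl::error helper removed above, call sites rely on the shared CV_Error/cv::error machinery from core. A hedged sketch of what a caller looks like after the change (the helper function and its flag are invented for illustration):

#include <iostream>
#include "opencv2/core.hpp"

static void requireDoubleSupport(bool deviceHasFp64)   // hypothetical helper
{
    if (!deviceHasFp64)
        CV_Error(cv::Error::GpuNotSupported, "Selected device don't support double");
}

int main()
{
    try
    {
        requireDoubleSupport(false);
    }
    catch (const cv::Exception& e)
    {
        std::cerr << e.what() << std::endl;   // file, line and function are filled in by the macro
    }
    return 0;
}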
@ -51,7 +51,7 @@ using namespace cv::ocl;
 #if !defined HAVE_CLAMDFFT
 void cv::ocl::dft(const oclMat&, oclMat&, Size, int)
 {
-   CV_Error(CV_StsNotImplemented, "OpenCL DFT is not implemented");
+   CV_Error(Error::StsNotImplemented, "OpenCL DFT is not implemented");
 }
 namespace cv { namespace ocl {
    void fft_teardown();
@ -270,7 +270,7 @@ static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel,
        sprintf(s, "-D VAL=FLT_MAX -D GENTYPE=float4");
        break;
    default:
-       CV_Error(CV_StsUnsupportedFormat, "unsupported type");
+       CV_Error(Error::StsUnsupportedFormat, "unsupported type");
    }

    char compile_option[128];
@ -350,7 +350,7 @@ static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel,
        sprintf(s, "-D VAL=-FLT_MAX -D GENTYPE=float4");
        break;
    default:
-       CV_Error(CV_StsUnsupportedFormat, "unsupported type");
+       CV_Error(Error::StsUnsupportedFormat, "unsupported type");
    }

    char compile_option[128];
@ -462,7 +462,7 @@ void morphOp(int op, const oclMat &src, oclMat &dst, const Mat &_kernel, Point a
 {
    if ((borderType != cv::BORDER_CONSTANT) || (borderValue != morphologyDefaultBorderValue()))
    {
-       CV_Error(CV_StsBadArg, "unsupported border type");
+       CV_Error(Error::StsBadArg, "unsupported border type");
    }

    Mat kernel;
@ -564,7 +564,7 @@ void cv::ocl::morphologyEx(const oclMat &src, oclMat &dst, int op, const Mat &ke
        subtract(temp, src, dst);
        break;
    default:
-       CV_Error(CV_StsBadArg, "unknown morphological operation");
+       CV_Error(Error::StsBadArg, "unknown morphological operation");
    }
 }

@ -778,7 +778,7 @@ static void GPUFilterBox_8u_C1R(const oclMat &src, oclMat &dst,
        sprintf(btype, "BORDER_REFLECT");
        break;
    case 3:
-       CV_Error(CV_StsUnsupportedFormat, "BORDER_WRAP is not supported!");
+       CV_Error(Error::StsUnsupportedFormat, "BORDER_WRAP is not supported!");
        return;
    case 4:
        sprintf(btype, "BORDER_REFLECT_101");
@ -840,7 +840,7 @@ static void GPUFilterBox_8u_C4R(const oclMat &src, oclMat &dst,
        sprintf(btype, "BORDER_REFLECT");
        break;
    case 3:
-       CV_Error(CV_StsUnsupportedFormat, "BORDER_WRAP is not supported!");
+       CV_Error(Error::StsUnsupportedFormat, "BORDER_WRAP is not supported!");
        return;
    case 4:
        sprintf(btype, "BORDER_REFLECT_101");
@ -902,7 +902,7 @@ static void GPUFilterBox_32F_C1R(const oclMat &src, oclMat &dst,
        sprintf(btype, "BORDER_REFLECT");
        break;
    case 3:
-       CV_Error(CV_StsUnsupportedFormat, "BORDER_WRAP is not supported!");
+       CV_Error(Error::StsUnsupportedFormat, "BORDER_WRAP is not supported!");
        return;
    case 4:
        sprintf(btype, "BORDER_REFLECT_101");
@ -965,7 +965,7 @@ static void GPUFilterBox_32F_C4R(const oclMat &src, oclMat &dst,
        sprintf(btype, "BORDER_REFLECT");
        break;
    case 3:
-       CV_Error(CV_StsUnsupportedFormat, "BORDER_WRAP is not supported!");
+       CV_Error(Error::StsUnsupportedFormat, "BORDER_WRAP is not supported!");
        return;
    case 4:
        sprintf(btype, "BORDER_REFLECT_101");
@ -1396,7 +1396,7 @@ void cv::ocl::sepFilter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat
        if ((bordertype != cv::BORDER_CONSTANT) &&
            (bordertype != cv::BORDER_REPLICATE))
        {
-           CV_Error(CV_StsBadArg, "unsupported border type");
+           CV_Error(Error::StsBadArg, "unsupported border type");
        }
    }
 }
@ -1479,7 +1479,7 @@ void cv::ocl::Laplacian(const oclMat &src, oclMat &dst, int ddepth, int ksize, d
 {
    if (!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.type() == CV_64F)
    {
-       CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
+       CV_Error(Error::GpuNotSupported, "Selected device don't support double\r\n");
        return;
    }

@ -1563,7 +1563,7 @@ void cv::ocl::GaussianBlur(const oclMat &src, oclMat &dst, Size ksize, double si
        if ((bordertype != cv::BORDER_CONSTANT) &&
            (bordertype != cv::BORDER_REPLICATE))
        {
-           CV_Error(CV_StsBadArg, "unsupported border type");
+           CV_Error(Error::StsBadArg, "unsupported border type");
        }
    }
 }
@ -50,7 +50,7 @@
 void cv::ocl::gemm(const oclMat&, const oclMat&, double,
                    const oclMat&, double, oclMat&, int)
 {
-   CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented");
+   CV_Error(Error::StsNotImplemented, "OpenCL BLAS is not implemented");
 }
 #else
 #include "clAmdBlas.h"
@ -53,6 +53,8 @@
 using namespace cv;
 using namespace cv::ocl;

+#if 0
+
 namespace cv
 {
    namespace ocl
@ -1493,3 +1495,4 @@ struct gpuHaarDetectObjects_ScaleCascade_Invoker

 }
 }
+#endif
|
@@ -267,7 +267,7 @@ void cv::ocl::HOGDescriptor::getDescriptors(const oclMat &img, Size win_stride,
             win_stride.height, win_stride.width, effect_size.height, effect_size.width, block_hists, descriptors);
         break;
     default:
-        CV_Error(CV_StsBadArg, "Unknown descriptor format");
+        CV_Error(Error::StsBadArg, "Unknown descriptor format");
     }
 }
 
@@ -353,7 +353,7 @@ void cv::ocl::HOGDescriptor::detectMultiScale(const oclMat &img, std::vector<Rec
         }
         Size scaled_win_size(cvRound(win_size.width * scale), cvRound(win_size.height * scale));
         for (size_t j = 0; j < locations.size(); j++)
-            all_candidates.push_back(Rect(Point2d((CvPoint)locations[j]) * scale, scaled_win_size));
+            all_candidates.push_back(Rect(Point2d(locations[j]) * scale, scaled_win_size));
     }
 
     found_locations.assign(all_candidates.begin(), all_candidates.end());
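The detectMultiScale hunk also drops a round-trip through the legacy CvPoint struct when scaling candidate windows, presumably because cv::Point now converts to Point2d without the C-struct bridge. A hedged sketch mirroring the new line above (hypothetical helper, assuming the OpenCV headers of this branch):

    // Hypothetical sketch, not part of this diff: building a scaled candidate
    // rectangle from a cv::Point without casting through CvPoint first.
    #include "opencv2/core.hpp"

    static cv::Rect scaledCandidate(const cv::Point &location, double scale, const cv::Size &winSize)
    {
        cv::Size scaledWin(cvRound(winSize.width * scale), cvRound(winSize.height * scale));
        return cv::Rect(cv::Point2d(location) * scale, scaledWin);  // Point -> Point2d directly
    }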
@@ -1627,7 +1627,7 @@ void cv::ocl::device::hog::normalize_hists(int nbins, int block_stride_x, int bl
     size_t localThreads[3] = { nthreads, 1, 1 };
 
     if ((nthreads < 32) || (nthreads > 512) )
-        cv::ocl::error("normalize_hists: histogram's size is too small or too big", __FILE__, __LINE__, "normalize_hists");
+        cv::error(Error::StsBadArg, "normalize_hists: histogram's size is too small or too big", "cv::ocl::device::hog::normalize_hists", __FILE__, __LINE__);
 
     args.push_back( std::make_pair( sizeof(cl_int), (void *)&nthreads));
     args.push_back( std::make_pair( sizeof(cl_int), (void *)&block_hist_size));
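Here the module-local cv::ocl::error() helper is replaced by the core cv::error() routine, which takes the error code, message, reporting-function name, file and line explicitly. A minimal sketch of the new call shape, assuming the cv::error(code, msg, func, file, line) overload from the split core headers; the wrapper function and its name string are hypothetical:

    // Hypothetical sketch, not part of this diff: raising the same error
    // through the core cv::error() entry point.
    #include "opencv2/core.hpp"

    static void checkHistThreads(size_t nthreads)
    {
        if (nthreads < 32 || nthreads > 512)
            cv::error(cv::Error::StsBadArg,
                      "normalize_hists: histogram's size is too small or too big",
                      "checkHistThreads",   // reporting-function name, illustrative only
                      __FILE__, __LINE__);
    }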
@@ -385,7 +385,7 @@ void cv::ocl::HoughCircles(const oclMat& src, oclMat& circles, HoughCirclesBuf&
 void cv::ocl::HoughCirclesDownload(const oclMat& d_circles, cv::OutputArray h_circles_)
 {
     // FIX ME: garbage values are copied!
-    CV_Error(CV_StsNotImplemented, "HoughCirclesDownload is not implemented");
+    CV_Error(Error::StsNotImplemented, "HoughCirclesDownload is not implemented");
 
     if (d_circles.empty())
     {
@@ -428,7 +428,7 @@ namespace cv
         {
             if(dsize.width != (int)(src.cols * fx) || dsize.height != (int)(src.rows * fy))
             {
-                CV_Error(CV_StsUnmatchedSizes, "invalid dsize and fx, fy!");
+                CV_Error(Error::StsUnmatchedSizes, "invalid dsize and fx, fy!");
             }
         }
         if( dsize == Size() )
@@ -448,7 +448,7 @@ namespace cv
             resize_gpu( src, dst, fx, fy, interpolation);
             return;
         }
-        CV_Error(CV_StsUnsupportedFormat, "Non-supported interpolation method");
+        CV_Error(Error::StsUnsupportedFormat, "Non-supported interpolation method");
     }
 
 
@@ -501,7 +501,7 @@ namespace cv
         }
         else
         {
-            CV_Error(CV_StsUnsupportedFormat, "Non-supported filter length");
+            CV_Error(Error::StsUnsupportedFormat, "Non-supported filter length");
             //String kernelName = "medianFilter";
             //args.push_back( std::make_pair( sizeof(cl_int),(void*)&m));
 
@@ -522,7 +522,7 @@ namespace cv
             (bordertype != cv::BORDER_CONSTANT) &&
             (bordertype != cv::BORDER_REPLICATE))
         {
-            CV_Error(CV_StsBadArg, "unsupported border type");
+            CV_Error(Error::StsBadArg, "unsupported border type");
         }
     }
     bordertype &= ~cv::BORDER_ISOLATED;
@@ -549,7 +549,7 @@ namespace cv
         }
         if(bordertype_index == sizeof(__bordertype) / sizeof(int))
         {
-            CV_Error(CV_StsBadArg, "unsupported border type");
+            CV_Error(Error::StsBadArg, "unsupported border type");
         }
         String kernelName = "copymakeborder";
         size_t localThreads[3] = {16, 16, 1};
@@ -604,7 +604,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_uchar4) , (void *)&val.uval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_8S:
@@ -623,7 +623,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_char4) , (void *)&val.cval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16U:
@@ -642,7 +642,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_ushort4) , (void *)&val.usval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16S:
@@ -661,7 +661,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_short4) , (void *)&val.shval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32S:
@@ -687,7 +687,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_int4) , (void *)&val.ival ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32F:
@@ -706,7 +706,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_float4) , (void *)&val.fval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_64F:
@@ -725,11 +725,11 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_double4) , (void *)&val.dval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     default:
-        CV_Error(CV_StsUnsupportedFormat, "unknown depth");
+        CV_Error(Error::StsUnsupportedFormat, "unknown depth");
     }
 
     openCLExecuteKernel(src.clCxt, &imgproc_copymakeboder, kernelName, globalThreads, localThreads, args, -1, -1, compile_option);
@@ -1021,7 +1021,7 @@ namespace cv
         CV_Assert(src.type() == CV_8UC1);
         if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
         {
-            CV_Error(CV_GpuNotSupported, "select device don't support double");
+            CV_Error(Error::GpuNotSupported, "select device don't support double");
         }
         int vlen = 4;
         int offset = src.offset / vlen;
@@ -1195,7 +1195,7 @@ namespace cv
         {
             if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
             {
-                CV_Error(CV_GpuNotSupported, "select device don't support double");
+                CV_Error(Error::GpuNotSupported, "select device don't support double");
             }
             CV_Assert(src.cols >= blockSize / 2 && src.rows >= blockSize / 2);
             oclMat Dx, Dy;
@@ -1209,7 +1209,7 @@ namespace cv
         {
             if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F)
             {
-                CV_Error(CV_GpuNotSupported, "select device don't support double");
+                CV_Error(Error::GpuNotSupported, "select device don't support double");
             }
             CV_Assert(src.cols >= blockSize / 2 && src.rows >= blockSize / 2);
             oclMat Dx, Dy;
@@ -1256,15 +1256,10 @@ namespace cv
         void meanShiftFiltering(const oclMat &src, oclMat &dst, int sp, int sr, TermCriteria criteria)
         {
             if( src.empty() )
-                CV_Error( CV_StsBadArg, "The input image is empty" );
+                CV_Error(Error::StsBadArg, "The input image is empty" );
 
             if( src.depth() != CV_8U || src.oclchannels() != 4 )
-                CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
-
-            // if(!src.clCxt->supportsFeature(Context::CL_DOUBLE))
-            // {
-            // CV_Error( CV_GpuNotSupported, "Selected device doesn't support double, so a deviation exists.\nIf the accuracy is acceptable, the error can be ignored.\n");
-            // }
+                CV_Error(Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
 
             dst.create( src.size(), CV_8UC4 );
 
@@ -1324,15 +1319,10 @@ namespace cv
         void meanShiftProc(const oclMat &src, oclMat &dstr, oclMat &dstsp, int sp, int sr, TermCriteria criteria)
         {
             if( src.empty() )
-                CV_Error( CV_StsBadArg, "The input image is empty" );
+                CV_Error(Error::StsBadArg, "The input image is empty" );
 
             if( src.depth() != CV_8U || src.oclchannels() != 4 )
-                CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
-
-            // if(!src.clCxt->supportsFeature(Context::CL_DOUBLE))
-            // {
-            // CV_Error( CV_GpuNotSupported, "Selected device doesn't support double, so a deviation exists.\nIf the accuracy is acceptable, the error can be ignored.\n");
-            // }
+                CV_Error(Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
 
             dstr.create( src.size(), CV_8UC4 );
             dstsp.create( src.size(), CV_16SC2 );
@@ -1581,8 +1571,7 @@ namespace cv
         if( src.depth() == CV_8U )
             oclbilateralFilter_8u( src, dst, radius, sigmaclr, sigmaspc, borderType );
         else
-            CV_Error( CV_StsUnsupportedFormat,
-                "Bilateral filtering is only implemented for 8uimages" );
+            CV_Error(Error::StsUnsupportedFormat, "Bilateral filtering is only implemented for 8uimages" );
     }
 
 }
@@ -1726,7 +1715,7 @@ static void convolve_run_fft(const oclMat &image, const oclMat &templ, oclMat &r
 }
 
 #else
-    CV_Error(CV_StsNotImplemented, "OpenCL DFT is not implemented");
+    CV_Error(Error::StsNotImplemented, "OpenCL DFT is not implemented");
 #define UNUSED(x) (void)(x);
     UNUSED(image) UNUSED(templ) UNUSED(result) UNUSED(ccorr) UNUSED(buf)
 #undef UNUSED
@@ -505,7 +505,7 @@ namespace cv
             char* binary = (char*)malloc(binarySize);
             if(binary == NULL)
             {
-                CV_Error(CV_StsNoMem, "Failed to allocate host memory.");
+                CV_Error(Error::StsNoMem, "Failed to allocate host memory.");
             }
             openCLSafeCall(clGetProgramInfo(program,
                                             CL_PROGRAM_BINARIES,
@@ -407,7 +407,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_float), (void *)&templ_sum[3]) );
             break;
         default:
-            CV_Error(CV_StsBadArg, "matchTemplate: unsupported number of channels");
+            CV_Error(Error::StsBadArg, "matchTemplate: unsupported number of channels");
             break;
         }
     }
@@ -513,7 +513,7 @@ namespace cv
             args.push_back( std::make_pair( sizeof(cl_float), (void *)&templ_sqsum_sum) );
             break;
         default:
-            CV_Error(CV_StsBadArg, "matchTemplate: unsupported number of channels");
+            CV_Error(Error::StsBadArg, "matchTemplate: unsupported number of channels");
             break;
         }
     }
@@ -106,7 +106,7 @@ static void convert_C3C4(const cl_mem &src, oclMat &dst)
         sprintf(compile_option, "-D GENTYPE4=double4");
         break;
     default:
-        CV_Error(CV_StsUnsupportedFormat, "unknown depth");
+        CV_Error(Error::StsUnsupportedFormat, "unknown depth");
     }
     std::vector< std::pair<size_t, const void *> > args;
     args.push_back( std::make_pair( sizeof(cl_mem), (void *)&src));
@@ -154,7 +154,7 @@ static void convert_C4C3(const oclMat &src, cl_mem &dst)
         sprintf(compile_option, "-D GENTYPE4=double4");
         break;
     default:
-        CV_Error(CV_StsUnsupportedFormat, "unknown depth");
+        CV_Error(Error::StsUnsupportedFormat, "unknown depth");
     }
 
     std::vector< std::pair<size_t, const void *> > args;
@@ -464,7 +464,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_uchar4) , (void *)&val.uval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_8S:
@@ -483,7 +483,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_char4) , (void *)&val.cval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16U:
@@ -502,7 +502,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_ushort4) , (void *)&val.usval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16S:
@@ -521,7 +521,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_short4) , (void *)&val.shval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32S:
@@ -547,7 +547,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_int4) , (void *)&val.ival ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32F:
@@ -566,7 +566,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_float4) , (void *)&val.fval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_64F:
@@ -585,11 +585,11 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, Stri
             args.push_back( std::make_pair( sizeof(cl_double4) , (void *)&val.dval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     default:
-        CV_Error(CV_StsUnsupportedFormat, "unknown depth");
+        CV_Error(Error::StsUnsupportedFormat, "unknown depth");
     }
 #ifdef CL_VERSION_1_2
     if(dst.offset == 0 && dst.cols == dst.wholecols)
@@ -656,7 +656,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_uchar4) , (void *)&val.uval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_8S:
@@ -675,7 +675,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_char4) , (void *)&val.cval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16U:
@@ -694,7 +694,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_ushort4) , (void *)&val.usval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_16S:
@@ -713,7 +713,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_short4) , (void *)&val.shval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32S:
@@ -732,7 +732,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_int4) , (void *)&val.ival ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_32F:
@@ -751,7 +751,7 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_float4) , (void *)&val.fval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     case CV_64F:
@@ -770,11 +770,11 @@ static void set_to_withmask_run(const oclMat &dst, const Scalar &scalar, const o
             args.push_back( std::make_pair( sizeof(cl_double4) , (void *)&val.dval ));
             break;
         default:
-            CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
+            CV_Error(Error::StsUnsupportedFormat, "unsupported channels");
         }
         break;
     default:
-        CV_Error(CV_StsUnsupportedFormat, "unknown depth");
+        CV_Error(Error::StsUnsupportedFormat, "unknown depth");
     }
     args.push_back( std::make_pair( sizeof(cl_mem) , (void *)&dst.data ));
     args.push_back( std::make_pair( sizeof(cl_int) , (void *)&dst.cols ));
@@ -824,13 +824,8 @@ oclMat &cv::ocl::oclMat::setTo(const Scalar &scalar, const oclMat &mask)
 oclMat cv::ocl::oclMat::reshape(int new_cn, int new_rows) const
 {
     if( new_rows != 0 && new_rows != rows)
-
     {
-        CV_Error( CV_StsBadFunc,
-
-                  "oclMat's number of rows can not be changed for current version" );
-
+        CV_Error( Error::StsBadFunc, "oclMat's number of rows can not be changed for current version" );
     }
 
     oclMat hdr = *this;
@@ -863,13 +858,13 @@ oclMat cv::ocl::oclMat::reshape(int new_cn, int new_rows) const
 
     if (!isContinuous())
 
-        CV_Error(CV_BadStep, "The matrix is not continuous, thus its number of rows can not be changed");
+        CV_Error(Error::BadStep, "The matrix is not continuous, thus its number of rows can not be changed");
 
 
 
     if ((unsigned)new_rows > (unsigned)total_size)
 
-        CV_Error(CV_StsOutOfRange, "Bad new number of rows");
+        CV_Error(Error::StsOutOfRange, "Bad new number of rows");
 
 
 
@@ -879,7 +874,7 @@ oclMat cv::ocl::oclMat::reshape(int new_cn, int new_rows) const
 
     if (total_width * new_rows != total_size)
 
-        CV_Error(CV_StsBadArg, "The total number of matrix elements is not divisible by the new number of rows");
+        CV_Error(Error::StsBadArg, "The total number of matrix elements is not divisible by the new number of rows");
 
 
 
@@ -897,7 +892,7 @@ oclMat cv::ocl::oclMat::reshape(int new_cn, int new_rows) const
 
     if (new_width * new_cn != total_width)
 
-        CV_Error(CV_BadNumChannels, "The total width is not divisible by the new number of channels");
+        CV_Error(Error::BadNumChannels, "The total width is not divisible by the new number of channels");
 
 
 
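The reshape() hunks keep the same shape checks and only swap the error codes; the underlying arithmetic is that the element count must split evenly across the requested rows and channels. A hedged sketch of those two checks in isolation (hypothetical helper, not code from this diff):

    // Hypothetical sketch, not part of this diff: the divisibility checks that
    // back the reshape() errors above.
    #include "opencv2/core.hpp"

    static void checkReshape(int totalWidth, int totalSize, int newCn, int newRows)
    {
        // every new row must account for exactly totalWidth elements
        if (newRows > 0 && totalWidth * newRows != totalSize)
            CV_Error(cv::Error::StsBadArg, "The total number of matrix elements is not divisible by the new number of rows");

        // the row width must split evenly into the new channel count
        if (newCn > 0 && totalWidth % newCn != 0)
            CV_Error(cv::Error::BadNumChannels, "The total width is not divisible by the new number of channels");
    }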
Some files were not shown because too many files have changed in this diff.