Merged the trunk r8467:8507 (inclusive) (big bunch of documentation fixes)
This commit is contained in:
@@ -13,8 +13,9 @@ Finds edges in an image using the [Canny86]_ algorithm.
|
||||
|
||||
.. ocv:pyfunction:: cv2.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges
|
||||
|
||||
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int apertureSize=3 )
|
||||
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, apertureSize=3)-> None
|
||||
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, aperture_size=3) -> None
|
||||
|
||||
:param image: Single-channel 8-bit input image.
|
||||
|
||||
@@ -37,20 +38,21 @@ cornerEigenValsAndVecs
|
||||
----------------------
|
||||
Calculates eigenvalues and eigenvectors of image blocks for corner detection.
|
||||
|
||||
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int apertureSize, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int ksize, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.cornerEigenValsAndVecs(src, blockSize, ksize[, dst[, borderType]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int blockSize, int apertureSize=3 )
|
||||
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, apertureSize=3)-> None
|
||||
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int block_size, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, aperture_size=3) -> None
|
||||
|
||||
:param src: Input single-channel 8-bit or floating-point image.
|
||||
|
||||
:param dst: Image to store the results. It has the same size as ``src`` and the type ``CV_32FC(6)`` .
|
||||
|
||||
|
||||
:param blockSize: Neighborhood size (see details below).
|
||||
|
||||
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
@@ -72,7 +74,7 @@ After that, it finds eigenvectors and eigenvalues of
|
||||
* :math:`\lambda_1, \lambda_2` are the non-sorted eigenvalues of :math:`M`
|
||||
|
||||
* :math:`x_1, y_1` are the eigenvectors corresponding to :math:`\lambda_1`
|
||||
|
||||
|
||||
* :math:`x_2, y_2` are the eigenvectors corresponding to :math:`\lambda_2`
|
||||
|
||||
The output of the function can be used for robust edge or corner detection.
|
||||
@@ -89,20 +91,21 @@ cornerHarris
|
||||
------------
|
||||
Harris edge detector.
|
||||
|
||||
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int apertureSize, double k, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int ksize, double k, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.cornerHarris(src, blockSize, ksize, k[, dst[, borderType]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harrisDst, int blockSize, int apertureSize=3, double k=0.04 )
|
||||
.. ocv:pyoldfunction:: cv.CornerHarris(image, harrisDst, blockSize, apertureSize=3, k=0.04)-> None
|
||||
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harris_responce, int block_size, int aperture_size=3, double k=0.04 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CornerHarris(image, harris_dst, blockSize, aperture_size=3, k=0.04) -> None
|
||||
|
||||
:param src: Input single-channel 8-bit or floating-point image.
|
||||
|
||||
:param dst: Image to store the Harris detector responses. It has the type ``CV_32FC1`` and the same size as ``src`` .
|
||||
|
||||
|
||||
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
|
||||
|
||||
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
|
||||
:param k: Harris detector free parameter. See the formula below.
|
||||
|
||||
@@ -128,21 +131,21 @@ cornerMinEigenVal
|
||||
-----------------
|
||||
Calculates the minimal eigenvalue of gradient matrices for corner detection.
|
||||
|
||||
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int apertureSize=3, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int ksize=3, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.cornerMinEigenVal(src, blockSize[, dst[, ksize[, borderType]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int blockSize, int apertureSize=3 )
|
||||
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int block_size, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, apertureSize=3)-> None
|
||||
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, aperture_size=3) -> None
|
||||
|
||||
:param src: Input single-channel 8-bit or floating-point image.
|
||||
|
||||
:param dst: Image to store the minimal eigenvalues. It has the type ``CV_32FC1`` and the same size as ``src`` .
|
||||
|
||||
|
||||
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
|
||||
|
||||
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
@@ -161,9 +164,9 @@ Refines the corner locations.
|
||||
|
||||
.. ocv:pyfunction:: cv2.cornerSubPix(image, corners, winSize, zeroZone, criteria) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize winSize, CvSize zeroZone, CvTermCriteria criteria )
|
||||
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, winSize, zeroZone, criteria)-> corners
|
||||
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, win, zero_zone, criteria) -> corners
|
||||
|
||||
:param image: Input image.
|
||||
|
||||
@@ -223,15 +226,15 @@ Determines strong corners on an image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) -> corners
|
||||
|
||||
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eigImage, CvArr* tempImage, CvPoint2D32f* corners, int* cornerCount, double qualityLevel, double minDistance, const CvArr* mask=NULL, int blockSize=3, int useHarris=0, double k=0.04 )
|
||||
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, CvArr* temp_image, CvPoint2D32f* corners, int* corner_count, double quality_level, double min_distance, const CvArr* mask=NULL, int block_size=3, int use_harris=0, double k=0.04 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04)-> corners
|
||||
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04) -> cornerCount
|
||||
|
||||
:param image: Input 8-bit or floating-point 32-bit, single-channel image.
|
||||
|
||||
:param eigImage: The parameter is ignored.
|
||||
|
||||
:param tempImage: The parameter is ignored.
|
||||
|
||||
:param eig_image: The parameter is ignored.
|
||||
|
||||
:param temp_image: The parameter is ignored.
|
||||
|
||||
:param corners: Output vector of detected corners.
|
||||
|
||||
@@ -244,9 +247,9 @@ Determines strong corners on an image.
|
||||
:param mask: Optional region of interest. If the image is not empty (it needs to have the type ``CV_8UC1`` and the same size as ``image`` ), it specifies the region in which the corners are detected.
|
||||
|
||||
:param blockSize: Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. See :ocv:func:`cornerEigenValsAndVecs` .
|
||||
|
||||
|
||||
:param useHarrisDetector: Parameter indicating whether to use a Harris detector (see :ocv:func:`cornerHarris`) or :ocv:func:`cornerMinEigenVal`.
|
||||
|
||||
|
||||
:param k: Free parameter of the Harris detector.
|
||||
|
||||
The function finds the most prominent corners in the image or in the specified image region, as described in [Shi94]_:
|
||||
@@ -255,7 +258,7 @@ The function finds the most prominent corners in the image or in the specified i
|
||||
Function calculates the corner quality measure at every source image pixel using the
|
||||
:ocv:func:`cornerMinEigenVal` or
|
||||
:ocv:func:`cornerHarris` .
|
||||
|
||||
|
||||
#.
|
||||
Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are retained).
|
||||
|
||||
@@ -268,16 +271,16 @@ The function finds the most prominent corners in the image or in the specified i
|
||||
|
||||
#.
|
||||
    Function throws away each corner for which there is a stronger corner at a distance less than ``minDistance``.
|
||||
|
||||
|
||||
The function can be used to initialize a point-based tracker of an object.
|
||||
|
||||
.. note:: If the function is called with different values ``A`` and ``B`` of the parameter ``qualityLevel`` , and ``A`` > ``B``, the vector of returned corners with ``qualityLevel=A`` will be the prefix of the output vector with ``qualityLevel=B`` .
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ocv:func:`cornerMinEigenVal`,
|
||||
:ocv:func:`cornerHarris`,
|
||||
:ocv:func:`calcOpticalFlowPyrLK`,
|
||||
:ocv:func:`cornerMinEigenVal`,
|
||||
:ocv:func:`cornerHarris`,
|
||||
:ocv:func:`calcOpticalFlowPyrLK`,
|
||||
:ocv:func:`estimateRigidTransform`,
|
||||
|
||||
|
||||
@@ -287,16 +290,16 @@ Finds circles in a grayscale image using the Hough transform.
|
||||
|
||||
.. ocv:function:: void HoughCircles( InputArray image, OutputArray circles, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, CvMemStorage* circleStorage, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
|
||||
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, void* circle_storage, int method, double dp, double min_dist, double param1=100, double param2=100, int min_radius=0, int max_radius=0 )
|
||||
|
||||
.. ocv:pyfunction:: cv2.HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles
|
||||
|
||||
:param image: 8-bit, single-channel, grayscale input image.
|
||||
|
||||
:param circles: Output vector of found circles. Each vector is encoded as a 3-element floating-point vector :math:`(x, y, radius)` .
|
||||
|
||||
:param circleStorage: In C function this is a memory storage that will contain the output sequence of found circles.
|
||||
|
||||
|
||||
:param circle_storage: In C function this is a memory storage that will contain the output sequence of found circles.
|
||||
|
||||
:param method: Detection method to use. Currently, the only implemented method is ``CV_HOUGH_GRADIENT`` , which is basically *21HT* , described in [Yuen90]_.
|
||||
|
||||
:param dp: Inverse ratio of the accumulator resolution to the image resolution. For example, if ``dp=1`` , the accumulator has the same resolution as the input image. If ``dp=2`` , the accumulator has half as big width and height.
|
||||
@@ -311,7 +314,7 @@ Finds circles in a grayscale image using the Hough transform.
|
||||
|
||||
:param maxRadius: Maximum circle radius.
|
||||
|
||||
The function finds circles in a grayscale image using a modification of the Hough transform.
|
||||
The function finds circles in a grayscale image using a modification of the Hough transform.
|
||||
|
||||
Example: ::
|
||||
|
||||
@@ -362,7 +365,7 @@ Finds lines in a binary image using the standard Hough transform.
|
||||
|
||||
.. ocv:pyfunction:: cv2.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn]]]) -> lines
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
|
||||
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)-> lines
|
||||
|
||||
@@ -379,31 +382,31 @@ Finds lines in a binary image using the standard Hough transform.
|
||||
:param srn: For the multi-scale Hough transform, it is a divisor for the distance resolution ``rho`` . The coarse accumulator distance resolution is ``rho`` and the accurate accumulator resolution is ``rho/srn`` . If both ``srn=0`` and ``stn=0`` , the classical Hough transform is used. Otherwise, both these parameters should be positive.
|
||||
|
||||
    :param stn: For the multi-scale Hough transform, it is a divisor for the angle resolution ``theta``.
|
||||
|
||||
:param method: One of the following Hough transform variants:
|
||||
|
||||
* **CV_HOUGH_STANDARD** classical or standard Hough transform. Every line is represented by two floating-point numbers :math:`(\rho, \theta)` , where :math:`\rho` is a distance between (0,0) point and the line, and :math:`\theta` is the angle between x-axis and the normal to the line. Thus, the matrix must be (the created sequence will be) of ``CV_32FC2`` type
|
||||
|
||||
|
||||
* **CV_HOUGH_PROBABILISTIC** probabilistic Hough transform (more efficient in case if the picture contains a few long linear segments). It returns line segments rather than the whole line. Each segment is represented by starting and ending points, and the matrix must be (the created sequence will be) of the ``CV_32SC4`` type.
|
||||
|
||||
|
||||
:param method: One of the following Hough transform variants:
|
||||
|
||||
* **CV_HOUGH_STANDARD** classical or standard Hough transform. Every line is represented by two floating-point numbers :math:`(\rho, \theta)` , where :math:`\rho` is a distance between (0,0) point and the line, and :math:`\theta` is the angle between x-axis and the normal to the line. Thus, the matrix must be (the created sequence will be) of ``CV_32FC2`` type
|
||||
|
||||
|
||||
* **CV_HOUGH_PROBABILISTIC** probabilistic Hough transform (more efficient in case if the picture contains a few long linear segments). It returns line segments rather than the whole line. Each segment is represented by starting and ending points, and the matrix must be (the created sequence will be) of the ``CV_32SC4`` type.
|
||||
|
||||
* **CV_HOUGH_MULTI_SCALE** multi-scale variant of the classical Hough transform. The lines are encoded the same way as ``CV_HOUGH_STANDARD``.
|
||||
|
||||
|
||||
|
||||
:param param1: First method-dependent parameter:
|
||||
|
||||
* For the classical Hough transform, it is not used (0).
|
||||
|
||||
* For the probabilistic Hough transform, it is the minimum line length.
|
||||
|
||||
* For the multi-scale Hough transform, it is ``srn``.
|
||||
|
||||
:param param2: Second method-dependent parameter:
|
||||
|
||||
* For the classical Hough transform, it is not used (0).
|
||||
|
||||
|
||||
* For the probabilistic Hough transform, it is the minimum line length.
|
||||
|
||||
* For the multi-scale Hough transform, it is ``srn``.
|
||||
|
||||
:param param2: Second method-dependent parameter:
|
||||
|
||||
* For the classical Hough transform, it is not used (0).
|
||||
|
||||
* For the probabilistic Hough transform, it is the maximum gap between line segments lying on the same line to treat them as a single line segment (that is, to join them).
|
||||
|
||||
|
||||
* For the multi-scale Hough transform, it is ``stn``.
|
||||
|
||||
The function implements the standard or standard multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of Hough transform.
|
||||
@@ -501,21 +504,22 @@ preCornerDetect
|
||||
---------------
|
||||
Calculates a feature map for corner detection.
|
||||
|
||||
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int apertureSize, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int ksize, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.preCornerDetect(src, ksize[, dst[, borderType]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int apertureSize=3 )
|
||||
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.PreCornerDetect(image, corners, apertureSize=3)-> None
|
||||
|
||||
    :param src: Source single-channel 8-bit or floating-point image.
|
||||
|
||||
:param dst: Output image that has the type ``CV_32F`` and the same size as ``src`` .
|
||||
|
||||
:param apertureSize: Aperture size of the :ocv:func:`Sobel` .
|
||||
|
||||
|
||||
:param ksize: Aperture size of the :ocv:func:`Sobel` .
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
|
||||
The function calculates the complex spatial derivative-based function of the source image
|
||||
|
||||
.. math::
|
||||
|
||||
@@ -14,11 +14,11 @@ OpenCV enables you to specify the extrapolation method. For details, see the fun
|
||||
|
||||
/*
|
||||
Various border types, image boundaries are denoted with '|'
|
||||
|
||||
|
||||
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
|
||||
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
|
||||
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
|
||||
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
|
||||
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
|
||||
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
|
||||
*/
|
||||
|
||||
@@ -390,9 +390,9 @@ Applies the bilateral filter to an image.
|
||||
:param src: Source 8-bit or floating-point, 1-channel or 3-channel image.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from ``sigmaSpace`` .
|
||||
|
||||
|
||||
:param sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see ``sigmaSpace`` ) will be mixed together, resulting in larger areas of semi-equal color.
|
||||
|
||||
:param sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see ``sigmaColor`` ). When ``d>0`` , it specifies the neighborhood size regardless of ``sigmaSpace`` . Otherwise, ``d`` is proportional to ``sigmaSpace`` .
|
||||
@@ -421,7 +421,7 @@ Smoothes an image using the normalized box filter.
|
||||
:param src: Source image. The image can have any number of channels, which are processed independently. The depth should be ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param ksize: Smoothing kernel size.
|
||||
|
||||
:param anchor: Anchor point. The default value ``Point(-1,-1)`` means that the anchor is at the kernel center.
|
||||
@@ -441,7 +441,7 @@ The call ``blur(src, dst, ksize, anchor, borderType)`` is equivalent to ``boxFil
|
||||
:ocv:func:`boxFilter`,
|
||||
:ocv:func:`bilateralFilter`,
|
||||
:ocv:func:`GaussianBlur`,
|
||||
:ocv:func:`medianBlur`
|
||||
:ocv:func:`medianBlur`
|
||||
|
||||
|
||||
borderInterpolate
|
||||
@@ -453,7 +453,7 @@ Computes the source location of an extrapolated pixel.
|
||||
.. ocv:pyfunction:: cv2.borderInterpolate(p, len, borderType) -> retval
|
||||
|
||||
:param p: 0-based coordinate of the extrapolated pixel along one of the axes, likely <0 or >= ``len`` .
|
||||
|
||||
|
||||
:param len: Length of the array along the corresponding axis.
|
||||
|
||||
:param borderType: Border type, one of the ``BORDER_*`` , except for ``BORDER_TRANSPARENT`` and ``BORDER_ISOLATED`` . When ``borderType==BORDER_CONSTANT`` , the function always returns -1, regardless of ``p`` and ``len`` .
|
||||
@@ -486,7 +486,7 @@ Smoothes an image using the box filter.
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param ksize: Smoothing kernel size.
|
||||
|
||||
:param anchor: Anchor point. The default value ``Point(-1,-1)`` means that the anchor is at the kernel center.
|
||||
@@ -515,7 +515,7 @@ Unnormalized box filter is useful for computing various integral characteristics
|
||||
:ocv:func:`bilateralFilter`,
|
||||
:ocv:func:`GaussianBlur`,
|
||||
:ocv:func:`medianBlur`,
|
||||
:ocv:func:`integral`
|
||||
:ocv:func:`integral`
|
||||
|
||||
|
||||
|
||||
@@ -523,7 +523,7 @@ buildPyramid
|
||||
----------------
|
||||
Constructs the Gaussian pyramid for an image.
|
||||
|
||||
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel )
|
||||
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel, int borderType=BORDER_DEFAULT )
|
||||
|
||||
:param src: Source image. Check :ocv:func:`pyrDown` for the list of supported types.
|
||||
|
||||
@@ -550,19 +550,19 @@ Forms a border around an image.
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same type as ``src`` and the size ``Size(src.cols+left+right, src.rows+top+bottom)`` .
|
||||
|
||||
|
||||
:param top:
|
||||
|
||||
|
||||
:param bottom:
|
||||
|
||||
|
||||
:param left:
|
||||
|
||||
|
||||
:param right: Parameter specifying how many pixels in each direction from the source image rectangle to extrapolate. For example, ``top=1, bottom=1, left=1, right=1`` mean that 1 pixel-wide border needs to be built.
|
||||
|
||||
:param borderType: Border type. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
|
||||
:param value: Border value if ``borderType==BORDER_CONSTANT`` .
|
||||
|
||||
|
||||
The function copies the source image into the middle of the destination image. The areas to the left, to the right, above and below the copied source image will be filled with extrapolated pixels. This is not what
|
||||
:ocv:class:`FilterEngine` or filtering functions based on it do (they extrapolate pixels on-fly), but what other more complex functions, including your own, may do to simplify image boundary handling.
|
||||
|
||||
@@ -605,17 +605,17 @@ Returns a box filter engine.
|
||||
:param srcType: Source image type.
|
||||
|
||||
:param sumType: Intermediate horizontal sum type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
:param dstType: Destination image type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
:param ksize: Aperture size.
|
||||
|
||||
:param anchor: Anchor position with the kernel. Negative values mean that the anchor is at the kernel center.
|
||||
|
||||
:param normalize: Flag specifying whether the sums are normalized or not. See :ocv:func:`boxFilter` for details.
|
||||
|
||||
|
||||
:param scale: Another way to specify normalization in lower-level ``getColumnSumFilter`` .
|
||||
|
||||
|
||||
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
The function is a convenience function that retrieves the horizontal sum primitive filter with
|
||||
@@ -631,7 +631,7 @@ The function itself is used by
|
||||
|
||||
:ocv:class:`FilterEngine`,
|
||||
:ocv:func:`blur`,
|
||||
:ocv:func:`boxFilter`
|
||||
:ocv:func:`boxFilter`
|
||||
|
||||
|
||||
|
||||
@@ -644,13 +644,13 @@ Returns an engine for computing image derivatives.
|
||||
:param srcType: Source image type.
|
||||
|
||||
:param dstType: Destination image type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
    :param dx: Derivative order with respect to x.
|
||||
|
||||
    :param dy: Derivative order with respect to y.
|
||||
|
||||
    :param ksize: Aperture size. See :ocv:func:`getDerivKernels` .
|
||||
|
||||
|
||||
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
The function :ocv:func:`createDerivFilter` is a small convenience function that retrieves linear filter coefficients for computing image derivatives using
|
||||
@@ -664,7 +664,7 @@ The function :ocv:func:`createDerivFilter` is a small convenience function that
|
||||
:ocv:func:`createSeparableLinearFilter`,
|
||||
:ocv:func:`getDerivKernels`,
|
||||
:ocv:func:`Scharr`,
|
||||
:ocv:func:`Sobel`
|
||||
:ocv:func:`Sobel`
|
||||
|
||||
|
||||
|
||||
@@ -672,16 +672,16 @@ createGaussianFilter
|
||||
------------------------
|
||||
Returns an engine for smoothing images with the Gaussian filter.
|
||||
|
||||
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigmaX, double sigmaY=0, int borderType=BORDER_DEFAULT)
|
||||
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigma1, double sigma2=0, int borderType=BORDER_DEFAULT )
|
||||
|
||||
:param type: Source and destination image type.
|
||||
|
||||
:param ksize: Aperture size. See :ocv:func:`getGaussianKernel` .
|
||||
|
||||
:param sigmaX: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
|
||||
|
||||
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
|
||||
|
||||
|
||||
:param sigma1: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
|
||||
|
||||
:param sigma2: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigma2}\leftarrow\texttt{sigma1}` .
|
||||
|
||||
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
|
||||
|
||||
The function :ocv:func:`createGaussianFilter` computes Gaussian kernel coefficients and then returns a separable linear filter for that kernel. The function is used by
|
||||
@@ -693,7 +693,7 @@ The function :ocv:func:`createGaussianFilter` computes Gaussian kernel coefficie
|
||||
|
||||
:ocv:func:`createSeparableLinearFilter`,
|
||||
:ocv:func:`getGaussianKernel`,
|
||||
:ocv:func:`GaussianBlur`
|
||||
:ocv:func:`GaussianBlur`
|
||||
|
||||
|
||||
|
||||
@@ -701,14 +701,14 @@ createLinearFilter
|
||||
----------------------
|
||||
Creates a non-separable linear filter engine.
|
||||
|
||||
.. ocv:function:: Ptr<FilterEngine> createLinearFilter(int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
|
||||
.. ocv:function:: Ptr<FilterEngine> createLinearFilter( int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
|
||||
|
||||
.. ocv:function:: Ptr<BaseFilter> getLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor=Point(-1,-1), double delta=0, int bits=0)
|
||||
.. ocv:function:: Ptr<BaseFilter> getLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor=Point(-1,-1), double delta=0, int bits=0)
|
||||
|
||||
:param srcType: Source image type.
|
||||
|
||||
:param dstType: Destination image type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
:param kernel: 2D array of filter coefficients.
|
||||
|
||||
:param anchor: Anchor point within the kernel. Special value ``Point(-1,-1)`` means that the anchor is at the kernel center.
|
||||
@@ -718,9 +718,9 @@ Creates a non-separable linear filter engine.
|
||||
:param bits: Number of the fractional bits. The parameter is used when the kernel is an integer matrix representing fixed-point filter coefficients.
|
||||
|
||||
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
|
||||
|
||||
|
||||
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
|
||||
|
||||
|
||||
:param borderValue: Border value used in case of a constant border.
|
||||
|
||||
The function returns a pointer to a 2D linear filter for the specified kernel, the source array type, and the destination array type. The function is a higher-level function that calls ``getLinearFilter`` and passes the retrieved 2D filter to the
|
||||
@@ -737,13 +737,13 @@ createMorphologyFilter
|
||||
--------------------------
|
||||
Creates an engine for non-separable morphological operations.
|
||||
|
||||
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue())
|
||||
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
|
||||
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1))
|
||||
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1) )
|
||||
|
||||
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int esize, int anchor=-1)
|
||||
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter( int op, int type, int ksize, int anchor=-1 )
|
||||
|
||||
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int esize, int anchor=-1)
|
||||
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter( int op, int type, int ksize, int anchor=-1 )
|
||||
|
||||
.. ocv:function:: Scalar morphologyDefaultBorderValue()
|
||||
|
||||
@@ -751,16 +751,16 @@ Creates an engine for non-separable morphological operations.
|
||||
|
||||
    :param type: Input/output image type. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param element: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
|
||||
:param kernel: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
|
||||
|
||||
:param esize: Horizontal or vertical structuring element size for separable morphological operations.
|
||||
:param ksize: Horizontal or vertical structuring element size for separable morphological operations.
|
||||
|
||||
:param anchor: Anchor position within the structuring element. Negative values mean that the anchor is at the kernel center.
|
||||
|
||||
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
|
||||
|
||||
|
||||
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
|
||||
|
||||
|
||||
    :param borderValue: Border value in case of a constant border. The default value, \ ``morphologyDefaultBorderValue`` , has a special meaning. It is transformed to :math:`+\inf` for the erosion and to :math:`-\inf` for the dilation, which means that the minimum (maximum) is effectively computed only over the pixels that are inside the image.
|
||||
|
||||
The functions construct primitive morphological filtering operations or a filter engine based on them. Normally it is enough to use
|
||||
@@ -783,18 +783,18 @@ createSeparableLinearFilter
|
||||
-------------------------------
|
||||
Creates an engine for a separable linear filter.
|
||||
|
||||
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
|
||||
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter( int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
|
||||
|
||||
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType, InputArray columnKernel, int anchor, int symmetryType, double delta=0, int bits=0)
|
||||
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter( int bufType, int dstType, InputArray kernel, int anchor, int symmetryType, double delta=0, int bits=0 )
|
||||
|
||||
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType, InputArray rowKernel, int anchor, int symmetryType)
|
||||
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter( int srcType, int bufType, InputArray kernel, int anchor, int symmetryType )
|
||||
|
||||
:param srcType: Source array type.
|
||||
|
||||
:param dstType: Destination image type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
:param bufType: Intermediate buffer type that must have as many channels as ``srcType`` .
|
||||
|
||||
|
||||
:param rowKernel: Coefficients for filtering each row.
|
||||
|
||||
:param columnKernel: Coefficients for filtering each column.
|
||||
@@ -806,12 +806,12 @@ Creates an engine for a separable linear filter.
|
||||
:param bits: Number of the fractional bits. The parameter is used when the kernel is an integer matrix representing fixed-point filter coefficients.
|
||||
|
||||
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
|
||||
|
||||
|
||||
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
|
||||
|
||||
|
||||
:param borderValue: Border value used in case of a constant border.
|
||||
|
||||
:param symmetryType: Type of each row and column kernel. See :ocv:func:`getKernelType` .
|
||||
:param symmetryType: Type of each row and column kernel. See :ocv:func:`getKernelType` .
|
||||
|
||||
The functions construct primitive separable linear filtering operations or a filter engine based on them. Normally it is enough to use
|
||||
:ocv:func:`createSeparableLinearFilter` or even higher-level
|
||||
@@ -831,7 +831,7 @@ dilate
|
||||
----------
|
||||
Dilates an image by using a specific structuring element.
|
||||
|
||||
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
|
||||
.. ocv:pyfunction:: cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
|
||||
|
||||
@@ -841,7 +841,7 @@ Dilates an image by using a specific structuring element.
|
||||
    :param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param element: Structuring element used for dilation. If ``element=Mat()`` , a ``3 x 3`` rectangular structuring element is used.
|
||||
|
||||
:param anchor: Position of the anchor within the element. The default value ``(-1, -1)`` means that the anchor is at the element center.
|
||||
@@ -849,9 +849,9 @@ Dilates an image by using a specific structuring element.
|
||||
:param iterations: Number of times dilation is applied.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
|
||||
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
|
||||
|
||||
|
||||
The function dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken:
|
||||
|
||||
.. math::
|
||||
@@ -871,7 +871,7 @@ erode
|
||||
---------
|
||||
Erodes an image by using a specific structuring element.
|
||||
|
||||
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
|
||||
.. ocv:pyfunction:: cv2.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
|
||||
|
||||
@@ -881,7 +881,7 @@ Erodes an image by using a specific structuring element.
|
||||
    :param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src``.
|
||||
|
||||
|
||||
:param element: Structuring element used for erosion. If ``element=Mat()`` , a ``3 x 3`` rectangular structuring element is used.
|
||||
|
||||
:param anchor: Position of the anchor within the element. The default value ``(-1, -1)`` means that the anchor is at the element center.
|
||||
@@ -889,9 +889,9 @@ Erodes an image by using a specific structuring element.
|
||||
:param iterations: Number of times erosion is applied.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
|
||||
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
|
||||
|
||||
|
||||
The function erodes the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the minimum is taken:
|
||||
|
||||
.. math::
|
||||
@@ -916,27 +916,28 @@ Convolves an image with the kernel.
|
||||
|
||||
.. ocv:pyfunction:: cv2.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1, -1))
|
||||
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Filter2D(src, dst, kernel, anchor=(-1, -1))-> None
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and the same number of channels as ``src`` .
|
||||
|
||||
|
||||
:param ddepth: Desired depth of the destination image. If it is negative, it will be the same as ``src.depth()`` . The following combination of ``src.depth()`` and ``ddepth`` are supported:
|
||||
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
|
||||
|
||||
|
||||
when ``ddepth=-1``, the destination image will have the same depth as the source.
|
||||
|
||||
|
||||
:param kernel: Convolution kernel (or rather a correlation kernel), a single-channel floating point matrix. If you want to apply different kernels to different channels, split the image into separate color planes using :ocv:func:`split` and process them individually.
|
||||
|
||||
:param anchor: Anchor of the kernel that indicates the relative position of a filtered point within the kernel. The anchor should lie within the kernel. The special default value (-1,-1) means that the anchor is at the kernel center.
|
||||
|
||||
:param delta: Optional value added to the filtered pixels before storing them in ``dst`` .
|
||||
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
The function applies an arbitrary linear filter to an image. In-place operation is supported. When the aperture is partially outside the image, the function interpolates outlier pixel values according to the specified border mode.
|
||||
@@ -972,13 +973,13 @@ Smoothes an image using a Gaussian filter.
|
||||
:param src: Source image. The image can have any number of channels, which are processed independently. The depth should be ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. Or, they can be zero's and then they are computed from ``sigma*`` .
|
||||
|
||||
|
||||
:param sigmaX: Gaussian kernel standard deviation in X direction.
|
||||
|
||||
|
||||
:param sigmaY: Gaussian kernel standard deviation in Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If both sigmas are zeros, they are computed from ``ksize.width`` and ``ksize.height`` , respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modifications of all this semantics, it is recommended to specify all of ``ksize`` , ``sigmaX`` , and ``sigmaY`` .
|
||||
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
The function convolves the source image with the specified Gaussian kernel. In-place filtering is supported.
|
||||
@@ -1002,9 +1003,9 @@ Returns filter coefficients for computing spatial image derivatives.
|
||||
.. ocv:pyfunction:: cv2.getDerivKernels(dx, dy, ksize[, kx[, ky[, normalize[, ktype]]]]) -> kx, ky
|
||||
|
||||
:param kx: Output matrix of row filter coefficients. It has the type ``ktype`` .
|
||||
|
||||
|
||||
:param ky: Output matrix of column filter coefficients. It has the type ``ktype`` .
|
||||
|
||||
|
||||
:param dx: Derivative order in respect of x.
|
||||
|
||||
:param dy: Derivative order in respect of y.
|
||||
@@ -1060,7 +1061,7 @@ Two of such generated kernels can be passed to
|
||||
:ocv:func:`createSeparableLinearFilter`,
|
||||
:ocv:func:`getDerivKernels`,
|
||||
:ocv:func:`getStructuringElement`,
|
||||
:ocv:func:`GaussianBlur`
|
||||
:ocv:func:`GaussianBlur`
|
||||
|
||||
|
||||
|
||||
@@ -1084,7 +1085,7 @@ The function analyzes the kernel coefficients and returns the corresponding kern
|
||||
|
||||
* **KERNEL_SMOOTH** All the kernel elements are non-negative and summed to 1. For example, the Gaussian kernel is both smooth kernel and symmetrical, so the function returns ``KERNEL_SMOOTH | KERNEL_SYMMETRICAL`` .
|
||||
* **KERNEL_INTEGER** All the kernel coefficients are integer numbers. This flag can be combined with ``KERNEL_SYMMETRICAL`` or ``KERNEL_ASYMMETRICAL`` .
|
||||
|
||||
|
||||
|
||||
|
||||
getStructuringElement
|
||||
@@ -1095,7 +1096,7 @@ Returns a structuring element of the specified size and shape for morphological
|
||||
|
||||
.. ocv:pyfunction:: cv2.getStructuringElement(shape, ksize[, anchor]) -> retval
|
||||
|
||||
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchorX, int anchorY, int shape, int* values=NULL )
|
||||
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchor_x, int anchor_y, int shape, int* values=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)-> kernel
|
||||
|
||||
@@ -1108,27 +1109,27 @@ Returns a structuring element of the specified size and shape for morphological
|
||||
E_{ij}=1
|
||||
|
||||
        * **MORPH_ELLIPSE** - an elliptic structuring element, that is, a filled ellipse inscribed into the rectangle ``Rect(0, 0, esize.width, esize.height)``
|
||||
|
||||
|
||||
* **MORPH_CROSS** - a cross-shaped structuring element:
|
||||
|
||||
.. math::
|
||||
|
||||
E_{ij} = \fork{1}{if i=\texttt{anchor.y} or j=\texttt{anchor.x}}{0}{otherwise}
|
||||
|
||||
|
||||
* **CV_SHAPE_CUSTOM** - custom structuring element (OpenCV 1.x API)
|
||||
|
||||
:param ksize: Size of the structuring element.
|
||||
|
||||
:param cols: Width of the structuring element
|
||||
|
||||
|
||||
:param rows: Height of the structuring element
|
||||
|
||||
:param anchor: Anchor position within the element. The default value :math:`(-1, -1)` means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted.
|
||||
|
||||
:param anchorX: x-coordinate of the anchor
|
||||
|
||||
:param anchorY: y-coordinate of the anchor
|
||||
|
||||
|
||||
:param anchor_x: x-coordinate of the anchor
|
||||
|
||||
:param anchor_y: y-coordinate of the anchor
|
||||
|
||||
:param values: integer array of ``cols``*``rows`` elements that specifies the custom shape of the structuring element, when ``shape=CV_SHAPE_CUSTOM``.
|
||||
|
||||
The function constructs and returns the structuring element that can be further passed to
|
||||
@@ -1149,9 +1150,9 @@ Smoothes an image using the median filter.
|
||||
.. ocv:pyfunction:: cv2.medianBlur(src, ksize[, dst]) -> dst
|
||||
|
||||
:param src: Source 1-, 3-, or 4-channel image. When ``ksize`` is 3 or 5, the image depth should be ``CV_8U`` , ``CV_16U`` , or ``CV_32F`` . For larger aperture sizes, it can only be ``CV_8U`` .
|
||||
|
||||
|
||||
:param dst: Destination array of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param ksize: Aperture linear size. It must be odd and greater than 1, for example: 3, 5, 7 ...
|
||||
|
||||
The function smoothes an image using the median filter with the
|
||||
@@ -1170,7 +1171,7 @@ morphologyEx
|
||||
----------------
|
||||
Performs advanced morphological transformations.
|
||||
|
||||
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
|
||||
|
||||
.. ocv:pyfunction:: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
|
||||
|
||||
@@ -1180,7 +1181,7 @@ Performs advanced morphological transformations.
|
||||
    :param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param element: Structuring element.
|
||||
|
||||
:param op: Type of a morphological operation that can be one of the following:
|
||||
@@ -1198,7 +1199,7 @@ Performs advanced morphological transformations.
|
||||
:param iterations: Number of times erosion and dilation are applied.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
|
||||
:param borderValue: Border value in case of a constant border. The default value has a special meaning. See :ocv:func:`createMorphologyFilter` for details.
|
||||
|
||||
The function can perform advanced morphological transformations using an erosion and dilation as basic operations.
|
||||
@@ -1242,7 +1243,6 @@ Any of the operations can be done in-place. In case of multi-channel images, eac
|
||||
:ocv:func:`createMorphologyFilter`
|
||||
|
||||
|
||||
|
||||
Laplacian
|
||||
-------------
|
||||
Calculates the Laplacian of an image.
|
||||
@@ -1251,14 +1251,14 @@ Calculates the Laplacian of an image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int ksize=3)
|
||||
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Laplace(src, dst, ksize=3)-> None
|
||||
.. ocv:pyoldfunction:: cv.Laplace(src, dst, apertureSize=3) -> None
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and the same number of channels as ``src`` .
|
||||
|
||||
|
||||
:param ddepth: Desired depth of the destination image.
|
||||
|
||||
:param ksize: Aperture size used to compute the second-derivative filters. See :ocv:func:`getDerivKernels` for details. The size must be positive and odd.
|
||||
@@ -1266,7 +1266,7 @@ Calculates the Laplacian of an image.
|
||||
:param scale: Optional scale factor for the computed Laplacian values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
|
||||
|
||||
:param delta: Optional delta value that is added to the results prior to storing them in ``dst`` .
|
||||
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
The function calculates the Laplacian of the source image by adding up the second x and y derivatives calculated using the Sobel operator:
|
||||
@@ -1293,9 +1293,9 @@ pyrDown
|
||||
-----------
|
||||
Smoothes an image and downsamples it.
|
||||
|
||||
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size())
|
||||
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize]]) -> dst
|
||||
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize[, borderType]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvPyrDown( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
|
||||
|
||||
@@ -1304,7 +1304,7 @@ Smoothes an image and downsamples it.
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image. It has the specified size and the same type as ``src`` .
|
||||
|
||||
|
||||
:param dstsize: Size of the destination image. By default, it is computed as ``Size((src.cols+1)/2, (src.rows+1)/2)`` . But in any case, the following conditions should be satisfied:
|
||||
|
||||
.. math::
|
||||
@@ -1326,9 +1326,9 @@ pyrUp
|
||||
---------
|
||||
Upsamples an image and then smoothes it.
|
||||
|
||||
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size())
|
||||
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize]]) -> dst
|
||||
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize[, borderType]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvPyrUp( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
|
||||
|
||||
@@ -1337,7 +1337,7 @@ Upsamples an image and then smoothes it.
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image. It has the specified size and the same type as ``src`` .
|
||||
|
||||
|
||||
    :param dstsize: Size of the destination image. By default, it is computed as ``Size(src.cols*2, src.rows*2)`` . But in any case, the following conditions should be satisfied:
|
||||
|
||||
.. math::
|
||||
@@ -1353,46 +1353,46 @@ pyrMeanShiftFiltering
|
||||
---------------------
|
||||
Performs initial step of meanshift segmentation of an image.
|
||||
|
||||
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
|
||||
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
|
||||
|
||||
.. ocv:pyfunction:: cv2.pyrMeanShiftFiltering(src, sp, sr[, dst[, maxLevel[, termcrit]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst, double sp, double sr, int max_level=1, CvTermCriteria termcrit= cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1))
|
||||
|
||||
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, maxLevel=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1))-> None
|
||||
|
||||
:param src: The source 8-bit, 3-channel image.
|
||||
|
||||
:param dst: The destination image of the same format and the same size as the source.
|
||||
|
||||
:param sp: The spatial window radius.
|
||||
|
||||
:param sr: The color window radius.
|
||||
|
||||
:param maxLevel: Maximum level of the pyramid for the segmentation.
|
||||
|
||||
:param termcrit: Termination criteria: when to stop meanshift iterations.
|
||||
|
||||
|
||||
The function implements the filtering stage of meanshift segmentation, that is, the output of the function is the filtered "posterized" image with color gradients and fine-grain texture flattened. At every pixel
|
||||
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) -> None
|
||||
|
||||
:param src: The source 8-bit, 3-channel image.
|
||||
|
||||
:param dst: The destination image of the same format and the same size as the source.
|
||||
|
||||
:param sp: The spatial window radius.
|
||||
|
||||
:param sr: The color window radius.
|
||||
|
||||
:param maxLevel: Maximum level of the pyramid for the segmentation.
|
||||
|
||||
:param termcrit: Termination criteria: when to stop meanshift iterations.
|
||||
|
||||
|
||||
The function implements the filtering stage of meanshift segmentation, that is, the output of the function is the filtered "posterized" image with color gradients and fine-grain texture flattened. At every pixel
|
||||
``(X,Y)`` of the input image (or down-sized input image, see below) the function executes meanshift
|
||||
iterations, that is, the pixel ``(X,Y)`` neighborhood in the joint space-color hyperspace is considered:
|
||||
|
||||
.. math::
|
||||
|
||||
(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}
|
||||
(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}
|
||||
|
||||
|
||||
where ``(R,G,B)`` and ``(r,g,b)`` are the vectors of color components at ``(X,Y)`` and ``(x,y)``, respectively (though, the algorithm does not depend on the color space used, so any 3-component color space can be used instead). Over the neighborhood the average spatial value ``(X',Y')`` and average color vector ``(R',G',B')`` are found and they act as the neighborhood center on the next iteration:
|
||||
where ``(R,G,B)`` and ``(r,g,b)`` are the vectors of color components at ``(X,Y)`` and ``(x,y)``, respectively (though, the algorithm does not depend on the color space used, so any 3-component color space can be used instead). Over the neighborhood the average spatial value ``(X',Y')`` and average color vector ``(R',G',B')`` are found and they act as the neighborhood center on the next iteration:
|
||||
|
||||
.. math::
|
||||
|
||||
(X,Y)~(X',Y'), (R,G,B)~(R',G',B').
|
||||
|
||||
After the iterations are over, the color components of the initial pixel (that is, the pixel from where the iterations started) are set to the final value (average color at the last iteration):
|
||||
|
||||
After the iterations are over, the color components of the initial pixel (that is, the pixel from where the iterations started) are set to the final value (average color at the last iteration):
|
||||
|
||||
.. math::
|
||||
|
||||
|
||||
I(X,Y) <- (R*,G*,B*)
|
||||
|
||||
When ``maxLevel > 0``, the gaussian pyramid of ``maxLevel+1`` levels is built, and the above procedure is run on the smallest layer first. After that, the results are propagated to the larger layer and the iterations are run again only on those pixels where the layer colors differ by more than ``sr`` from the lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the results will be actually different from the ones obtained by running the meanshift procedure on the whole original image (i.e. when ``maxLevel==0``).
|
||||
@@ -1402,25 +1402,25 @@ sepFilter2D
|
||||
---------------
|
||||
Applies a separable linear filter to an image.
|
||||
|
||||
.. ocv:function:: void sepFilter2D( InputArray src, OutputArray dst, int ddepth, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void sepFilter2D( InputArray src, OutputArray dst, int ddepth, InputArray kernelX, InputArray kernelY, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.sepFilter2D(src, ddepth, kernelX, kernelY[, dst[, anchor[, delta[, borderType]]]]) -> dst
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and the same number of channels as ``src`` .
|
||||
|
||||
|
||||
:param ddepth: Destination image depth. The following combination of ``src.depth()`` and ``ddepth`` are supported:
|
||||
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
|
||||
|
||||
|
||||
when ``ddepth=-1``, the destination image will have the same depth as the source.
|
||||
|
||||
:param rowKernel: Coefficients for filtering each row.
|
||||
:param kernelX: Coefficients for filtering each row.
|
||||
|
||||
:param columnKernel: Coefficients for filtering each column.
|
||||
:param kernelY: Coefficients for filtering each column.
|
||||
|
||||
    :param anchor: Anchor position within the kernel. The default value :math:`(-1, -1)` means that the anchor is at the kernel center.
|
||||
|
||||
@@ -1428,7 +1428,7 @@ Applies a separable linear filter to an image.
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
The function applies a separable linear filter to the image. That is, first, every row of ``src`` is filtered with the 1D kernel ``rowKernel`` . Then, every column of the result is filtered with the 1D kernel ``columnKernel`` . The final result shifted by ``delta`` is stored in ``dst`` .
|
||||
The function applies a separable linear filter to the image. That is, first, every row of ``src`` is filtered with the 1D kernel ``kernelX`` . Then, every column of the result is filtered with the 1D kernel ``kernelY`` . The final result shifted by ``delta`` is stored in ``dst`` .
|
||||
|
||||
.. seealso::
|
||||
|
||||
@@ -1437,50 +1437,50 @@ The function applies a separable linear filter to the image. That is, first, eve
|
||||
:ocv:func:`Sobel`,
|
||||
:ocv:func:`GaussianBlur`,
|
||||
:ocv:func:`boxFilter`,
|
||||
:ocv:func:`blur`
|
||||
:ocv:func:`blur`
|
||||
|
||||
|
||||
Smooth
|
||||
------
|
||||
Smooths the image in one of several ways.
|
||||
|
||||
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int param1=3, int param2=0, double param3=0, double param4=0)
|
||||
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int size1=3, int size2=0, double sigma1=0, double sigma2=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)-> None
|
||||
|
||||
:param src: The source image
|
||||
|
||||
:param dst: The destination image
|
||||
|
||||
:param smoothtype: Type of the smoothing:
|
||||
|
||||
* **CV_BLUR_NO_SCALE** linear convolution with :math:`\texttt{param1}\times\texttt{param2}` box kernel (all 1's). If you want to smooth different pixels with different-size box kernels, you can use the integral image that is computed using :ocv:func:`integral`
|
||||
|
||||
|
||||
* **CV_BLUR** linear convolution with :math:`\texttt{param1}\times\texttt{param2}` box kernel (all 1's) with subsequent scaling by :math:`1/(\texttt{param1}\cdot\texttt{param2})`
|
||||
|
||||
|
||||
* **CV_GAUSSIAN** linear convolution with a :math:`\texttt{param1}\times\texttt{param2}` Gaussian kernel
|
||||
|
||||
|
||||
* **CV_MEDIAN** median filter with a :math:`\texttt{param1}\times\texttt{param1}` square aperture
|
||||
|
||||
|
||||
* **CV_BILATERAL** bilateral filter with a :math:`\texttt{param1}\times\texttt{param1}` square aperture, color sigma= ``param3`` and spatial sigma= ``param4`` . If ``param1=0`` , the aperture square side is set to ``cvRound(param4*1.5)*2+1`` . Information about bilateral filtering can be found at http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.html
|
||||
|
||||
|
||||
:param param1: The first parameter of the smoothing operation, the aperture width. Must be a positive odd number (1, 3, 5, ...)
|
||||
|
||||
:param param2: The second parameter of the smoothing operation, the aperture height. Ignored by ``CV_MEDIAN`` and ``CV_BILATERAL`` methods. In the case of simple scaled/non-scaled and Gaussian blur if ``param2`` is zero, it is set to ``param1`` . Otherwise it must be a positive odd number.
|
||||
|
||||
:param param3: In the case of a Gaussian parameter this parameter may specify Gaussian :math:`\sigma` (standard deviation). If it is zero, it is calculated from the kernel size:
|
||||
|
||||
:param src: The source image
|
||||
|
||||
:param dst: The destination image
|
||||
|
||||
:param smoothtype: Type of the smoothing:
|
||||
|
||||
* **CV_BLUR_NO_SCALE** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's). If you want to smooth different pixels with different-size box kernels, you can use the integral image that is computed using :ocv:func:`integral`
|
||||
|
||||
|
||||
* **CV_BLUR** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's) with subsequent scaling by :math:`1/(\texttt{size1}\cdot\texttt{size2})`
|
||||
|
||||
|
||||
* **CV_GAUSSIAN** linear convolution with a :math:`\texttt{size1}\times\texttt{size2}` Gaussian kernel
|
||||
|
||||
|
||||
* **CV_MEDIAN** median filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture
|
||||
|
||||
|
||||
* **CV_BILATERAL** bilateral filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture, color sigma= ``sigma1`` and spatial sigma= ``sigma2`` . If ``size1=0`` , the aperture square side is set to ``cvRound(sigma2*1.5)*2+1`` . Information about bilateral filtering can be found at http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.html
|
||||
|
||||
|
||||
:param size1: The first parameter of the smoothing operation, the aperture width. Must be a positive odd number (1, 3, 5, ...)
|
||||
|
||||
:param size2: The second parameter of the smoothing operation, the aperture height. Ignored by ``CV_MEDIAN`` and ``CV_BILATERAL`` methods. In the case of simple scaled/non-scaled and Gaussian blur if ``size2`` is zero, it is set to ``size1`` . Otherwise it must be a positive odd number.
|
||||
|
||||
:param sigma1: In the case of a Gaussian parameter this parameter may specify Gaussian :math:`\sigma` (standard deviation). If it is zero, it is calculated from the kernel size:
|
||||
|
||||
.. math::
|
||||
|
||||
\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{param1} for horizontal kernel} \\ \mbox{\texttt{param2} for vertical kernel} \end{array}
|
||||
|
||||
Using standard sigma for small kernels ( :math:`3\times 3` to :math:`7\times 7` ) gives better speed. If ``param3`` is not zero, while ``param1`` and ``param2`` are zeros, the kernel size is calculated from the sigma (to provide accurate enough operation).
|
||||
|
||||
|
||||
\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{size1} for horizontal kernel} \\ \mbox{\texttt{size2} for vertical kernel} \end{array}
|
||||
|
||||
Using standard sigma for small kernels ( :math:`3\times 3` to :math:`7\times 7` ) gives better speed. If ``sigma1`` is not zero, while ``size1`` and ``size2`` are zeros, the kernel size is calculated from the sigma (to provide accurate enough operation).
|
||||
|
||||
The function smooths an image using one of several methods. Every of the methods has some features and restrictions listed below:
|
||||
|
||||
* Blur with no scaling works with single-channel images only and supports accumulation of 8-bit to 16-bit format (similar to :ocv:func:`Sobel` and :ocv:func:`Laplacian`) and 32-bit floating point to 32-bit floating-point format.
|
||||
@@ -1496,23 +1496,24 @@ Sobel
|
||||
---------
|
||||
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
|
||||
|
||||
.. ocv:function:: void Sobel( InputArray src, OutputArray dst, int ddepth, int xorder, int yorder, int ksize=3, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void Sobel( InputArray src, OutputArray dst, int ddepth, int dx, int dy, int ksize=3, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int apertureSize=3 )
|
||||
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int aperture_size=3 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Sobel(src, dst, xorder, yorder, apertureSize=3)-> None
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and the same number of channels as ``src`` .
|
||||
|
||||
|
||||
:param ddepth: Destination image depth. The following combination of ``src.depth()`` and ``ddepth`` are supported:
|
||||
* ``src.depth()`` = ``CV_8U``, ``ddepth`` = -1/``CV_16S``/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_16U``/``CV_16S``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_32F``, ``ddepth`` = -1/``CV_32F``/``CV_64F``
|
||||
* ``src.depth()`` = ``CV_64F``, ``ddepth`` = -1/``CV_64F``
|
||||
|
||||
|
||||
when ``ddepth=-1``, the destination image will have the same depth as the source. In the case of 8-bit input images it will result in truncated derivatives.
|
||||
|
||||
:param xorder: Order of the derivative x.
|
||||
@@ -1524,7 +1525,7 @@ Calculates the first, second, third, or mixed image derivatives using an extende
|
||||
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
|
||||
|
||||
:param delta: Optional delta value that is added to the results prior to storing them in ``dst`` .
|
||||
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
In all cases except one, the
|
||||
@@ -1538,7 +1539,7 @@ derivative. When
|
||||
There is also the special value ``ksize = CV_SCHARR`` (-1) that corresponds to the
|
||||
:math:`3\times3` Scharr
|
||||
filter that may give more accurate results than the
|
||||
:math:`3\times3` Sobel. The Scharr aperture is
|
||||
:math:`3\times3` Sobel. The Scharr aperture is
|
||||
|
||||
.. math::
|
||||
|
||||
@@ -1582,37 +1583,37 @@ Scharr
|
||||
----------
|
||||
Calculates the first x- or y- image derivative using Scharr operator.
|
||||
|
||||
.. ocv:function:: void Scharr( InputArray src, OutputArray dst, int ddepth, int xorder, int yorder, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
|
||||
.. ocv:function:: void Scharr( InputArray src, OutputArray dst, int ddepth, int dx, int dy, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image of the same size and the same number of channels as ``src``.
|
||||
|
||||
|
||||
:param ddepth: Destination image depth. See :ocv:func:`Sobel` for the list of supported combination of ``src.depth()`` and ``ddepth``.
|
||||
|
||||
:param xorder: Order of the derivative x.
|
||||
:param dx: Order of the derivative x.
|
||||
|
||||
:param yorder: Order of the derivative y.
|
||||
:param dy: Order of the derivative y.
|
||||
|
||||
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
|
||||
|
||||
:param delta: Optional delta value that is added to the results prior to storing them in ``dst``.
|
||||
|
||||
|
||||
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
|
||||
|
||||
|
||||
The function computes the first x- or y- spatial image derivative using the Scharr operator. The call
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{Scharr(src, dst, ddepth, xorder, yorder, scale, delta, borderType)}
|
||||
\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}
|
||||
|
||||
is equivalent to
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{Sobel(src, dst, ddepth, xorder, yorder, CV\_SCHARR, scale, delta, borderType)} .
|
||||
\texttt{Sobel(src, dst, ddepth, dx, dy, CV\_SCHARR, scale, delta, borderType)} .
|
||||
|
||||
.. seealso::
|
||||
|
||||
|
||||
@@ -41,15 +41,15 @@ Converts image transformation maps from one representation to another.
|
||||
.. ocv:pyfunction:: cv2.convertMaps(map1, map2, dstmap1type[, dstmap1[, dstmap2[, nninterpolation]]]) -> dstmap1, dstmap2
|
||||
|
||||
:param map1: The first input map of type ``CV_16SC2`` , ``CV_32FC1`` , or ``CV_32FC2`` .
|
||||
|
||||
|
||||
:param map2: The second input map of type ``CV_16UC1`` , ``CV_32FC1`` , or none (empty matrix), respectively.
|
||||
|
||||
:param dstmap1: The first output map that has the type ``dstmap1type`` and the same size as ``src`` .
|
||||
|
||||
|
||||
:param dstmap2: The second output map.
|
||||
|
||||
:param dstmap1type: Type of the first output map that should be ``CV_16SC2`` , ``CV_32FC1`` , or ``CV_32FC2`` .
|
||||
|
||||
|
||||
:param nninterpolation: Flag indicating whether the fixed-point maps are used for the nearest-neighbor or for a more complex interpolation.
|
||||
|
||||
The function converts a pair of maps for
|
||||
@@ -77,11 +77,13 @@ getAffineTransform
|
||||
----------------------
|
||||
Calculates an affine transform from three pairs of the corresponding points.
|
||||
|
||||
.. ocv:function:: Mat getAffineTransform( const Point2f* src, const Point2f* dst )
|
||||
.. ocv:function:: Mat getAffineTransform( InputArray src, InputArray dst )
|
||||
|
||||
.. ocv:function:: Mat getAffineTransform( const Point2f src[], const Point2f dst[] )
|
||||
|
||||
.. ocv:pyfunction:: cv2.getAffineTransform(src, dst) -> retval
|
||||
|
||||
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* mapMatrix )
|
||||
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f * src, const CvPoint2D32f * dst, CvMat * map_matrix )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetAffineTransform(src, dst, mapMatrix)-> None
|
||||
|
||||
@@ -114,11 +116,13 @@ getPerspectiveTransform
|
||||
---------------------------
|
||||
Calculates a perspective transform from four pairs of the corresponding points.
|
||||
|
||||
.. ocv:function:: Mat getPerspectiveTransform( const Point2f* src, const Point2f* dst )
|
||||
.. ocv:function:: Mat getPerspectiveTransform( InputArray src, InputArray dst )
|
||||
|
||||
.. ocv:function:: Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
|
||||
|
||||
.. ocv:pyfunction:: cv2.getPerspectiveTransform(src, dst) -> retval
|
||||
|
||||
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* mapMatrix )
|
||||
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* map_matrix )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetPerspectiveTransform(src, dst, mapMatrix)-> None
|
||||
|
||||
@@ -151,7 +155,7 @@ getRectSubPix
|
||||
-----------------
|
||||
Retrieves a pixel rectangle from an image with sub-pixel accuracy.
|
||||
|
||||
.. ocv:function:: void getRectSubPix( InputArray image, Size patchSize, Point2f center, OutputArray dst, int patchType=-1 )
|
||||
.. ocv:function:: void getRectSubPix( InputArray image, Size patchSize, Point2f center, OutputArray patch, int patchType=-1 )
|
||||
|
||||
.. ocv:pyfunction:: cv2.getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch
|
||||
|
||||
@@ -165,7 +169,7 @@ Retrieves a pixel rectangle from an image with sub-pixel accuracy.
|
||||
:param center: Floating point coordinates of the center of the extracted rectangle within the source image. The center must be inside the image.
|
||||
|
||||
:param dst: Extracted patch that has the size ``patchSize`` and the same number of channels as ``src`` .
|
||||
|
||||
|
||||
:param patchType: Depth of the extracted pixels. By default, they have the same depth as ``src`` .
|
||||
|
||||
The function ``getRectSubPix`` extracts pixels from ``src`` :
|
||||
@@ -196,7 +200,7 @@ Calculates an affine matrix of 2D rotation.
|
||||
|
||||
.. ocv:pyfunction:: cv2.getRotationMatrix2D(center, angle, scale) -> retval
|
||||
|
||||
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* mapMatrix )
|
||||
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* map_matrix )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetRotationMatrix2D(center, angle, scale, mapMatrix)-> None
|
||||
|
||||
@@ -206,7 +210,7 @@ Calculates an affine matrix of 2D rotation.
|
||||
|
||||
:param scale: Isotropic scale factor.
|
||||
|
||||
:param mapMatrix: The output affine transformation, 2x3 floating-point matrix.
|
||||
:param map_matrix: The output affine transformation, 2x3 floating-point matrix.
|
||||
|
||||
The function calculates the following matrix:
|
||||
|
||||
@@ -262,19 +266,19 @@ Remaps an image to log-polar space.
|
||||
|
||||
.. ocv:pyoldfunction:: cv.LogPolar(src, dst, center, M, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS)-> None
|
||||
|
||||
:param src: Source image
|
||||
:param src: Source image
|
||||
|
||||
:param dst: Destination image
|
||||
:param dst: Destination image
|
||||
|
||||
:param center: The transformation center; where the output precision is maximal
|
||||
:param center: The transformation center; where the output precision is maximal
|
||||
|
||||
:param M: Magnitude scale parameter. See below
|
||||
:param M: Magnitude scale parameter. See below
|
||||
|
||||
:param flags: A combination of interpolation methods and the following optional flags:
|
||||
|
||||
* **CV_WARP_FILL_OUTLIERS** fills all of the destination image pixels. If some of them correspond to outliers in the source image, they are set to zero
|
||||
|
||||
* **CV_WARP_INVERSE_MAP** See below
|
||||
:param flags: A combination of interpolation methods and the following optional flags:
|
||||
|
||||
* **CV_WARP_FILL_OUTLIERS** fills all of the destination image pixels. If some of them correspond to outliers in the source image, they are set to zero
|
||||
|
||||
* **CV_WARP_INVERSE_MAP** See below
|
||||
|
||||
The function ``cvLogPolar`` transforms the source image using the following transformation:
|
||||
|
||||
@@ -283,7 +287,7 @@ The function ``cvLogPolar`` transforms the source image using the following tran
|
||||
|
||||
.. math::
|
||||
|
||||
dst( \phi , \rho ) = src(x,y)
|
||||
dst( \phi , \rho ) = src(x,y)
|
||||
|
||||
|
||||
*
|
||||
@@ -291,14 +295,14 @@ The function ``cvLogPolar`` transforms the source image using the following tran
|
||||
|
||||
.. math::
|
||||
|
||||
dst(x,y) = src( \phi , \rho )
|
||||
dst(x,y) = src( \phi , \rho )
|
||||
|
||||
|
||||
where
|
||||
|
||||
.. math::
|
||||
|
||||
\rho = M \cdot \log{\sqrt{x^2 + y^2}} , \phi =atan(y/x)
|
||||
\rho = M \cdot \log{\sqrt{x^2 + y^2}} , \phi =atan(y/x)
|
||||
|
||||
|
||||
The function emulates the human "foveal" vision and can be used for fast scale and rotation-invariant template matching, for object tracking and so forth. The function can not operate in-place.
|
||||
@@ -372,7 +376,7 @@ Resizes an image.
|
||||
|
||||
\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}
|
||||
|
||||
|
||||
|
||||
Either ``dsize`` or both ``fx`` and ``fy`` must be non-zero.
|
||||
|
||||
:param fx: Scale factor along the horizontal axis. When it is 0, it is computed as
|
||||
@@ -417,7 +421,7 @@ To shrink an image, it will generally look best with CV_INTER_AREA interpolation
|
||||
|
||||
:ocv:func:`warpAffine`,
|
||||
:ocv:func:`warpPerspective`,
|
||||
:ocv:func:`remap`
|
||||
:ocv:func:`remap`
|
||||
|
||||
|
||||
warpAffine
|
||||
@@ -428,16 +432,18 @@ Applies an affine transformation to an image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* mapMatrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
|
||||
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.WarpAffine(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
|
||||
|
||||
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* mapMatrix )
|
||||
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* map_matrix )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetQuadrangleSubPix(src, dst, mapMatrix)-> None
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image that has the size ``dsize`` and the same type as ``src`` .
|
||||
|
||||
|
||||
:param M: :math:`2\times 3` transformation matrix.
|
||||
|
||||
:param dsize: Size of the destination image.
|
||||
@@ -477,13 +483,14 @@ Applies a perspective transformation to an image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.warpPerspective(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* mapMatrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
|
||||
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.WarpPerspective(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
|
||||
|
||||
:param src: Source image.
|
||||
|
||||
:param dst: Destination image that has the size ``dsize`` and the same type as ``src`` .
|
||||
|
||||
|
||||
:param M: :math:`3\times 3` transformation matrix.
|
||||
|
||||
:param dsize: Size of the destination image.
|
||||
@@ -524,24 +531,24 @@ Computes the undistortion and rectification transformation map.
|
||||
|
||||
.. ocv:pyfunction:: cv2.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type[, map1[, map2]]) -> map1, map2
|
||||
|
||||
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* R, const CvMat* newCameraMatrix, CvArr* map1, CvArr* map2 )
|
||||
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* cameraMatrix, const CvMat* distCoeffs, CvArr* map1, CvArr* map2 )
|
||||
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat * R, const CvMat* new_camera_matrix, CvArr* mapx, CvArr* mapy )
|
||||
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvArr* mapx, CvArr* mapy )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.InitUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, map1, map2)-> None
|
||||
.. ocv:pyoldfunction:: cv.InitUndistortMap(cameraMatrix, distCoeffs, map1, map2)-> None
|
||||
|
||||
:param cameraMatrix: Input camera matrix :math:`A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
|
||||
|
||||
|
||||
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
|
||||
|
||||
:param R: Optional rectification transformation in the object space (3x3 matrix). ``R1`` or ``R2`` , computed by :ocv:func:`stereoRectify` can be passed here. If the matrix is empty, the identity transformation is assumed. In ``cvInitUndistortMap`` R assumed to be an identity matrix.
|
||||
|
||||
:param newCameraMatrix: New camera matrix :math:`A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}` .
|
||||
|
||||
|
||||
:param size: Undistorted image size.
|
||||
|
||||
:param m1type: Type of the first output map that can be ``CV_32FC1`` or ``CV_16SC2`` . See :ocv:func:`convertMaps` for details.
|
||||
|
||||
|
||||
:param map1: The first output map.
|
||||
|
||||
:param map2: The second output map.
|
||||
@@ -606,7 +613,7 @@ where
|
||||
:math:`(0,0)` and
|
||||
:math:`(1,1)` elements of ``cameraMatrix`` , respectively.
|
||||
|
||||
By default, the undistortion functions in OpenCV (see
|
||||
By default, the undistortion functions in OpenCV (see
|
||||
:ocv:func:`initUndistortRectifyMap`,
|
||||
:ocv:func:`undistort`) do not move the principal point. However, when you work with stereo, it is important to move the principal points in both views to the same y-coordinate (which is required by most of stereo correspondence algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for each view where the principal points are located at the center.
|
||||
|
||||
@@ -621,16 +628,16 @@ Transforms an image to compensate for lens distortion.
|
||||
|
||||
.. ocv:pyfunction:: cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* newCameraMatrix=NULL )
|
||||
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* camera_matrix, const CvMat* distortion_coeffs, const CvMat* new_camera_matrix=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Undistort2(src, dst, cameraMatrix, distCoeffs)-> None
|
||||
|
||||
:param src: Input (distorted) image.
|
||||
|
||||
:param dst: Output (corrected) image that has the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param cameraMatrix: Input camera matrix :math:`A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
|
||||
|
||||
|
||||
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
|
||||
|
||||
:param newCameraMatrix: Camera matrix of the distorted image. By default, it is the same as ``cameraMatrix`` but you may additionally scale and shift the result by using a different matrix.
|
||||
@@ -660,7 +667,7 @@ Computes the ideal point coordinates from the observed point coordinates.
|
||||
|
||||
.. ocv:function:: void undistortPoints( InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, InputArray R=noArray(), InputArray P=noArray())
|
||||
|
||||
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* R=NULL, const CvMat* P=NULL)
|
||||
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* R=0, const CvMat* P=0 )
|
||||
.. ocv:pyoldfunction:: cv.UndistortPoints(src, dst, cameraMatrix, distCoeffs, R=None, P=None)-> None
|
||||
|
||||
:param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
|
||||
@@ -668,7 +675,7 @@ Computes the ideal point coordinates from the observed point coordinates.
|
||||
:param dst: Output ideal point coordinates after undistortion and reverse perspective transformation. If matrix ``P`` is identity or omitted, ``dst`` will contain normalized point coordinates.
|
||||
|
||||
:param cameraMatrix: Camera matrix :math:`\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
|
||||
|
||||
|
||||
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
|
||||
|
||||
:param R: Rectification transformation in the object space (3x3 matrix). ``R1`` or ``R2`` computed by :ocv:func:`stereoRectify` can be passed here. If the matrix is empty, the identity transformation is used.
|
||||
@@ -696,4 +703,4 @@ where ``undistort()`` is an approximate iterative algorithm that estimates the n
|
||||
|
||||
The function can be used for both a stereo camera head or a monocular camera (when R is empty).
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -9,22 +9,22 @@ calcHist
|
||||
------------
|
||||
Calculates a histogram of a set of arrays.
|
||||
|
||||
.. ocv:function:: void calcHist( const Mat* arrays, int narrays, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
|
||||
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
|
||||
|
||||
.. ocv:function:: void calcHist( const Mat* arrays, int narrays, const int* channels, InputArray mask, SparseMat& hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
|
||||
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, SparseMat& hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
|
||||
|
||||
.. ocv:pyfunction:: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist
|
||||
|
||||
.. ocv:cfunction:: void cvCalcHist( IplImage** image, CvHistogram* hist, int accumulate=0, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.CalcHist(image, hist, accumulate=0, mask=None)-> None
|
||||
|
||||
:param arrays: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
|
||||
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
|
||||
|
||||
:param narrays: Number of source arrays.
|
||||
:param nimages: Number of source images.
|
||||
|
||||
:param channels: List of the ``dims`` channels used to compute the histogram. The first array channels are numerated from 0 to ``arrays[0].channels()-1`` , the second array channels are counted from ``arrays[0].channels()`` to ``arrays[0].channels() + arrays[1].channels()-1``, and so on.
|
||||
:param channels: List of the ``dims`` channels used to compute the histogram. The first array channels are numerated from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
|
||||
|
||||
:param mask: Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as ``arrays[i]`` . The non-zero mask elements mark the array elements counted in the histogram.
|
||||
:param mask: Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as ``images[i]`` . The non-zero mask elements mark the array elements counted in the histogram.
|
||||
|
||||
:param hist: Output histogram, which is a dense or sparse ``dims`` -dimensional array.
|
||||
|
||||
@@ -106,27 +106,27 @@ calcBackProject
|
||||
-------------------
|
||||
Calculates the back projection of a histogram.
|
||||
|
||||
.. ocv:function:: void calcBackProject( const Mat* arrays, int narrays, const int* channels, InputArray hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
|
||||
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, InputArray hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
|
||||
|
||||
.. ocv:function:: void calcBackProject( const Mat* arrays, int narrays, const int* channels, const SparseMat& hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
|
||||
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, const SparseMat& hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
|
||||
|
||||
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges[, dst[, scale]]) -> dst
|
||||
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvCalcBackProject( IplImage** image, CvArr* backProject, const CvHistogram* hist )
|
||||
.. ocv:pyoldfunction:: cv.CalcBackProject(image, backProject, hist)-> None
|
||||
.. ocv:pyoldfunction:: cv.CalcBackProject(image, back_project, hist) -> None
|
||||
|
||||
:param arrays: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
|
||||
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
|
||||
|
||||
:param narrays: Number of source arrays.
|
||||
:param nimages: Number of source images.
|
||||
|
||||
:param channels: The list of channels used to compute the back projection. The number of channels must match the histogram dimensionality. The first array channels are numerated from 0 to ``arrays[0].channels()-1`` , the second array channels are counted from ``arrays[0].channels()`` to ``arrays[0].channels() + arrays[1].channels()-1``, and so on.
|
||||
:param channels: The list of channels used to compute the back projection. The number of channels must match the histogram dimensionality. The first array channels are numerated from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
|
||||
|
||||
:param hist: Input histogram that can be dense or sparse.
|
||||
|
||||
:param backProject: Destination back projection array that is a single-channel array of the same size and depth as ``arrays[0]`` .
|
||||
|
||||
:param backProject: Destination back projection array that is a single-channel array of the same size and depth as ``images[0]`` .
|
||||
|
||||
:param ranges: Array of arrays of the histogram bin boundaries in each dimension. See :ocv:func:`calcHist` .
|
||||
|
||||
|
||||
:param scale: Optional scale factor for the output back projection.
|
||||
|
||||
:param uniform: Flag indicating whether the histogram is uniform or not (see above).
|
||||
@@ -164,7 +164,7 @@ Compares two histograms.
|
||||
:param H1: First compared histogram.
|
||||
|
||||
:param H2: Second compared histogram of the same size as ``H1`` .
|
||||
|
||||
|
||||
:param method: Comparison method that could be one of the following:
|
||||
|
||||
* **CV_COMP_CORREL** Correlation
|
||||
@@ -225,8 +225,9 @@ Computes the "minimal work" distance between two weighted point configurations.
|
||||
|
||||
.. ocv:function:: float EMD( InputArray signature1, InputArray signature2, int distType, InputArray cost=noArray(), float* lowerBound=0, OutputArray flow=noArray() )
|
||||
|
||||
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distType, CvDistanceFunction distFunc=NULL, const CvArr* cost=NULL, CvArr* flow=NULL, float* lowerBound=NULL, void* userdata=NULL )
|
||||
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distType, distFunc=None, cost=None, flow=None, lowerBound=None, userdata=None) -> float
|
||||
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distance_type, CvDistanceFunction distance_func=NULL, const CvArr* cost_matrix=NULL, CvArr* flow=NULL, float* lower_bound=NULL, void* userdata=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distance_type, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None) -> float
|
||||
|
||||
:param signature1: First signature, a :math:`\texttt{size1}\times \texttt{dims}+1` floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used.
|
||||
|
||||
@@ -234,19 +235,19 @@ Computes the "minimal work" distance between two weighted point configurations.
|
||||
|
||||
:param distType: Used metric. ``CV_DIST_L1, CV_DIST_L2`` , and ``CV_DIST_C`` stand for one of the standard metrics. ``CV_DIST_USER`` means that a pre-calculated cost matrix ``cost`` is used.
|
||||
|
||||
:param distFunc: Custom distance function supported by the old interface. ``CvDistanceFunction`` is defined as: ::
|
||||
|
||||
:param distance_func: Custom distance function supported by the old interface. ``CvDistanceFunction`` is defined as: ::
|
||||
|
||||
typedef float (CV_CDECL * CvDistanceFunction)( const float* a,
|
||||
const float* b, void* userdata );
|
||||
|
||||
|
||||
where ``a`` and ``b`` are point coordinates and ``userdata`` is the same as the last parameter.
|
||||
|
||||
|
||||
:param cost: User-defined :math:`\texttt{size1}\times \texttt{size2}` cost matrix. Also, if a cost matrix is used, lower boundary ``lowerBound`` cannot be calculated because it needs a metric function.
|
||||
|
||||
:param lowerBound: Optional input/output parameter: lower boundary of a distance between the two signatures that is a distance between mass centers. The lower boundary may not be calculated if the user-defined cost matrix is used, the total weights of point configurations are not equal, or if the signatures consist of weights only (the signature matrices have a single column). You **must** initialize ``*lowerBound`` . If the calculated distance between mass centers is greater or equal to ``*lowerBound`` (it means that the signatures are far enough), the function does not calculate EMD. In any case ``*lowerBound`` is set to the calculated distance between mass centers on return. Thus, if you want to calculate both distance between mass centers and EMD, ``*lowerBound`` should be set to 0.
|
||||
|
||||
:param flow: Resultant :math:`\texttt{size1} \times \texttt{size2}` flow matrix: :math:`\texttt{flow}_{i,j}` is a flow from :math:`i` -th point of ``signature1`` to :math:`j` -th point of ``signature2`` .
|
||||
|
||||
|
||||
:param userdata: Optional pointer directly passed to the custom distance function.
|
||||
|
||||
The function computes the earth mover distance and/or a lower boundary of the distance between the two weighted point configurations. One of the applications described in [RubnerSept98]_ is multi-dimensional histogram comparison for image retrieval. EMD is a transportation problem that is solved using some modification of a simplex algorithm, thus the complexity is exponential in the worst case, though, on average it is much faster. In the case of a real metric the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used to determine roughly whether the two signatures are far enough so that they cannot relate to the same object.
|
||||
@@ -301,20 +302,20 @@ Locates a template within an image by using a histogram comparison.
|
||||
|
||||
.. ocv:cfunction:: void cvCalcBackProjectPatch( IplImage** images, CvArr* dst, CvSize patch_size, CvHistogram* hist, int method, double factor )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patchSize, hist, method, factor)-> None
|
||||
|
||||
:param images: Source images (though, you may pass CvMat** as well).
|
||||
|
||||
:param dst: Destination image.
|
||||
|
||||
:param patch_size: Size of the patch slid through the source image.
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param method: Comparison method passed to :ocv:cfunc:`CompareHist` (see the function description).
|
||||
|
||||
:param factor: Normalization factor for histograms that affects the normalization scale of the destination image. Pass 1 if not sure.
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patch_size, hist, method, factor)-> None
|
||||
|
||||
:param images: Source images (though, you may pass CvMat** as well).
|
||||
|
||||
:param dst: Destination image.
|
||||
|
||||
:param patch_size: Size of the patch slid through the source image.
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param method: Comparison method passed to :ocv:cfunc:`CompareHist` (see the function description).
|
||||
|
||||
:param factor: Normalization factor for histograms that affects the normalization scale of the destination image. Pass 1 if not sure.
|
||||
|
||||
The function calculates the back projection by comparing histograms of the source image patches with the given histogram. The function is similar to :ocv:func:`matchTemplate`, but instead of comparing the raster patch with all its possible positions within the search window, the function ``CalcBackProjectPatch`` compares histograms. See the algorithm diagram below:
|
||||
|
||||
.. image:: pics/backprojectpatch.png
|
||||
@@ -324,23 +325,23 @@ CalcProbDensity
|
||||
---------------
|
||||
Divides one histogram by another.
|
||||
|
||||
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dsthist, double scale=255 )
|
||||
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dst_hist, double scale=255 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dst_hist, scale=255) -> None
|
||||
|
||||
:param hist1: First histogram (the divisor).
|
||||
|
||||
:param hist2: Second histogram.
|
||||
|
||||
:param dst_hist: Destination histogram.
|
||||
|
||||
:param scale: Scale factor for the destination histogram.
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dsthist, scale=255)-> None
|
||||
|
||||
:param hist1: First histogram (the divisor).
|
||||
|
||||
:param hist2: Second histogram.
|
||||
|
||||
:param dsthist: Destination histogram.
|
||||
|
||||
:param scale: Scale factor for the destination histogram.
|
||||
|
||||
The function calculates the object probability density from two histograms as:
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{dsthist} (I)= \forkthree{0}{if $\texttt{hist1}(I)=0$}{\texttt{scale}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) > \texttt{hist1}(I)$}{\frac{\texttt{hist2}(I) \cdot \texttt{scale}}{\texttt{hist1}(I)}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) \le \texttt{hist1}(I)$}
|
||||
\texttt{dsthist} (I)= \forkthree{0}{if $\texttt{hist1}(I)=0$}{\texttt{scale}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) > \texttt{hist1}(I)$}{\frac{\texttt{hist2}(I) \cdot \texttt{scale}}{\texttt{hist1}(I)}}{if $\texttt{hist1}(I) \ne 0$ and $\texttt{hist2}(I) \le \texttt{hist1}(I)$}
|
||||
|
||||
|
||||
ClearHist
|
||||
@@ -350,7 +351,7 @@ Clears the histogram.
|
||||
.. ocv:cfunction:: void cvClearHist( CvHistogram* hist )
|
||||
.. ocv:pyoldfunction:: cv.ClearHist(hist)-> None
|
||||
|
||||
:param hist: Histogram.
|
||||
:param hist: Histogram.
|
||||
|
||||
The function sets all of the histogram bins to 0 in case of a dense histogram and removes all histogram bins in case of a sparse array.
|
||||
|
||||
@@ -361,10 +362,10 @@ Copies a histogram.
|
||||
|
||||
.. ocv:cfunction:: void cvCopyHist( const CvHistogram* src, CvHistogram** dst )
|
||||
|
||||
:param src: Source histogram.
|
||||
|
||||
:param dst: Pointer to the destination histogram.
|
||||
|
||||
:param src: Source histogram.
|
||||
|
||||
:param dst: Pointer to the destination histogram.
|
||||
|
||||
The function makes a copy of the histogram. If the second histogram pointer ``*dst`` is NULL, a new histogram of the same size as ``src`` is created. Otherwise, both histograms must have equal types and sizes. Then the function copies the bin values of the source histogram to the destination histogram and sets the same bin value ranges as in ``src``.
|
||||
|
||||
.. _createhist:
|
||||
@@ -375,92 +376,54 @@ Creates a histogram.
|
||||
|
||||
.. ocv:cfunction:: CvHistogram* cvCreateHist( int dims, int* sizes, int type, float** ranges=NULL, int uniform=1 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges, uniform=1) -> hist
|
||||
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges=None, uniform=1) -> hist
|
||||
|
||||
:param dims: Number of histogram dimensions.
|
||||
|
||||
:param sizes: Array of the histogram dimension sizes.
|
||||
|
||||
:param type: Histogram representation format. ``CV_HIST_ARRAY`` means that the histogram data is represented as a multi-dimensional dense array CvMatND. ``CV_HIST_SPARSE`` means that histogram data is represented as a multi-dimensional sparse array ``CvSparseMat``.
|
||||
|
||||
:param ranges: Array of ranges for the histogram bins. Its meaning depends on the ``uniform`` parameter value. The ranges are used when the histogram is calculated or backprojected to determine which histogram bin corresponds to which value/tuple of values from the input image(s).
|
||||
|
||||
:param dims: Number of histogram dimensions.
|
||||
|
||||
:param sizes: Array of the histogram dimension sizes.
|
||||
|
||||
:param type: Histogram representation format. ``CV_HIST_ARRAY`` means that the histogram data is represented as a multi-dimensional dense array CvMatND. ``CV_HIST_SPARSE`` means that histogram data is represented as a multi-dimensional sparse array ``CvSparseMat``.
|
||||
|
||||
:param ranges: Array of ranges for the histogram bins. Its meaning depends on the ``uniform`` parameter value. The ranges are used when the histogram is calculated or backprojected to determine which histogram bin corresponds to which value/tuple of values from the input image(s).
|
||||
|
||||
:param uniform: Uniformity flag. If not zero, the histogram has evenly
|
||||
spaced bins and for every :math:`0<=i<cDims` ``ranges[i]``
|
||||
spaced bins and for every :math:`0<=i<cDims` ``ranges[i]``
|
||||
is an array of two numbers: lower and upper boundaries for the i-th
|
||||
histogram dimension.
|
||||
The whole range [lower,upper] is then split
|
||||
into ``dims[i]`` equal parts to determine the ``i``-th input
|
||||
tuple value ranges for every histogram bin. And if ``uniform=0`` ,
|
||||
then the ``i``-th element of the ``ranges`` array contains ``dims[i]+1`` elements: :math:`\texttt{lower}_0, \texttt{upper}_0,
|
||||
then the ``i``-th element of the ``ranges`` array contains ``dims[i]+1`` elements: :math:`\texttt{lower}_0, \texttt{upper}_0,
|
||||
\texttt{lower}_1, \texttt{upper}_1 = \texttt{lower}_2,
|
||||
...
|
||||
\texttt{upper}_{dims[i]-1}`
|
||||
where :math:`\texttt{lower}_j` and :math:`\texttt{upper}_j`
|
||||
\texttt{upper}_{dims[i]-1}`
|
||||
where :math:`\texttt{lower}_j` and :math:`\texttt{upper}_j`
|
||||
are lower and upper
|
||||
boundaries of the ``i``-th input tuple value for the ``j``-th
|
||||
boundaries of the ``i``-th input tuple value for the ``j``-th
|
||||
bin, respectively. In either case, the input values that are beyond
|
||||
the specified range for a histogram bin are not counted by :ocv:cfunc:`CalcHist` and filled with 0 by :ocv:cfunc:`CalcBackProject`.
|
||||
|
||||
|
||||
The function creates a histogram of the specified size and returns a pointer to the created histogram. If the array ``ranges`` is 0, the histogram bin ranges must be specified later via the function :ocv:cfunc:`SetHistBinRanges`. Though :ocv:cfunc:`CalcHist` and :ocv:cfunc:`CalcBackProject` may process 8-bit images without setting bin ranges, they assume they are equally spaced in 0 to 255 bins.
|
||||
|
||||
|
||||
GetHistValue\_?D
|
||||
----------------
|
||||
Returns a pointer to the histogram bin.
|
||||
|
||||
.. ocv:cfunction:: float cvGetHistValue_1D(CvHistogram hist, int idx0)
|
||||
|
||||
.. ocv:cfunction:: float cvGetHistValue_2D(CvHistogram hist, int idx0, int idx1)
|
||||
|
||||
.. ocv:cfunction:: float cvGetHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
|
||||
|
||||
.. ocv:cfunction:: float cvGetHistValue_nD(CvHistogram hist, int idx)
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param idx0: 0-th index.
|
||||
|
||||
:param idx1: 1-st index.
|
||||
|
||||
:param idx2: 2-nd index.
|
||||
|
||||
:param idx: Array of indices.
|
||||
|
||||
::
|
||||
|
||||
#define cvGetHistValue_1D( hist, idx0 )
|
||||
((float*)(cvPtr1D( (hist)->bins, (idx0), 0 ))
|
||||
#define cvGetHistValue_2D( hist, idx0, idx1 )
|
||||
((float*)(cvPtr2D( (hist)->bins, (idx0), (idx1), 0 )))
|
||||
#define cvGetHistValue_3D( hist, idx0, idx1, idx2 )
|
||||
((float*)(cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0 )))
|
||||
#define cvGetHistValue_nD( hist, idx )
|
||||
((float*)(cvPtrND( (hist)->bins, (idx), 0 )))
|
||||
|
||||
..
|
||||
|
||||
The macros ``GetHistValue`` return a pointer to the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function creates a new bin and sets it to 0, unless it exists already.
|
||||
|
||||
|
||||
GetMinMaxHistValue
|
||||
------------------
|
||||
Finds the minimum and maximum histogram bins.
|
||||
|
||||
.. ocv:cfunction:: void cvGetMinMaxHistValue( const CvHistogram* hist, float* min_value, float* max_value, int* min_idx=NULL, int* max_idx=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (minValue, maxValue, minIdx, maxIdx)
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (min_value, max_value, min_idx, max_idx)
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param min_value: Pointer to the minimum value of the histogram.
|
||||
|
||||
:param max_value: Pointer to the maximum value of the histogram.
|
||||
|
||||
:param min_idx: Pointer to the array of coordinates for the minimum.
|
||||
|
||||
:param max_idx: Pointer to the array of coordinates for the maximum.
|
||||
|
||||
|
||||
:param min_value: Pointer to the minimum value of the histogram.
|
||||
|
||||
:param max_value: Pointer to the maximum value of the histogram.
|
||||
|
||||
:param min_idx: Pointer to the array of coordinates for the minimum.
|
||||
|
||||
:param max_idx: Pointer to the array of coordinates for the maximum.
|
||||
|
||||
The function finds the minimum and maximum histogram bins and their positions. All of the output arguments are optional. Among several extrema with the same value, the ones with the minimum index (in the lexicographical order) are returned. In case of several maxima or minima, the earliest in the lexicographical order (extrema locations) is returned.
|
||||
|
||||
|
||||
@@ -469,19 +432,19 @@ MakeHistHeaderForArray
|
||||
Makes a histogram out of an array.
|
||||
|
||||
.. ocv:cfunction:: CvHistogram* cvMakeHistHeaderForArray( int dims, int* sizes, CvHistogram* hist, float* data, float** ranges=NULL, int uniform=1 )
|
||||
|
||||
:param dims: Number of the histogram dimensions.
|
||||
|
||||
:param sizes: Array of the histogram dimension sizes.
|
||||
|
||||
:param hist: Histogram header initialized by the function.
|
||||
|
||||
:param data: Array used to store histogram bins.
|
||||
|
||||
|
||||
:param dims: Number of the histogram dimensions.
|
||||
|
||||
:param sizes: Array of the histogram dimension sizes.
|
||||
|
||||
:param hist: Histogram header initialized by the function.
|
||||
|
||||
:param data: Array used to store histogram bins.
|
||||
|
||||
:param ranges: Histogram bin ranges. See :ocv:cfunc:`CreateHist` for details.
|
||||
|
||||
|
||||
:param uniform: Uniformity flag. See :ocv:cfunc:`CreateHist` for details.
|
||||
|
||||
|
||||
The function initializes the histogram, whose header and bins are allocated by the user. :ocv:cfunc:`ReleaseHist` does not need to be called afterwards. Only dense histograms can be initialized this way. The function returns ``hist``.
|
||||
|
||||
NormalizeHist
|
||||
@@ -490,48 +453,22 @@ Normalizes the histogram.
|
||||
|
||||
.. ocv:cfunction:: void cvNormalizeHist( CvHistogram* hist, double factor )
|
||||
.. ocv:pyoldfunction:: cv.NormalizeHist(hist, factor)-> None
|
||||
|
||||
:param hist: Pointer to the histogram.
|
||||
|
||||
:param factor: Normalization factor.
|
||||
|
||||
|
||||
:param hist: Pointer to the histogram.
|
||||
|
||||
:param factor: Normalization factor.
|
||||
|
||||
The function normalizes the histogram bins by scaling them so that the sum of the bins becomes equal to ``factor``.
|
||||
|
||||
|
||||
QueryHistValue*D
|
||||
----------------
|
||||
Queries the value of the histogram bin.
|
||||
|
||||
.. ocv:cfunction:: float QueryHistValue_1D(CvHistogram hist, int idx0)
|
||||
.. ocv:cfunction:: float QueryHistValue_2D(CvHistogram hist, int idx0, int idx1)
|
||||
.. ocv:cfunction:: float QueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
|
||||
.. ocv:cfunction:: float QueryHistValue_nD(CvHistogram hist, const int* idx)
|
||||
|
||||
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
|
||||
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
|
||||
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
|
||||
.. ocv:pyoldfunction:: cv.QueryHistValueND(hist, idx) -> float
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param idx0: 0-th index.
|
||||
|
||||
:param idx1: 1-st index.
|
||||
|
||||
:param idx2: 2-nd index.
|
||||
|
||||
:param idx: Array of indices.
|
||||
|
||||
The macros return the value of the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function returns 0. If the bin is not present in the histogram, no new bin is created.
|
||||
|
||||
ReleaseHist
|
||||
-----------
|
||||
Releases the histogram.
|
||||
|
||||
.. ocv:cfunction:: void cvReleaseHist( CvHistogram** hist )
|
||||
|
||||
:param hist: Double pointer to the released histogram.
|
||||
|
||||
|
||||
:param hist: Double pointer to the released histogram.
|
||||
|
||||
The function releases the histogram (header and the data). The pointer to the histogram is cleared by the function. If ``*hist`` pointer is already ``NULL``, the function does nothing.
|
||||
|
||||
|
||||
@@ -541,12 +478,12 @@ Sets the bounds of the histogram bins.
|
||||
|
||||
.. ocv:cfunction:: void cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform=1 )
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param hist: Histogram.
|
||||
|
||||
:param ranges: Array of bin ranges arrays. See :ocv:cfunc:`CreateHist` for details.
|
||||
|
||||
:param uniform: Uniformity flag. See :ocv:cfunc:`CreateHist` for details.
|
||||
|
||||
|
||||
:param uniform: Uniformity flag. See :ocv:cfunc:`CreateHist` for details.
|
||||
|
||||
This is a standalone function for setting bin ranges in the histogram. For a more detailed description of the parameters ``ranges`` and ``uniform``, see the :ocv:cfunc:`CalcHist` function that can initialize the ranges as well. Ranges for the histogram bins must be set before the histogram is calculated or the backproject of the histogram is calculated.
|
||||
|
||||
|
||||
@@ -555,37 +492,13 @@ ThreshHist
|
||||
Thresholds the histogram.
|
||||
|
||||
.. ocv:cfunction:: void cvThreshHist( CvHistogram* hist, double threshold )
|
||||
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold)-> None
|
||||
|
||||
:param hist: Pointer to the histogram.
|
||||
|
||||
:param threshold: Threshold level.
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold) -> None
|
||||
|
||||
:param hist: Pointer to the histogram.
|
||||
|
||||
:param threshold: Threshold level.
|
||||
|
||||
The function clears histogram bins that are below the specified threshold.
|
||||
|
||||
|
||||
CalcPGH
|
||||
-------
|
||||
Calculates a pair-wise geometrical histogram for a contour.
|
||||
|
||||
.. ocv:cfunction:: void cvCalcPGH( const CvSeq* contour, CvHistogram* hist )
|
||||
.. ocv:pyoldfunction:: cv.CalcPGH(contour, hist)-> None
|
||||
|
||||
:param contour: Input contour. Currently, only integer point coordinates are allowed.
|
||||
|
||||
:param hist: Calculated histogram. It must be two-dimensional.
|
||||
|
||||
The function calculates a 2D pair-wise geometrical histogram (PGH), described in [Iivarinen97]_ for the contour. The algorithm considers every pair of contour
|
||||
edges. The angle between the edges and the minimum/maximum distances
|
||||
are determined for every pair. To do this, each of the edges in turn
|
||||
is taken as the base, while the function loops through all the other
|
||||
edges. When the base edge and any other edge are considered, the minimum
|
||||
and maximum distances from the points on the non-base edge and line of
|
||||
the base edge are selected. The angle between the edges defines the row
|
||||
of the histogram in which all the bins that correspond to the distance
|
||||
between the calculated minimum and maximum distances are incremented
|
||||
(that is, the histogram is transposed relatively to the definition in the original paper). The histogram can be used for contour matching.
|
||||
|
||||
.. [RubnerSept98] Y. Rubner. C. Tomasi, L.J. Guibas. *The Earth Mover’s Distance as a Metric for Image Retrieval*. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998.
|
||||
|
||||
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. *Comparison of Combined Shape Descriptors for Irregular Objects*, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
|
||||
|
||||
@@ -12,19 +12,20 @@ Applies an adaptive threshold to an array.
|
||||
|
||||
.. ocv:pyfunction:: cv2.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double maxValue, int adaptiveMethod=CV_ADAPTIVE_THRESH_MEAN_C, int thresholdType=CV_THRESH_BINARY, int blockSize=3, double param1=5 )
|
||||
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptiveMethod=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
|
||||
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, int adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, int threshold_type=CV_THRESH_BINARY, int block_size=3, double param1=5 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
|
||||
|
||||
:param src: Source 8-bit single-channel image.
|
||||
|
||||
:param dst: Destination image of the same size and the same type as ``src`` .
|
||||
|
||||
|
||||
:param maxValue: Non-zero value assigned to the pixels for which the condition is satisfied. See the details below.
|
||||
|
||||
:param adaptiveMethod: Adaptive thresholding algorithm to use, ``ADAPTIVE_THRESH_MEAN_C`` or ``ADAPTIVE_THRESH_GAUSSIAN_C`` . See the details below.
|
||||
|
||||
:param thresholdType: Thresholding type that must be either ``THRESH_BINARY`` or ``THRESH_BINARY_INV`` .
|
||||
|
||||
|
||||
:param blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
|
||||
|
||||
:param C: Constant subtracted from the mean or weighted mean (see the details below). Normally, it is positive but may be zero or negative as well.
|
||||
@@ -83,7 +84,7 @@ Converts an image from one color space to another.
|
||||
:param src: Source image: 8-bit unsigned, 16-bit unsigned ( ``CV_16UC...`` ), or single-precision floating-point.
|
||||
|
||||
:param dst: Destination image of the same size and depth as ``src`` .
|
||||
|
||||
|
||||
:param code: Color space conversion code. See the description below.
|
||||
|
||||
:param dstCn: Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from ``src`` and ``code`` .
|
||||
@@ -98,7 +99,7 @@ The conventional ranges for R, G, and B channel values are:
|
||||
0 to 255 for ``CV_8U`` images
|
||||
|
||||
*
|
||||
0 to 65535 for ``CV_16U`` images
|
||||
0 to 65535 for ``CV_16U`` images
|
||||
|
||||
*
|
||||
0 to 1 for ``CV_32F`` images
|
||||
@@ -414,22 +415,22 @@ Calculates the distance to the closest zero pixel for each pixel of the source i
|
||||
|
||||
.. ocv:function:: void distanceTransform( InputArray src, OutputArray dst, OutputArray labels, int distanceType, int maskSize, int labelType=DIST_LABEL_CCOMP )
|
||||
|
||||
.. ocv:pyfunction:: cv2.distanceTransform(src, distanceType, maskSize[, dst[, labels[, labelType=cv2.DIST_LABEL_CCOMP]]]) -> dst, labels
|
||||
.. ocv:pyfunction:: cv2.distanceTransform(src, distanceType, maskSize[, dst]) -> dst
|
||||
|
||||
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distanceType=CV_DIST_L2, int maskSize=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
|
||||
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distance_type=CV_DIST_L2, int mask_size=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distanceType=CV_DIST_L2, maskSize=3, mask=None, labels=None)-> None
|
||||
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distance_type=CV_DIST_L2, mask_size=3, mask=None, labels=None) -> None
|
||||
|
||||
:param src: 8-bit, single-channel (binary) source image.
|
||||
|
||||
:param dst: Output image with calculated distances. It is a 32-bit floating-point, single-channel image of the same size as ``src`` .
|
||||
|
||||
|
||||
:param distanceType: Type of distance. It can be ``CV_DIST_L1, CV_DIST_L2`` , or ``CV_DIST_C`` .
|
||||
|
||||
|
||||
:param maskSize: Size of the distance transform mask. It can be 3, 5, or ``CV_DIST_MASK_PRECISE`` (the latter option is only supported by the first function). In case of the ``CV_DIST_L1`` or ``CV_DIST_C`` distance type, the parameter is forced to 3 because a :math:`3\times 3` mask gives the same result as :math:`5\times 5` or any larger aperture.
|
||||
|
||||
:param labels: Optional output 2D array of labels (the discrete Voronoi diagram). It has the type ``CV_32SC1`` and the same size as ``src`` . See the details below.
|
||||
|
||||
|
||||
:param labelType: Type of the label array to build. If ``labelType==DIST_LABEL_CCOMP`` then each connected component of zeros in ``src`` (as well as all the non-zero pixels closest to the connected component) will be assigned the same label. If ``labelType==DIST_LABEL_PIXEL`` then each zero pixel (and all the non-zero pixels closest to it) gets its own label.
|
||||
|
||||
The functions ``distanceTransform`` calculate the approximate or precise
|
||||
@@ -483,22 +484,22 @@ floodFill
|
||||
-------------
|
||||
Fills a connected component with the given color.
|
||||
|
||||
.. ocv:function:: int floodFill( InputOutputArray image, Point seed, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
|
||||
.. ocv:function:: int floodFill( InputOutputArray image, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
|
||||
|
||||
.. ocv:function:: int floodFill( InputOutputArray image, InputOutputArray mask, Point seed, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
|
||||
.. ocv:function:: int floodFill( InputOutputArray image, InputOutputArray mask, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
|
||||
|
||||
.. ocv:pyfunction:: cv2.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, rect
|
||||
|
||||
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seedPoint, CvScalar newVal, CvScalar loDiff=cvScalarAll(0), CvScalar upDiff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.FloodFill(image, seedPoint, newVal, loDiff=(0, 0, 0, 0), upDiff=(0, 0, 0, 0), flags=4, mask=None)-> comp
|
||||
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seed_point, CvScalar new_val, CvScalar lo_diff=cvScalarAll(0), CvScalar up_diff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.FloodFill(image, seed_point, new_val, lo_diff=(0, 0, 0, 0), up_diff=(0, 0, 0, 0), flags=4, mask=None)-> comp
|
||||
|
||||
:param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below.
|
||||
|
||||
:param mask: (For the second function only) Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility for initializing the ``mask`` content. Flood-filling cannot go across non-zero pixels in the mask. For example, an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask in multiple calls to the function to make sure the filled area does not overlap.
|
||||
:param mask: (For the second function only) Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility for initializing the ``mask`` content. Flood-filling cannot go across non-zero pixels in the mask. For example, an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask in multiple calls to the function to make sure the filled area does not overlap.
|
||||
|
||||
.. note:: Since the mask is larger than the filled image, a pixel :math:`(x, y)` in ``image`` corresponds to the pixel :math:`(x+1, y+1)` in the ``mask`` .
|
||||
|
||||
:param seed: Starting point.
|
||||
:param seedPoint: Starting point.
|
||||
|
||||
:param newVal: New value of the repainted domain pixels.
|
||||
|
||||
@@ -517,18 +518,18 @@ Fills a connected component with the given color.
|
||||
The functions ``floodFill`` fill a connected component starting from the seed point with the specified color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The pixel at
|
||||
:math:`(x,y)` is considered to belong to the repainted domain if:
|
||||
|
||||
*
|
||||
*
|
||||
.. math::
|
||||
|
||||
\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}
|
||||
|
||||
in case of a grayscale image and floating range
|
||||
|
||||
*
|
||||
*
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)+ \texttt{upDiff}
|
||||
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}
|
||||
|
||||
in case of a grayscale image and fixed range
|
||||
|
||||
@@ -541,7 +542,7 @@ The functions ``floodFill`` fill a connected component starting from the seed po
|
||||
.. math::
|
||||
|
||||
\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g
|
||||
|
||||
|
||||
and
|
||||
|
||||
.. math::
|
||||
@@ -555,17 +556,17 @@ The functions ``floodFill`` fill a connected component starting from the seed po
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_r+ \texttt{upDiff} _r,
|
||||
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_g+ \texttt{upDiff} _g
|
||||
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g
|
||||
|
||||
and
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_b+ \texttt{upDiff} _b
|
||||
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b
|
||||
|
||||
in case of a color image and fixed range
|
||||
|
||||
@@ -588,11 +589,11 @@ integral
|
||||
------------
|
||||
Calculates the integral of an image.
|
||||
|
||||
.. ocv:function:: void integral( InputArray image, OutputArray sum, int sdepth=-1 )
|
||||
.. ocv:function:: void integral( InputArray src, OutputArray sum, int sdepth=-1 )
|
||||
|
||||
.. ocv:function:: void integral( InputArray image, OutputArray sum, OutputArray sqsum, int sdepth=-1 )
|
||||
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth=-1 )
|
||||
|
||||
.. ocv:function:: void integral( InputArray image, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1 )
|
||||
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1 )
|
||||
|
||||
.. ocv:pyfunction:: cv2.integral(src[, sum[, sdepth]]) -> sum
|
||||
|
||||
@@ -600,7 +601,8 @@ Calculates the integral of an image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.integral3(src[, sum[, sqsum[, tilted[, sdepth]]]]) -> sum, sqsum, tilted
|
||||
|
||||
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tiltedSum=NULL )
|
||||
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tilted_sum=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Integral(image, sum, sqsum=None, tiltedSum=None)-> None
|
||||
|
||||
:param image: Source image as :math:`W \times H` , 8-bit or floating-point (32f or 64f).
|
||||
@@ -610,7 +612,7 @@ Calculates the integral of an image.
|
||||
:param sqsum: Integral image for squared pixel values. It is :math:`(W+1)\times (H+1)`, double-precision floating-point (64f) array.
|
||||
|
||||
:param tilted: Integral for the image rotated by 45 degrees. It is :math:`(W+1)\times (H+1)` array with the same data type as ``sum``.
|
||||
|
||||
|
||||
:param sdepth: Desired depth of the integral and the tilted integral images, ``CV_32S``, ``CV_32F``, or ``CV_64F``.
|
||||
|
||||
The functions calculate one or more integral images for the source image as follows:
|
||||
@@ -647,22 +649,23 @@ threshold
|
||||
-------------
|
||||
Applies a fixed-level threshold to each array element.
|
||||
|
||||
.. ocv:function:: double threshold( InputArray src, OutputArray dst, double thresh, double maxVal, int thresholdType )
|
||||
.. ocv:function:: double threshold( InputArray src, OutputArray dst, double thresh, double maxval, int type )
|
||||
|
||||
.. ocv:pyfunction:: cv2.threshold(src, thresh, maxval, type[, dst]) -> retval, dst
|
||||
|
||||
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double maxValue, int thresholdType )
|
||||
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double max_value, int threshold_type )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None
|
||||
|
||||
:param src: Source array (single-channel, 8-bit or 32-bit floating point).
|
||||
|
||||
:param dst: Destination array of the same size and type as ``src`` .
|
||||
|
||||
|
||||
:param thresh: Threshold value.
|
||||
|
||||
:param maxVal: Maximum value to use with the ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` thresholding types.
|
||||
:param maxval: Maximum value to use with the ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` thresholding types.
|
||||
|
||||
:param thresholdType: Thresholding type (see the details below).
|
||||
:param type: Thresholding type (see the details below).
|
||||
|
||||
The function applies fixed-level thresholding
|
||||
to a single-channel array. The function is typically used to get a
|
||||
@@ -670,19 +673,19 @@ bi-level (binary) image out of a grayscale image (
|
||||
:ocv:func:`compare` could
|
||||
be also used for this purpose) or for removing a noise, that is, filtering
|
||||
out pixels with too small or too large values. There are several
|
||||
types of thresholding supported by the function. They are determined by ``thresholdType`` :
|
||||
types of thresholding supported by the function. They are determined by ``type`` :
|
||||
|
||||
* **THRESH_BINARY**
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{dst} (x,y) = \fork{\texttt{maxVal}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
|
||||
\texttt{dst} (x,y) = \fork{\texttt{maxval}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
|
||||
|
||||
* **THRESH_BINARY_INV**
|
||||
|
||||
.. math::
|
||||
|
||||
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxVal}}{otherwise}
|
||||
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxval}}{otherwise}
|
||||
|
||||
* **THRESH_TRUNC**
|
||||
|
||||
@@ -706,7 +709,7 @@ Also, the special value ``THRESH_OTSU`` may be combined with
|
||||
one of the above values. In this case, the function determines the optimal threshold
|
||||
value using the Otsu's algorithm and uses it instead of the specified ``thresh`` .
|
||||
The function returns the computed threshold value.
|
||||
Currently, the Otsu's method is implemented only for 8-bit images.
|
||||
Currently, the Otsu's method is implemented only for 8-bit images.
|
||||
|
||||
|
||||
.. image:: pics/threshold.png
|
||||
@@ -748,11 +751,11 @@ grabCut
|
||||
-------
|
||||
Runs the GrabCut algorithm.
|
||||
|
||||
.. ocv:function:: void grabCut(InputArray image, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode )
|
||||
.. ocv:function:: void grabCut( InputArray img, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode=GC_EVAL )
|
||||
|
||||
.. ocv:pyfunction:: cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount[, mode]) -> None
|
||||
|
||||
:param image: Input 8-bit 3-channel image.
|
||||
:param img: Input 8-bit 3-channel image.
|
||||
|
||||
:param mask: Input/output 8-bit single-channel mask. The mask is initialized by the function when ``mode`` is set to ``GC_INIT_WITH_RECT``. Its elements may have one of following values:
|
||||
|
||||
@@ -765,13 +768,13 @@ Runs the GrabCut algorithm.
|
||||
* **GC_PR_BGD** defines a possible background pixel.
|
||||
|
||||
:param rect: ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when ``mode==GC_INIT_WITH_RECT`` .
|
||||
|
||||
|
||||
:param bgdModel: Temporary array for the background model. Do not modify it while you are processing the same image.
|
||||
|
||||
|
||||
:param fgdModel: Temporary arrays for the foreground model. Do not modify it while you are processing the same image.
|
||||
|
||||
:param iterCount: Number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with ``mode==GC_INIT_WITH_MASK`` or ``mode==GC_EVAL`` .
|
||||
|
||||
|
||||
:param mode: Operation mode that could be one of the following:
|
||||
|
||||
* **GC_INIT_WITH_RECT** The function initializes the state and the mask using the provided rectangle. After that it runs ``iterCount`` iterations of the algorithm.
|
||||
|
||||
@@ -11,8 +11,9 @@ Adds an image to the accumulator.
|
||||
|
||||
.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.Acc(src, dst, mask=None)-> None
|
||||
.. ocv:cfunction:: void cvAcc( const CvArr* image, CvArr* sum, const CvArr* mask=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Acc(image, sum, mask=None) -> None
|
||||
|
||||
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
|
||||
|
||||
@@ -46,8 +47,9 @@ Adds the square of a source image to the accumulator.
|
||||
|
||||
.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvSquareAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.SquareAcc(src, dst, mask=None)-> None
|
||||
.. ocv:cfunction:: void cvSquareAcc( const CvArr* image, CvArr* sqsum, const CvArr* mask=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.SquareAcc(image, sqsum, mask=None) -> None
|
||||
|
||||
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
|
||||
|
||||
@@ -79,13 +81,14 @@ Adds the per-element product of two input images to the accumulator.
|
||||
|
||||
.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.MultiplyAcc(src1, src2, dst, mask=None)-> None
|
||||
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, const CvArr* mask=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.MultiplyAcc(image1, image2, acc, mask=None)-> None
|
||||
|
||||
:param src1: First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
|
||||
|
||||
:param src2: Second input image of the same type and the same size as ``src1`` .
|
||||
|
||||
|
||||
:param dst: Accumulator with the same number of channels as input images, 32-bit or 64-bit floating-point.
|
||||
|
||||
:param mask: Optional operation mask.
|
||||
@@ -114,8 +117,8 @@ Updates a running average.
|
||||
|
||||
.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvRunningAvg( const CvArr* src, CvArr* dst, double alpha, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.RunningAvg(src, dst, alpha, mask=None)-> None
|
||||
.. ocv:cfunction:: void cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, const CvArr* mask=NULL )
|
||||
.. ocv:pyoldfunction:: cv.RunningAvg(image, acc, alpha, mask=None)-> None
|
||||
|
||||
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
|
||||
|
||||
@@ -138,7 +141,7 @@ The function supports multi-channel images. Each channel is processed independen
|
||||
|
||||
:ocv:func:`accumulate`,
|
||||
:ocv:func:`accumulateSquare`,
|
||||
:ocv:func:`accumulateProduct`
|
||||
:ocv:func:`accumulateProduct`
|
||||
|
||||
|
||||
|
||||
@@ -206,7 +209,7 @@ This function computes a Hanning window coefficients in two dimensions. See http
|
||||
|
||||
An example is shown below: ::
|
||||
|
||||
// create hanning window of size 100x100 and type CV_32F
|
||||
// create hanning window of size 100x100 and type CV_32F
|
||||
Mat hann;
|
||||
createHanningWindow(hann, Size(100, 100), CV_32F);
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ matchTemplate
|
||||
-----------------
|
||||
Compares a template against overlapped image regions.
|
||||
|
||||
.. ocv:function:: void matchTemplate( InputArray image, InputArray temp, OutputArray result, int method )
|
||||
.. ocv:function:: void matchTemplate( InputArray image, InputArray templ, OutputArray result, int method )
|
||||
|
||||
.. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result
|
||||
|
||||
@@ -19,7 +19,7 @@ Compares a template against overlapped image regions.
|
||||
:param templ: Searched template. It must not be greater than the source image and must have the same data type.
|
||||
|
||||
:param result: Map of comparison results. It must be single-channel 32-bit floating-point. If ``image`` is :math:`W \times H` and ``templ`` is :math:`w \times h` , then ``result`` is :math:`(W-w+1) \times (H-h+1)` .
|
||||
|
||||
|
||||
:param method: Parameter specifying the comparison method (see below).
|
||||
|
||||
The function slides through ``image`` , compares the
|
||||
|
||||
@@ -11,13 +11,14 @@ Calculates all of the moments up to the third order of a polygon or rasterized s
|
||||
|
||||
.. ocv:pyfunction:: cv2.moments(array[, binaryImage]) -> retval
|
||||
|
||||
.. ocv:cfunction:: void cvMoments( const CvArr* array, CvMoments* moments, int binary=0 )
|
||||
.. ocv:pyoldfunction:: cv.Moments(array, binary=0) -> moments
|
||||
.. ocv:cfunction:: void cvMoments( const CvArr* arr, CvMoments* moments, int binary=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.Moments(arr, binary=0) -> moments
|
||||
|
||||
:param array: Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( :math:`1 \times N` or :math:`N \times 1` ) of 2D points (``Point`` or ``Point2f`` ).
|
||||
|
||||
:param binaryImage: If it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only.
|
||||
|
||||
|
||||
:param moments: Output moments.
|
||||
|
||||
The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The results are returned in the structure ``Moments`` defined as: ::
|
||||
@@ -30,7 +31,7 @@ The function computes moments, up to the 3rd order, of a vector shape or a raste
|
||||
double m02, double m30, double m21, double m12, double m03 );
|
||||
Moments( const CvMoments& moments );
|
||||
operator CvMoments() const;
|
||||
|
||||
|
||||
// spatial moments
|
||||
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
|
||||
// central moments
|
||||
@@ -69,7 +70,7 @@ The normalized central moments
|
||||
.. note::
|
||||
|
||||
:math:`\texttt{mu}_{00}=\texttt{m}_{00}`,
|
||||
:math:`\texttt{nu}_{00}=1`
|
||||
:math:`\texttt{nu}_{00}=1`
|
||||
:math:`\texttt{nu}_{10}=\texttt{mu}_{10}=\texttt{nu}_{01}=\texttt{mu}_{01}=0` , hence the values are not stored.
|
||||
|
||||
The moments of a contour are defined in the same way but computed using the Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to a limited raster resolution, the moments computed for a contour are slightly different from the moments computed for the same rasterized contour.
|
||||
@@ -89,11 +90,13 @@ HuMoments
|
||||
-------------
|
||||
Calculates seven Hu invariants.
|
||||
|
||||
.. ocv:function:: void HuMoments( const Moments& moments, double* hu )
|
||||
.. ocv:function:: void HuMoments( const Moments& m, OutputArray hu )
|
||||
|
||||
.. ocv:pyfunction:: cv2.HuMoments(m) -> hu
|
||||
.. ocv:function:: void HuMoments( const Moments& moments, double hu[7] )
|
||||
|
||||
.. ocv:cfunction:: void cvGetHuMoments( const CvMoments* moments, CvHuMoments* hu )
|
||||
.. ocv:pyfunction:: cv2.HuMoments(m[, hu]) -> hu
|
||||
|
||||
.. ocv:cfunction:: void cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu
|
||||
|
||||
@@ -126,9 +129,9 @@ Finds contours in a binary image.
|
||||
|
||||
.. ocv:pyfunction:: cv2.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy
|
||||
|
||||
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** firstContour, int headerSize=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0, 0) )
|
||||
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> cvseq
|
||||
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> contours
|
||||
|
||||
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours.
|
||||
|
||||
@@ -163,87 +166,6 @@ The function retrieves contours from the binary image using the algorithm
|
||||
|
||||
.. note:: If you use the new Python interface then the ``CV_`` prefix has to be omitted in contour retrieval mode and contour approximation method parameters (for example, use ``cv2.RETR_LIST`` and ``cv2.CHAIN_APPROX_NONE`` parameters). If you use the old Python interface then these parameters have the ``CV_`` prefix (for example, use ``cv.CV_RETR_LIST`` and ``cv.CV_CHAIN_APPROX_NONE``).
|
||||
|
||||
drawContours
|
||||
----------------
|
||||
Draws contours outlines or filled contours.
|
||||
|
||||
.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
|
||||
|
||||
.. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
|
||||
|
||||
.. ocv:cfunction:: void cvDrawContours( CvArr *img, CvSeq* contour, CvScalar externalColor, CvScalar holeColor, int maxLevel, int thickness=1, int lineType=8 )
|
||||
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, externalColor, holeColor, maxLevel, thickness=1, lineType=8, offset=(0, 0))-> None
|
||||
|
||||
:param image: Destination image.
|
||||
|
||||
:param contours: All the input contours. Each contour is stored as a point vector.
|
||||
|
||||
:param contourIdx: Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
|
||||
|
||||
:param color: Color of the contours.
|
||||
|
||||
:param thickness: Thickness of lines the contours are drawn with. If it is negative (for example, ``thickness=CV_FILLED`` ), the contour interiors are
|
||||
drawn.
|
||||
|
||||
:param lineType: Line connectivity. See :ocv:func:`line` for details.
|
||||
|
||||
:param hierarchy: Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see ``maxLevel`` ).
|
||||
|
||||
:param maxLevel: Maximal level for drawn contours. If it is 0, only
|
||||
the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is ``hierarchy`` available.
|
||||
|
||||
:param offset: Optional contour shift parameter. Shift all the drawn contours by the specified :math:`\texttt{offset}=(dx,dy)` .
|
||||
|
||||
:param contour: Pointer to the first contour.
|
||||
|
||||
:param externalColor: Color of external contours.
|
||||
|
||||
:param holeColor: Color of internal contours (holes).
|
||||
|
||||
The function draws contour outlines in the image if
|
||||
:math:`\texttt{thickness} \ge 0` or fills the area bounded by the contours if
|
||||
:math:`\texttt{thickness}<0` . The example below shows how to retrieve connected components from the binary image and label them: ::
|
||||
|
||||
#include "cv.h"
|
||||
#include "highgui.h"
|
||||
|
||||
using namespace cv;
|
||||
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
Mat src;
|
||||
// the first command-line parameter must be a filename of the binary
|
||||
// (black-n-white) image
|
||||
if( argc != 2 || !(src=imread(argv[1], 0)).data)
|
||||
return -1;
|
||||
|
||||
Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
|
||||
|
||||
src = src > 1;
|
||||
namedWindow( "Source", 1 );
|
||||
imshow( "Source", src );
|
||||
|
||||
vector<vector<Point> > contours;
|
||||
vector<Vec4i> hierarchy;
|
||||
|
||||
findContours( src, contours, hierarchy,
|
||||
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
|
||||
|
||||
// iterate through all the top-level contours,
|
||||
// draw each connected component with its own random color
|
||||
int idx = 0;
|
||||
for( ; idx >= 0; idx = hierarchy[idx][0] )
|
||||
{
|
||||
Scalar color( rand()&255, rand()&255, rand()&255 );
|
||||
drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
|
||||
}
|
||||
|
||||
namedWindow( "Components", 1 );
|
||||
imshow( "Components", dst );
|
||||
waitKey(0);
|
||||
}
|
||||
|
||||
|
||||
|
||||
approxPolyDP
|
||||
----------------
|
||||
@@ -253,28 +175,28 @@ Approximates a polygonal curve(s) with the specified precision.
|
||||
|
||||
.. ocv:pyfunction:: cv2.approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvApproxPoly( const void* curve, int headerSize, CvMemStorage* storage, int method, double epsilon, int recursive=0 )
|
||||
.. ocv:cfunction:: CvSeq* cvApproxPoly( const void* src_seq, int header_size, CvMemStorage* storage, int method, double eps, int recursive=0 )
|
||||
|
||||
:param curve: Input vector of 2D points stored in:
|
||||
|
||||
|
||||
* ``std::vector`` or ``Mat`` (C++ interface)
|
||||
|
||||
|
||||
* ``Nx2`` numpy array (Python interface)
|
||||
|
||||
* ``CvSeq`` or ``CvMat`` (C interface)
|
||||
|
||||
* ``CvSeq`` or ``CvMat`` (C interface)
|
||||
|
||||
:param approxCurve: Result of the approximation. The type should match the type of the input curve. In case of C interface the approximated curve is stored in the memory storage and pointer to it is returned.
|
||||
|
||||
:param epsilon: Parameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation.
|
||||
|
||||
:param closed: If true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed.
|
||||
|
||||
:param headerSize: Header size of the approximated curve. Normally, ``sizeof(CvContour)`` is used.
|
||||
|
||||
|
||||
:param header_size: Header size of the approximated curve. Normally, ``sizeof(CvContour)`` is used.
|
||||
|
||||
:param storage: Memory storage where the approximated curve is stored.
|
||||
|
||||
|
||||
:param method: Contour approximation algorithm. Only ``CV_POLY_APPROX_DP`` is supported.
|
||||
|
||||
|
||||
:param recursive: Recursion flag. If it is non-zero and ``curve`` is ``CvSeq*``, the function ``cvApproxPoly`` approximates all the contours accessible from ``curve`` by ``h_next`` and ``v_next`` links.
|
||||
|
||||
The functions ``approxPolyDP`` approximate a curve or a polygon with another curve/polygon with less vertices so that the distance between them is less or equal to the specified precision. It uses the Douglas-Peucker algorithm
|
||||
@@ -287,22 +209,22 @@ ApproxChains
|
||||
-------------
|
||||
Approximates Freeman chain(s) with a polygonal curve.
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* chain, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimalPerimeter=0, int recursive=0 )
|
||||
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* src_seq, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimal_perimeter=0, int recursive=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ApproxChains(src_seq, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimal_perimeter=0, recursive=0)-> contours
|
||||
|
||||
:param src_seq: Pointer to the approximated Freeman chain that can refer to other chains.
|
||||
|
||||
:param storage: Storage location for the resulting polylines.
|
||||
|
||||
:param method: Approximation method (see the description of the function :ocv:cfunc:`FindContours` ).
|
||||
|
||||
:param parameter: Method parameter (not used now).
|
||||
|
||||
:param minimal_perimeter: Approximates only those contours whose perimeters are not less than ``minimal_perimeter`` . Other chains are removed from the resulting structure.
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ApproxChains(chain, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimalPerimeter=0, recursive=0)-> contours
|
||||
|
||||
:param chain: Pointer to the approximated Freeman chain that can refer to other chains.
|
||||
|
||||
:param storage: Storage location for the resulting polylines.
|
||||
|
||||
:param method: Approximation method (see the description of the function :ocv:cfunc:`FindContours` ).
|
||||
|
||||
:param parameter: Method parameter (not used now).
|
||||
|
||||
:param minimalPerimeter: Approximates only those contours whose perimeters are not less than ``minimalPerimeter`` . Other chains are removed from the resulting structure.
|
||||
|
||||
:param recursive: Recursion flag. If it is non-zero, the function approximates all chains that can be obtained from ``chain`` by using the ``h_next`` or ``v_next`` links. Otherwise, the single input chain is approximated.
|
||||
|
||||
|
||||
This is a standalone contour approximation routine, not represented in the new interface. When :ocv:cfunc:`FindContours` retrieves contours as Freeman chains, it calls the function to get approximated contours, represented as polygons.
|
||||
|
||||
|
||||
@@ -314,8 +236,9 @@ Calculates a contour perimeter or a curve length.
|
||||
|
||||
.. ocv:pyfunction:: cv2.arcLength(curve, closed) -> retval
|
||||
|
||||
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int isClosed=-1 )
|
||||
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1)-> double
|
||||
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int is_closed=-1 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1) -> float
|
||||
|
||||
:param curve: Input vector of 2D points, stored in ``std::vector`` or ``Mat``.
|
||||
|
||||
@@ -351,9 +274,9 @@ Calculates a contour area.
|
||||
|
||||
.. ocv:pyfunction:: cv2.contourArea(contour[, oriented]) -> retval
|
||||
|
||||
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ )
|
||||
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ, int oriented=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ)-> double
|
||||
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ) -> float
|
||||
|
||||
:param contour: Input vector of 2D points (contour vertices), stored in ``std::vector`` or ``Mat``.
|
||||
|
||||
@@ -390,22 +313,22 @@ Finds the convex hull of a point set.
|
||||
|
||||
.. ocv:function:: void convexHull( InputArray points, OutputArray hull, bool clockwise=false, bool returnPoints=true )
|
||||
|
||||
.. ocv:pyfunction:: cv2.convexHull(points[, hull[, returnPoints[, clockwise]]]) -> hull
|
||||
.. ocv:pyfunction:: cv2.convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* storage=NULL, int orientation=CV_CLOCKWISE, int returnPoints=0 )
|
||||
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0 )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, returnPoints=0)-> convexHull
|
||||
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, return_points=0) -> convexHull
|
||||
|
||||
:param points: Input 2D point set, stored in ``std::vector`` or ``Mat``.
|
||||
|
||||
:param hull: Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the ``hull`` elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, ``hull`` elements are the convex hull points themselves.
|
||||
|
||||
:param storage: Output memory storage in the old API (``cvConvexHull2`` returns a sequence containing the convex hull points or their indices).
|
||||
|
||||
:param hull_storage: Output memory storage in the old API (``cvConvexHull2`` returns a sequence containing the convex hull points or their indices).
|
||||
|
||||
:param clockwise: Orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual screen coordinate system is assumed so that the origin is at the top-left corner, x axis is oriented to the right, and y axis is oriented downwards.
|
||||
|
||||
|
||||
:param orientation: Convex hull orientation parameter in the old API, ``CV_CLOCKWISE`` or ``CV_COUNTERCLOCKWISE``.
|
||||
|
||||
|
||||
:param returnPoints: Operation flag. In case of a matrix, when the flag is true, the function returns convex hull points. Otherwise, it returns indices of the convex hull points. When the output array is ``std::vector``, the flag is ignored, and the output depends on the type of the vector: ``std::vector<int>`` implies ``returnPoints=true``, ``std::vector<Point>`` implies ``returnPoints=false``.
|
||||
|
||||
The functions find the convex hull of a 2D point set using the Sklansky's algorithm
|
||||
@@ -420,20 +343,20 @@ Finds the convexity defects of a contour.
|
||||
|
||||
.. ocv:function:: void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects )
|
||||
|
||||
.. ocv:pyfunction:: cv2.ConvexityDefects(contour, convexhull)-> convexityDefects
|
||||
.. ocv:pyfunction:: cv2.convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects
|
||||
|
||||
.. ocv:cfunction:: CvSeq* cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, CvMemStorage* storage=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.ConvexityDefects(contour, convexhull, storage)-> convexityDefects
|
||||
|
||||
:param contour: Input contour.
|
||||
|
||||
:param convexhull: Convex hull obtained using :ocv:func:`convexHull` that should contain indices of the contour points that make the hull.
|
||||
:param contour: Input contour.
|
||||
|
||||
:param convexhull: Convex hull obtained using :ocv:func:`convexHull` that should contain indices of the contour points that make the hull.
|
||||
|
||||
:param convexityDefects: The output vector of convexity defects. In C++ and the new Python/Java interface each convexity defect is represented as 4-element integer vector (a.k.a. ``cv::Vec4i``): ``(start_index, end_index, farthest_pt_index, fixpt_depth)``, where indices are 0-based indices in the original contour of the convexity defect beginning, end and the farthest point, and ``fixpt_depth`` is fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour point and the hull. That is, to get the floating-point value of the depth will be ``fixpt_depth/256.0``. In C interface convexity defect is represented by ``CvConvexityDefect`` structure - see below.
|
||||
|
||||
|
||||
:param storage: Container for the output sequence of convexity defects. If it is NULL, the contour or hull (in that order) storage is used.
|
||||
|
||||
|
||||
The function finds all convexity defects of the input contour and returns a sequence of the ``CvConvexityDefect`` structures, where ``CvConvexityDefect`` is defined as: ::
|
||||
|
||||
struct CvConvexityDefect
|
||||
@@ -460,7 +383,7 @@ Fits an ellipse around a set of 2D points.
|
||||
.. ocv:pyoldfunction:: cv.FitEllipse2(points)-> Box2D
|
||||
|
||||
:param points: Input 2D point set, stored in:
|
||||
|
||||
|
||||
* ``std::vector<>`` or ``Mat`` (C++ interface)
|
||||
|
||||
* ``CvSeq*`` or ``CvMat*`` (C interface)
|
||||
@@ -475,10 +398,11 @@ Fits a line to a 2D or 3D point set.
|
||||
|
||||
.. ocv:function:: void fitLine( InputArray points, OutputArray line, int distType, double param, double reps, double aeps )
|
||||
|
||||
.. ocv:pyfunction:: cv2.fitLine(points, distType, param, reps, aeps) -> line
|
||||
.. ocv:pyfunction:: cv2.fitLine(points, distType, param, reps, aeps[, line]) -> line
|
||||
|
||||
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int distType, double param, double reps, double aeps, float* line )
|
||||
.. ocv:pyoldfunction:: cv.FitLine(points, distType, param, reps, aeps) -> line
|
||||
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int dist_type, double param, double reps, double aeps, float* line )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.FitLine(points, dist_type, param, reps, aeps) -> line
|
||||
|
||||
:param points: Input vector of 2D or 3D points, stored in ``std::vector<>`` or ``Mat``.
|
||||
|
||||
@@ -489,7 +413,7 @@ Fits a line to a 2D or 3D point set.
|
||||
:param param: Numerical parameter ( ``C`` ) for some types of distances. If it is 0, an optimal value is chosen.
|
||||
|
||||
:param reps: Sufficient accuracy for the radius (distance between the coordinate origin and the line).
|
||||
|
||||
|
||||
:param aeps: Sufficient accuracy for the angle. 0.01 would be a good default value for ``reps`` and ``aeps``.
|
||||
|
||||
The function ``fitLine`` fits a line to a 2D or 3D point set by minimizing
|
||||
@@ -554,7 +478,7 @@ Tests a contour convexity.
|
||||
.. ocv:pyoldfunction:: cv.CheckContourConvexity(contour)-> int
|
||||
|
||||
:param contour: Input vector of 2D points, stored in:
|
||||
|
||||
|
||||
* ``std::vector<>`` or ``Mat`` (C++ interface)
|
||||
|
||||
* ``CvSeq*`` or ``CvMat*`` (C interface)
|
||||
@@ -575,14 +499,14 @@ Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
|
||||
|
||||
.. ocv:cfunction:: CvBox2D cvMinAreaRect2( const CvArr* points, CvMemStorage* storage=NULL )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None)-> CvBox2D
|
||||
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None) -> Box2D
|
||||
|
||||
:param points: Input vector of 2D points, stored in:
|
||||
|
||||
|
||||
* ``std::vector<>`` or ``Mat`` (C++ interface)
|
||||
|
||||
|
||||
* ``CvSeq*`` or ``CvMat*`` (C interface)
|
||||
|
||||
|
||||
* Nx2 numpy array (Python interface)
|
||||
|
||||
The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a specified point set. See the OpenCV sample ``minarea.cpp`` .
|
||||
@@ -595,18 +519,18 @@ Finds a circle of the minimum area enclosing a 2D point set.
|
||||
|
||||
.. ocv:function:: void minEnclosingCircle( InputArray points, Point2f& center, float& radius )
|
||||
|
||||
.. ocv:pyfunction:: cv2.minEnclosingCircle(points, center, radius) -> None
|
||||
.. ocv:pyfunction:: cv2.minEnclosingCircle(points) -> center, radius
|
||||
|
||||
.. ocv:cfunction:: int cvMinEnclosingCircle( const CvArr* points, CvPoint2D32f* center, float* radius )
|
||||
|
||||
.. ocv:pyoldfunction:: cv.MinEnclosingCircle(points)-> (int, center, radius)
|
||||
|
||||
:param points: Input vector of 2D points, stored in:
|
||||
|
||||
|
||||
* ``std::vector<>`` or ``Mat`` (C++ interface)
|
||||
|
||||
|
||||
* ``CvSeq*`` or ``CvMat*`` (C interface)
|
||||
|
||||
|
||||
* Nx2 numpy array (Python interface)
|
||||
|
||||
:param center: Output center of the circle.
|
||||
@@ -621,12 +545,12 @@ matchShapes
|
||||
---------------
|
||||
Compares two shapes.
|
||||
|
||||
.. ocv:function:: double matchShapes( InputArray object1, InputArray object2, int method, double parameter=0 )
|
||||
.. ocv:function:: double matchShapes( InputArray contour1, InputArray contour2, int method, double parameter )
|
||||
|
||||
.. ocv:pyfunction:: cv2.matchShapes(contour1, contour2, method, parameter) -> retval
|
||||
|
||||
.. ocv:cfunction:: double cvMatchShapes( const void* object1, const void* object2, int method, double parameter=0 )
|
||||
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0)-> None
|
||||
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0) -> float
|
||||
|
||||
:param object1: First contour or grayscale image.
|
||||
|
||||
@@ -680,8 +604,8 @@ Performs a point-in-contour test.
|
||||
|
||||
.. ocv:pyfunction:: cv2.pointPolygonTest(contour, pt, measureDist) -> retval
|
||||
|
||||
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measureDist )
|
||||
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measureDist)-> double
|
||||
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measure_dist )
|
||||
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measure_dist) -> float
|
||||
|
||||
:param contour: Input contour.
|
||||
|
||||
|
||||
@@ -60,20 +60,20 @@ namespace cv
|
||||
|
||||
//! various border interpolation methods
|
||||
enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,
|
||||
BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP,
|
||||
BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP,
|
||||
BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101,
|
||||
BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT,
|
||||
BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };
|
||||
|
||||
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
|
||||
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
|
||||
CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType );
|
||||
|
||||
/*!
|
||||
The Base Class for 1D or Row-wise Filters
|
||||
|
||||
|
||||
This is the base class for linear or non-linear filters that process 1D data.
|
||||
In particular, such filters are used for the "horizontal" filtering parts in separable filters.
|
||||
|
||||
|
||||
Several functions in OpenCV return Ptr<BaseRowFilter> for the specific types of filters,
|
||||
and those pointers can be used directly or within cv::FilterEngine.
|
||||
*/
|
||||
@@ -93,17 +93,17 @@ public:
|
||||
|
||||
/*!
|
||||
The Base Class for Column-wise Filters
|
||||
|
||||
|
||||
This is the base class for linear or non-linear filters that process columns of 2D arrays.
|
||||
Such filters are used for the "vertical" filtering parts in separable filters.
|
||||
|
||||
|
||||
Several functions in OpenCV return Ptr<BaseColumnFilter> for the specific types of filters,
|
||||
and those pointers can be used directly or within cv::FilterEngine.
|
||||
|
||||
|
||||
Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information,
|
||||
i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset()
|
||||
must be called (e.g. the method is called by cv::FilterEngine)
|
||||
*/
|
||||
*/
|
||||
class CV_EXPORTS BaseColumnFilter
|
||||
{
|
||||
public:
|
||||
@@ -121,15 +121,15 @@ public:
|
||||
|
||||
/*!
|
||||
The Base Class for Non-Separable 2D Filters.
|
||||
|
||||
|
||||
This is the base class for linear or non-linear 2D filters.
|
||||
|
||||
|
||||
Several functions in OpenCV return Ptr<BaseFilter> for the specific types of filters,
|
||||
and those pointers can be used directly or within cv::FilterEngine.
|
||||
|
||||
|
||||
Similar to cv::BaseColumnFilter, the class may have some context information,
|
||||
that should be reset using BaseFilter::reset() method before processing the new array.
|
||||
*/
|
||||
*/
|
||||
class CV_EXPORTS BaseFilter
|
||||
{
|
||||
public:
|
||||
@@ -148,7 +148,7 @@ public:
|
||||
|
||||
/*!
|
||||
The Main Class for Image Filtering.
|
||||
|
||||
|
||||
The class can be used to apply an arbitrary filtering operation to an image.
|
||||
It contains all the necessary intermediate buffers, it computes extrapolated values
|
||||
of the "virtual" pixels outside of the image etc.
|
||||
@@ -156,45 +156,45 @@ public:
|
||||
are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(),
|
||||
cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(),
|
||||
cv::createBoxFilter() and cv::createMorphologyFilter().
|
||||
|
||||
|
||||
Using the class you can process large images by parts and build complex pipelines
|
||||
that include filtering as some of the stages. If all you need is to apply some pre-defined
|
||||
filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc.
|
||||
functions that create FilterEngine internally.
|
||||
|
||||
|
||||
Here is the example on how to use the class to implement Laplacian operator, which is the sum of
|
||||
second-order derivatives. More complex variant for different types is implemented in cv::Laplacian().
|
||||
|
||||
|
||||
\code
|
||||
void laplace_f(const Mat& src, Mat& dst)
|
||||
{
|
||||
CV_Assert( src.type() == CV_32F );
|
||||
// make sure the destination array has the proper size and type
|
||||
dst.create(src.size(), src.type());
|
||||
|
||||
|
||||
// get the derivative and smooth kernels for d2I/dx2.
|
||||
// for d2I/dy2 we could use the same kernels, just swapped
|
||||
Mat kd, ks;
|
||||
getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
|
||||
|
||||
|
||||
// let's process 10 source rows at once
|
||||
int DELTA = std::min(10, src.rows);
|
||||
Ptr<FilterEngine> Fxx = createSeparableLinearFilter(src.type(),
|
||||
dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
|
||||
dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
|
||||
Ptr<FilterEngine> Fyy = createSeparableLinearFilter(src.type(),
|
||||
dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );
|
||||
|
||||
|
||||
int y = Fxx->start(src), dsty = 0, dy = 0;
|
||||
Fyy->start(src);
|
||||
const uchar* sptr = src.data + y*src.step;
|
||||
|
||||
|
||||
// allocate the buffers for the spatial image derivatives;
|
||||
// the buffers need to have more than DELTA rows, because at the
|
||||
// last iteration the output may take max(kd.rows-1,ks.rows-1)
|
||||
// rows more than the input.
|
||||
Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() );
|
||||
Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
|
||||
|
||||
|
||||
// inside the loop we always pass DELTA rows to the filter
|
||||
// (note that the "proceed" method takes care of possibe overflow, since
|
||||
// it was given the actual image height in the "start" method)
|
||||
@@ -241,7 +241,7 @@ public:
|
||||
int srcType, int dstType, int bufType,
|
||||
int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
|
||||
const Scalar& _borderValue=Scalar());
|
||||
//! starts filtering of the specified ROI of an image of size wholeSize.
|
||||
//! starts filtering of the specified ROI of an image of size wholeSize.
|
||||
virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
|
||||
//! starts filtering of the specified ROI of the specified image.
|
||||
virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
|
||||
@@ -256,10 +256,10 @@ public:
|
||||
bool isolated=false);
|
||||
//! returns true if the filter is separable
|
||||
bool isSeparable() const { return (const BaseFilter*)filter2D == 0; }
|
||||
//! returns the number
|
||||
//! returns the number
|
||||
int remainingInputRows() const;
|
||||
int remainingOutputRows() const;
|
||||
|
||||
|
||||
int srcType, dstType, bufType;
|
||||
Size ksize;
|
||||
Point anchor;
|
||||
@@ -276,7 +276,7 @@ public:
|
||||
vector<uchar> constBorderRow;
|
||||
int bufStep, startY, startY0, endY, rowCount, dstY;
|
||||
vector<uchar*> rows;
|
||||
|
||||
|
||||
Ptr<BaseFilter> filter2D;
|
||||
Ptr<BaseRowFilter> rowFilter;
|
||||
Ptr<BaseColumnFilter> columnFilter;
|
||||
@@ -309,16 +309,16 @@ CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
|
||||
//! returns the separable linear filter engine
|
||||
CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
|
||||
InputArray rowKernel, InputArray columnKernel,
|
||||
Point _anchor=Point(-1,-1), double delta=0,
|
||||
int _rowBorderType=BORDER_DEFAULT,
|
||||
int _columnBorderType=-1,
|
||||
const Scalar& _borderValue=Scalar());
|
||||
Point anchor=Point(-1,-1), double delta=0,
|
||||
int rowBorderType=BORDER_DEFAULT,
|
||||
int columnBorderType=-1,
|
||||
const Scalar& borderValue=Scalar());
|
||||
|
||||
//! returns the non-separable linear filter engine
|
||||
CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
|
||||
InputArray kernel, Point _anchor=Point(-1,-1),
|
||||
double delta=0, int _rowBorderType=BORDER_DEFAULT,
|
||||
int _columnBorderType=-1, const Scalar& _borderValue=Scalar());
|
||||
double delta=0, int rowBorderType=BORDER_DEFAULT,
|
||||
int columnBorderType=-1, const Scalar& borderValue=Scalar());
|
||||
|
||||
//! returns the Gaussian kernel with the specified parameters
|
||||
CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
|
||||
@@ -335,7 +335,7 @@ CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky,
|
||||
CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
|
||||
int dx, int dy, int ksize,
|
||||
int borderType=BORDER_DEFAULT );
|
||||
//! returns horizontal 1D box filter
|
||||
//! returns horizontal 1D box filter
|
||||
CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
|
||||
int ksize, int anchor=-1);
|
||||
//! returns vertical 1D box filter
|
||||
@@ -347,11 +347,11 @@ CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksi
|
||||
Point anchor=Point(-1,-1),
|
||||
bool normalize=true,
|
||||
int borderType=BORDER_DEFAULT);
|
||||
|
||||
|
||||
//! returns the Gabor kernel with the specified parameters
|
||||
CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd,
|
||||
double gamma, double psi=CV_PI*0.5, int ktype=CV_64F );
|
||||
|
||||
|
||||
//! type of morphological operation
|
||||
enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE,
|
||||
MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE,
|
||||
@@ -365,15 +365,15 @@ CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int
|
||||
//! returns 2D morphological filter
|
||||
CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray kernel,
|
||||
Point anchor=Point(-1,-1));
|
||||
|
||||
|
||||
//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation.
|
||||
static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }
|
||||
|
||||
//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
|
||||
CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray kernel,
|
||||
Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,
|
||||
int _columnBorderType=-1,
|
||||
const Scalar& _borderValue=morphologyDefaultBorderValue());
|
||||
Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT,
|
||||
int columnBorderType=-1,
|
||||
const Scalar& borderValue=morphologyDefaultBorderValue());
|
||||
|
||||
//! shape of the structuring element
|
||||
enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
|
||||
@@ -382,7 +382,7 @@ CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point
|
||||
|
||||
template<> CV_EXPORTS void Ptr<IplConvKernel>::delete_obj();
|
||||
|
||||
//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
|
||||
//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
|
||||
CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst,
|
||||
int top, int bottom, int left, int right,
|
||||
int borderType, const Scalar& value=Scalar() );
|
||||
@@ -392,7 +392,7 @@ CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
|
||||
//! smooths the image using Gaussian filter.
|
||||
CV_EXPORTS_W void GaussianBlur( InputArray src,
|
||||
OutputArray dst, Size ksize,
|
||||
double sigma1, double sigma2=0,
|
||||
double sigmaX, double sigmaY=0,
|
||||
int borderType=BORDER_DEFAULT );
|
||||
//! smooths the image using bilateral filter
|
||||
CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
|
||||
@@ -418,7 +418,7 @@ CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
|
||||
InputArray kernelX, InputArray kernelY,
|
||||
Point anchor=Point(-1,-1),
|
||||
double delta=0, int borderType=BORDER_DEFAULT );
|
||||
|
||||
|
||||
//! applies generalized Sobel operator to the image
|
||||
CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
|
||||
int dx, int dy, int ksize=3,
|
||||
@@ -452,7 +452,7 @@ CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
|
||||
|
||||
// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices
|
||||
CV_EXPORTS void eigen2x2( const float* a, float* e, int n );
|
||||
|
||||
|
||||
//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
|
||||
CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,
|
||||
int blockSize, int ksize,
|
||||
@@ -483,7 +483,7 @@ CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
|
||||
double rho, double theta, int threshold,
|
||||
double minLineLength=0, double maxLineGap=0 );
|
||||
|
||||
//! finds circles in the grayscale image using 2+1 gradient Hough transform
|
||||
//! finds circles in the grayscale image using 2+1 gradient Hough transform
|
||||
CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles,
|
||||
int method, double dp, double minDist,
|
||||
double param1=100, double param2=100,
|
||||
@@ -494,13 +494,13 @@ CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel,
|
||||
Point anchor=Point(-1,-1), int iterations=1,
|
||||
int borderType=BORDER_CONSTANT,
|
||||
const Scalar& borderValue=morphologyDefaultBorderValue() );
|
||||
|
||||
|
||||
//! dilates the image (applies the local maximum operator)
|
||||
CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel,
|
||||
Point anchor=Point(-1,-1), int iterations=1,
|
||||
int borderType=BORDER_CONSTANT,
|
||||
const Scalar& borderValue=morphologyDefaultBorderValue() );
|
||||
|
||||
|
||||
//! applies an advanced morphological operation to the image
|
||||
CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst,
|
||||
int op, InputArray kernel,
|
||||
@@ -531,7 +531,7 @@ CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,
|
||||
int flags=INTER_LINEAR,
|
||||
int borderMode=BORDER_CONSTANT,
|
||||
const Scalar& borderValue=Scalar());
|
||||
|
||||
|
||||
//! warps the image using perspective transformation
|
||||
CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst,
|
||||
InputArray M, Size dsize,
|
||||
@@ -556,7 +556,7 @@ CV_EXPORTS_W void remap( InputArray src, OutputArray dst,
|
||||
CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2,
|
||||
OutputArray dstmap1, OutputArray dstmap2,
|
||||
int dstmap1type, bool nninterpolation=false );
|
||||
|
||||
|
||||
//! returns 2x3 affine transformation matrix for the planar rotation.
|
||||
CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale );
|
||||
//! returns 3x3 perspective transformation for the corresponding 4 point pairs.
|
||||
@@ -597,12 +597,12 @@ CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
|
||||
CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
|
||||
double alpha, InputArray mask=noArray() );
|
||||
|
||||
//! computes PSNR image/video quality metric
|
||||
//! computes PSNR image/video quality metric
|
||||
CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
|
||||
|
||||
|
||||
CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2, InputArray window = noArray());
|
||||
CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);
|
||||
|
||||
|
||||
//! type of the threshold operation
|
||||
enum { THRESH_BINARY=CV_THRESH_BINARY, THRESH_BINARY_INV=CV_THRESH_BINARY_INV,
|
||||
THRESH_TRUNC=CV_THRESH_TRUNC, THRESH_TOZERO=CV_THRESH_TOZERO,
|
||||
@@ -637,7 +637,7 @@ CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
|
||||
InputArray cameraMatrix,
|
||||
InputArray distCoeffs,
|
||||
InputArray newCameraMatrix=noArray() );
|
||||
|
||||
|
||||
//! initializes maps for cv::remap() to correct lens distortion and optionally rectify the image
|
||||
CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs,
|
||||
InputArray R, InputArray newCameraMatrix,
|
||||
@@ -647,25 +647,25 @@ enum
|
||||
{
|
||||
PROJ_SPHERICAL_ORTHO = 0,
|
||||
PROJ_SPHERICAL_EQRECT = 1
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
//! initializes maps for cv::remap() for wide-angle
|
||||
CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs,
|
||||
Size imageSize, int destImageWidth,
|
||||
int m1type, OutputArray map1, OutputArray map2,
|
||||
int projType=PROJ_SPHERICAL_EQRECT, double alpha=0);
|
||||
|
||||
|
||||
//! returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPricipalPoint=true)
|
||||
CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize=Size(),
|
||||
bool centerPrincipalPoint=false );
|
||||
|
||||
|
||||
//! returns points' coordinates after lens distortion correction
|
||||
CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
|
||||
InputArray cameraMatrix, InputArray distCoeffs,
|
||||
InputArray R=noArray(), InputArray P=noArray());
|
||||
|
||||
template<> CV_EXPORTS void Ptr<CvHistogram>::delete_obj();
|
||||
|
||||
|
||||
//! computes the joint dense histogram for a set of images.
|
||||
CV_EXPORTS void calcHist( const Mat* images, int nimages,
|
||||
const int* channels, InputArray mask,
|
||||
@@ -678,7 +678,7 @@ CV_EXPORTS void calcHist( const Mat* images, int nimages,
|
||||
SparseMat& hist, int dims,
|
||||
const int* histSize, const float** ranges,
|
||||
bool uniform=true, bool accumulate=false );
|
||||
|
||||
|
||||
CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
|
||||
const vector<int>& channels,
|
||||
InputArray mask, OutputArray hist,
|
||||
@@ -694,7 +694,7 @@ CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
|
||||
|
||||
//! computes back projection for the set of images
|
||||
CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
|
||||
const int* channels, const SparseMat& hist,
|
||||
const int* channels, const SparseMat& hist,
|
||||
OutputArray backProject, const float** ranges,
|
||||
double scale=1, bool uniform=true );
|
||||
|
||||
@@ -705,8 +705,8 @@ CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector<int>&
|
||||
|
||||
/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels,
|
||||
InputArray hist, OutputArray dst, Size patchSize,
|
||||
int method, double factor=1 );
|
||||
|
||||
int method, double factor=1 );
|
||||
|
||||
CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector<int>& channels,
|
||||
InputArray hist, OutputArray dst, Size patchSize,
|
||||
int method, double factor=1 );*/
|
||||
@@ -719,7 +719,7 @@ CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int met
|
||||
|
||||
//! normalizes the grayscale image brightness and contrast by normalizing its histogram
|
||||
CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
|
||||
|
||||
|
||||
CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
|
||||
int distType, InputArray cost=noArray(),
|
||||
float* lowerBound=0, OutputArray flow=noArray() );
|
||||
@@ -739,7 +739,7 @@ enum
|
||||
GC_BGD = 0, //!< background
|
||||
GC_FGD = 1, //!< foreground
|
||||
GC_PR_BGD = 2, //!< most probably background
|
||||
GC_PR_FGD = 3 //!< most probably foreground
|
||||
GC_PR_FGD = 3 //!< most probably foreground
|
||||
};
|
||||
|
||||
//! GrabCut algorithm flags
|
||||
@@ -751,7 +751,7 @@ enum
|
||||
};
|
||||
|
||||
//! segments the image using GrabCut algorithm
|
||||
CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
|
||||
CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
|
||||
InputOutputArray bgdModel, InputOutputArray fgdModel,
|
||||
int iterCount, int mode = GC_EVAL );
|
||||
|
||||
@@ -760,7 +760,7 @@ enum
|
||||
DIST_LABEL_CCOMP = 0,
|
||||
DIST_LABEL_PIXEL = 1
|
||||
};
|
||||
|
||||
|
||||
//! builds the discrete Voronoi diagram
|
||||
CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
|
||||
OutputArray labels, int distanceType, int maskSize,
|
||||
@@ -784,27 +784,27 @@ CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
|
||||
Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),
|
||||
int flags=4 );
|
||||
|
||||
|
||||
|
||||
enum
|
||||
{
|
||||
COLOR_BGR2BGRA =0,
|
||||
COLOR_RGB2RGBA =COLOR_BGR2BGRA,
|
||||
|
||||
|
||||
COLOR_BGRA2BGR =1,
|
||||
COLOR_RGBA2RGB =COLOR_BGRA2BGR,
|
||||
|
||||
|
||||
COLOR_BGR2RGBA =2,
|
||||
COLOR_RGB2BGRA =COLOR_BGR2RGBA,
|
||||
|
||||
|
||||
COLOR_RGBA2BGR =3,
|
||||
COLOR_BGRA2RGB =COLOR_RGBA2BGR,
|
||||
|
||||
|
||||
COLOR_BGR2RGB =4,
|
||||
COLOR_RGB2BGR =COLOR_BGR2RGB,
|
||||
|
||||
|
||||
COLOR_BGRA2RGBA =5,
|
||||
COLOR_RGBA2BGRA =COLOR_BGRA2RGBA,
|
||||
|
||||
|
||||
COLOR_BGR2GRAY =6,
|
||||
COLOR_RGB2GRAY =7,
|
||||
COLOR_GRAY2BGR =8,
|
||||
@@ -813,7 +813,7 @@ enum
|
||||
COLOR_GRAY2RGBA =COLOR_GRAY2BGRA,
|
||||
COLOR_BGRA2GRAY =10,
|
||||
COLOR_RGBA2GRAY =11,
|
||||
|
||||
|
||||
COLOR_BGR2BGR565 =12,
|
||||
COLOR_RGB2BGR565 =13,
|
||||
COLOR_BGR5652BGR =14,
|
||||
@@ -822,10 +822,10 @@ enum
|
||||
COLOR_RGBA2BGR565 =17,
|
||||
COLOR_BGR5652BGRA =18,
|
||||
COLOR_BGR5652RGBA =19,
|
||||
|
||||
|
||||
COLOR_GRAY2BGR565 =20,
|
||||
COLOR_BGR5652GRAY =21,
|
||||
|
||||
|
||||
COLOR_BGR2BGR555 =22,
|
||||
COLOR_RGB2BGR555 =23,
|
||||
COLOR_BGR5552BGR =24,
|
||||
@@ -834,86 +834,86 @@ enum
|
||||
COLOR_RGBA2BGR555 =27,
|
||||
COLOR_BGR5552BGRA =28,
|
||||
COLOR_BGR5552RGBA =29,
|
||||
|
||||
|
||||
COLOR_GRAY2BGR555 =30,
|
||||
COLOR_BGR5552GRAY =31,
|
||||
|
||||
|
||||
COLOR_BGR2XYZ =32,
|
||||
COLOR_RGB2XYZ =33,
|
||||
COLOR_XYZ2BGR =34,
|
||||
COLOR_XYZ2RGB =35,
|
||||
|
||||
|
||||
COLOR_BGR2YCrCb =36,
|
||||
COLOR_RGB2YCrCb =37,
|
||||
COLOR_YCrCb2BGR =38,
|
||||
COLOR_YCrCb2RGB =39,
|
||||
|
||||
|
||||
COLOR_BGR2HSV =40,
|
||||
COLOR_RGB2HSV =41,
|
||||
|
||||
|
||||
COLOR_BGR2Lab =44,
|
||||
COLOR_RGB2Lab =45,
|
||||
|
||||
|
||||
COLOR_BayerBG2BGR =46,
|
||||
COLOR_BayerGB2BGR =47,
|
||||
COLOR_BayerRG2BGR =48,
|
||||
COLOR_BayerGR2BGR =49,
|
||||
|
||||
|
||||
COLOR_BayerBG2RGB =COLOR_BayerRG2BGR,
|
||||
COLOR_BayerGB2RGB =COLOR_BayerGR2BGR,
|
||||
COLOR_BayerRG2RGB =COLOR_BayerBG2BGR,
|
||||
COLOR_BayerGR2RGB =COLOR_BayerGB2BGR,
|
||||
|
||||
|
||||
COLOR_BGR2Luv =50,
|
||||
COLOR_RGB2Luv =51,
|
||||
COLOR_BGR2HLS =52,
|
||||
COLOR_RGB2HLS =53,
|
||||
|
||||
|
||||
COLOR_HSV2BGR =54,
|
||||
COLOR_HSV2RGB =55,
|
||||
|
||||
|
||||
COLOR_Lab2BGR =56,
|
||||
COLOR_Lab2RGB =57,
|
||||
COLOR_Luv2BGR =58,
|
||||
COLOR_Luv2RGB =59,
|
||||
COLOR_HLS2BGR =60,
|
||||
COLOR_HLS2RGB =61,
|
||||
|
||||
|
||||
COLOR_BayerBG2BGR_VNG =62,
|
||||
COLOR_BayerGB2BGR_VNG =63,
|
||||
COLOR_BayerRG2BGR_VNG =64,
|
||||
COLOR_BayerGR2BGR_VNG =65,
|
||||
|
||||
|
||||
COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG,
|
||||
COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG,
|
||||
COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG,
|
||||
COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG,
|
||||
|
||||
|
||||
COLOR_BGR2HSV_FULL = 66,
|
||||
COLOR_RGB2HSV_FULL = 67,
|
||||
COLOR_BGR2HLS_FULL = 68,
|
||||
COLOR_RGB2HLS_FULL = 69,
|
||||
|
||||
|
||||
COLOR_HSV2BGR_FULL = 70,
|
||||
COLOR_HSV2RGB_FULL = 71,
|
||||
COLOR_HLS2BGR_FULL = 72,
|
||||
COLOR_HLS2RGB_FULL = 73,
|
||||
|
||||
|
||||
COLOR_LBGR2Lab = 74,
|
||||
COLOR_LRGB2Lab = 75,
|
||||
COLOR_LBGR2Luv = 76,
|
||||
COLOR_LRGB2Luv = 77,
|
||||
|
||||
|
||||
COLOR_Lab2LBGR = 78,
|
||||
COLOR_Lab2LRGB = 79,
|
||||
COLOR_Luv2LBGR = 80,
|
||||
COLOR_Luv2LRGB = 81,
|
||||
|
||||
|
||||
COLOR_BGR2YUV = 82,
|
||||
COLOR_RGB2YUV = 83,
|
||||
COLOR_YUV2BGR = 84,
|
||||
COLOR_YUV2RGB = 85,
|
||||
|
||||
|
||||
COLOR_BayerBG2GRAY = 86,
|
||||
COLOR_BayerGB2GRAY = 87,
|
||||
COLOR_BayerRG2GRAY = 88,
|
||||
@@ -921,7 +921,7 @@ enum
|
||||
|
||||
//YUV 4:2:0 formats family
|
||||
COLOR_YUV2RGB_NV12 = 90,
|
||||
COLOR_YUV2BGR_NV12 = 91,
|
||||
COLOR_YUV2BGR_NV12 = 91,
|
||||
COLOR_YUV2RGB_NV21 = 92,
|
||||
COLOR_YUV2BGR_NV21 = 93,
|
||||
COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
|
||||
@@ -933,7 +933,7 @@ enum
|
||||
COLOR_YUV2BGRA_NV21 = 97,
|
||||
COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
|
||||
COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
|
||||
|
||||
|
||||
COLOR_YUV2RGB_YV12 = 98,
|
||||
COLOR_YUV2BGR_YV12 = 99,
|
||||
COLOR_YUV2RGB_IYUV = 100,
|
||||
@@ -942,7 +942,7 @@ enum
|
||||
COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
|
||||
COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
|
||||
COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
|
||||
|
||||
|
||||
COLOR_YUV2RGBA_YV12 = 102,
|
||||
COLOR_YUV2BGRA_YV12 = 103,
|
||||
COLOR_YUV2RGBA_IYUV = 104,
|
||||
@@ -951,7 +951,7 @@ enum
|
||||
COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
|
||||
COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
|
||||
COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
|
||||
|
||||
|
||||
COLOR_YUV2GRAY_420 = 106,
|
||||
COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
|
||||
COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
|
||||
@@ -960,7 +960,7 @@ enum
|
||||
COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
|
||||
COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
|
||||
COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
|
||||
|
||||
|
||||
//YUV 4:2:2 formats family
|
||||
COLOR_YUV2RGB_UYVY = 107,
|
||||
COLOR_YUV2BGR_UYVY = 108,
|
||||
@@ -970,7 +970,7 @@ enum
|
||||
COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
|
||||
COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
|
||||
COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
|
||||
|
||||
|
||||
COLOR_YUV2RGBA_UYVY = 111,
|
||||
COLOR_YUV2BGRA_UYVY = 112,
|
||||
//COLOR_YUV2RGBA_VYUY = 113,
|
||||
@@ -979,7 +979,7 @@ enum
|
||||
COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
|
||||
COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
|
||||
COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
|
||||
|
||||
|
||||
COLOR_YUV2RGB_YUY2 = 115,
|
||||
COLOR_YUV2BGR_YUY2 = 116,
|
||||
COLOR_YUV2RGB_YVYU = 117,
|
||||
@@ -988,7 +988,7 @@ enum
|
||||
COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
|
||||
COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
|
||||
COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
|
||||
|
||||
|
||||
COLOR_YUV2RGBA_YUY2 = 119,
|
||||
COLOR_YUV2BGRA_YUY2 = 120,
|
||||
COLOR_YUV2RGBA_YVYU = 121,
|
||||
@@ -997,7 +997,7 @@ enum
|
||||
COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
|
||||
COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
|
||||
COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
|
||||
|
||||
|
||||
COLOR_YUV2GRAY_UYVY = 123,
|
||||
COLOR_YUV2GRAY_YUY2 = 124,
|
||||
//COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY,
|
||||
@@ -1006,11 +1006,11 @@ enum
|
||||
COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
|
||||
COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
|
||||
COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
|
||||
|
||||
|
||||
COLOR_COLORCVT_MAX = 125
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
//! converts image from one color space to another
|
||||
CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 );
|
||||
|
||||
@@ -1027,7 +1027,7 @@ public:
|
||||
Moments( const CvMoments& moments );
|
||||
//! the conversion to CvMoments
|
||||
operator CvMoments() const;
|
||||
|
||||
|
||||
//! spatial moments
|
||||
CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
|
||||
//! central moments
|
||||
@@ -1100,7 +1100,7 @@ CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false );
|
||||
CV_EXPORTS_W RotatedRect minAreaRect( InputArray points );
|
||||
//! computes the minimal enclosing circle for a set of points
|
||||
CV_EXPORTS_W void minEnclosingCircle( InputArray points,
|
||||
CV_OUT Point2f& center, CV_OUT float& radius );
|
||||
CV_OUT Point2f& center, CV_OUT float& radius );
|
||||
//! matches two contours using one of the available algorithms
|
||||
CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2,
|
||||
int method, double parameter );
|
||||
@@ -1125,7 +1125,7 @@ CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
|
||||
double param, double reps, double aeps );
|
||||
//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary
|
||||
CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );
|
||||
|
||||
|
||||
|
||||
class CV_EXPORTS_W Subdiv2D
|
||||
{
|
||||
@@ -1138,7 +1138,7 @@ public:
|
||||
PTLOC_VERTEX = 1,
|
||||
PTLOC_ON_EDGE = 2
|
||||
};
|
||||
|
||||
|
||||
enum
|
||||
{
|
||||
NEXT_AROUND_ORG = 0x00,
|
||||
@@ -1150,30 +1150,30 @@ public:
|
||||
PREV_AROUND_LEFT = 0x20,
|
||||
PREV_AROUND_RIGHT = 0x02
|
||||
};
|
||||
|
||||
|
||||
CV_WRAP Subdiv2D();
|
||||
CV_WRAP Subdiv2D(Rect rect);
|
||||
CV_WRAP void initDelaunay(Rect rect);
|
||||
|
||||
|
||||
CV_WRAP int insert(Point2f pt);
|
||||
CV_WRAP void insert(const vector<Point2f>& ptvec);
|
||||
CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex);
|
||||
|
||||
|
||||
CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0);
|
||||
CV_WRAP void getEdgeList(CV_OUT vector<Vec4f>& edgeList) const;
|
||||
CV_WRAP void getTriangleList(CV_OUT vector<Vec6f>& triangleList) const;
|
||||
CV_WRAP void getVoronoiFacetList(const vector<int>& idx, CV_OUT vector<vector<Point2f> >& facetList,
|
||||
CV_OUT vector<Point2f>& facetCenters);
|
||||
|
||||
|
||||
CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const;
|
||||
|
||||
|
||||
CV_WRAP int getEdge( int edge, int nextEdgeType ) const;
|
||||
CV_WRAP int nextEdge(int edge) const;
|
||||
CV_WRAP int rotateEdge(int edge, int rotate) const;
|
||||
CV_WRAP int symEdge(int edge) const;
|
||||
CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const;
|
||||
CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const;
|
||||
|
||||
|
||||
protected:
|
||||
int newEdge();
|
||||
void deleteEdge(int edge);
|
||||
@@ -1187,7 +1187,7 @@ protected:
|
||||
void calcVoronoi();
|
||||
void clearVoronoi();
|
||||
void checkSubdiv() const;
|
||||
|
||||
|
||||
struct CV_EXPORTS Vertex
|
||||
{
|
||||
Vertex();
|
||||
@@ -1206,13 +1206,13 @@ protected:
|
||||
int next[4];
|
||||
int pt[4];
|
||||
};
|
||||
|
||||
|
||||
vector<Vertex> vtx;
|
||||
vector<QuadEdge> qedges;
|
||||
int freeQEdge;
|
||||
int freePoint;
|
||||
bool validGeometry;
|
||||
|
||||
|
||||
int recentEdge;
|
||||
Point2f topLeft;
|
||||
Point2f bottomRight;
|
||||
|
||||
@@ -351,8 +351,8 @@ CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );
|
||||
a tree of polygonal curves (contours) */
|
||||
CVAPI(CvSeq*) cvApproxPoly( const void* src_seq,
|
||||
int header_size, CvMemStorage* storage,
|
||||
int method, double parameter,
|
||||
int parameter2 CV_DEFAULT(0));
|
||||
int method, double eps,
|
||||
int recursive CV_DEFAULT(0));
|
||||
|
||||
/* Calculates perimeter of a contour or length of a part of contour */
|
||||
CVAPI(double) cvArcLength( const void* curve,
|
||||
|
||||
@@ -182,37 +182,37 @@ enum
|
||||
CV_BayerGB2BGR_VNG =63,
|
||||
CV_BayerRG2BGR_VNG =64,
|
||||
CV_BayerGR2BGR_VNG =65,
|
||||
|
||||
|
||||
CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG,
|
||||
CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG,
|
||||
CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG,
|
||||
CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG,
|
||||
|
||||
|
||||
CV_BGR2HSV_FULL = 66,
|
||||
CV_RGB2HSV_FULL = 67,
|
||||
CV_BGR2HLS_FULL = 68,
|
||||
CV_RGB2HLS_FULL = 69,
|
||||
|
||||
|
||||
CV_HSV2BGR_FULL = 70,
|
||||
CV_HSV2RGB_FULL = 71,
|
||||
CV_HLS2BGR_FULL = 72,
|
||||
CV_HLS2RGB_FULL = 73,
|
||||
|
||||
|
||||
CV_LBGR2Lab = 74,
|
||||
CV_LRGB2Lab = 75,
|
||||
CV_LBGR2Luv = 76,
|
||||
CV_LRGB2Luv = 77,
|
||||
|
||||
|
||||
CV_Lab2LBGR = 78,
|
||||
CV_Lab2LRGB = 79,
|
||||
CV_Luv2LBGR = 80,
|
||||
CV_Luv2LRGB = 81,
|
||||
|
||||
|
||||
CV_BGR2YUV = 82,
|
||||
CV_RGB2YUV = 83,
|
||||
CV_YUV2BGR = 84,
|
||||
CV_YUV2RGB = 85,
|
||||
|
||||
|
||||
CV_BayerBG2GRAY = 86,
|
||||
CV_BayerGB2GRAY = 87,
|
||||
CV_BayerRG2GRAY = 88,
|
||||
@@ -220,7 +220,7 @@ enum
|
||||
|
||||
//YUV 4:2:0 formats family
|
||||
CV_YUV2RGB_NV12 = 90,
|
||||
CV_YUV2BGR_NV12 = 91,
|
||||
CV_YUV2BGR_NV12 = 91,
|
||||
CV_YUV2RGB_NV21 = 92,
|
||||
CV_YUV2BGR_NV21 = 93,
|
||||
CV_YUV420sp2RGB = CV_YUV2RGB_NV21,
|
||||
@@ -232,7 +232,7 @@ enum
|
||||
CV_YUV2BGRA_NV21 = 97,
|
||||
CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21,
|
||||
CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21,
|
||||
|
||||
|
||||
CV_YUV2RGB_YV12 = 98,
|
||||
CV_YUV2BGR_YV12 = 99,
|
||||
CV_YUV2RGB_IYUV = 100,
|
||||
@@ -241,7 +241,7 @@ enum
|
||||
CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV,
|
||||
CV_YUV420p2RGB = CV_YUV2RGB_YV12,
|
||||
CV_YUV420p2BGR = CV_YUV2BGR_YV12,
|
||||
|
||||
|
||||
CV_YUV2RGBA_YV12 = 102,
|
||||
CV_YUV2BGRA_YV12 = 103,
|
||||
CV_YUV2RGBA_IYUV = 104,
|
||||
@@ -250,7 +250,7 @@ enum
|
||||
CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV,
|
||||
CV_YUV420p2RGBA = CV_YUV2RGBA_YV12,
|
||||
CV_YUV420p2BGRA = CV_YUV2BGRA_YV12,
|
||||
|
||||
|
||||
CV_YUV2GRAY_420 = 106,
|
||||
CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420,
|
||||
CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420,
|
||||
@@ -259,7 +259,7 @@ enum
|
||||
CV_YUV2GRAY_I420 = CV_YUV2GRAY_420,
|
||||
CV_YUV420sp2GRAY = CV_YUV2GRAY_420,
|
||||
CV_YUV420p2GRAY = CV_YUV2GRAY_420,
|
||||
|
||||
|
||||
//YUV 4:2:2 formats family
|
||||
CV_YUV2RGB_UYVY = 107,
|
||||
CV_YUV2BGR_UYVY = 108,
|
||||
@@ -269,7 +269,7 @@ enum
|
||||
CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY,
|
||||
CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY,
|
||||
CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY,
|
||||
|
||||
|
||||
CV_YUV2RGBA_UYVY = 111,
|
||||
CV_YUV2BGRA_UYVY = 112,
|
||||
//CV_YUV2RGBA_VYUY = 113,
|
||||
@@ -278,7 +278,7 @@ enum
|
||||
CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY,
|
||||
CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY,
|
||||
CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY,
|
||||
|
||||
|
||||
CV_YUV2RGB_YUY2 = 115,
|
||||
CV_YUV2BGR_YUY2 = 116,
|
||||
CV_YUV2RGB_YVYU = 117,
|
||||
@@ -287,7 +287,7 @@ enum
|
||||
CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2,
|
||||
CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2,
|
||||
CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2,
|
||||
|
||||
|
||||
CV_YUV2RGBA_YUY2 = 119,
|
||||
CV_YUV2BGRA_YUY2 = 120,
|
||||
CV_YUV2RGBA_YVYU = 121,
|
||||
@@ -296,7 +296,7 @@ enum
|
||||
CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2,
|
||||
CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2,
|
||||
CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2,
|
||||
|
||||
|
||||
CV_YUV2GRAY_UYVY = 123,
|
||||
CV_YUV2GRAY_YUY2 = 124,
|
||||
//CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY,
|
||||
@@ -305,7 +305,7 @@ enum
|
||||
CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2,
|
||||
CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2,
|
||||
CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2,
|
||||
|
||||
|
||||
CV_COLORCVT_MAX = 125
|
||||
};
|
||||
|
||||
|
||||
@@ -57,8 +57,6 @@ int icvIntersectLines( double x1, double dx1, double y1, double dy1,
|
||||
double* t2 );
|
||||
|
||||
|
||||
void icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double* a, double* b, double* c );
|
||||
|
||||
void icvIntersectLines3( double* a0, double* b0, double* c0,
|
||||
double* a1, double* b1, double* c1,
|
||||
CvPoint2D32f* point );
|
||||
|
||||
@@ -70,15 +70,15 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
|
||||
CvChainPtReader reader;
|
||||
CvSeqWriter writer;
|
||||
CvPoint pt = chain->origin;
|
||||
|
||||
|
||||
CV_Assert( CV_IS_SEQ_CHAIN_CONTOUR( chain ));
|
||||
CV_Assert( header_size >= (int)sizeof(CvContour) );
|
||||
|
||||
|
||||
cvStartWriteSeq( (chain->flags & ~CV_SEQ_ELTYPE_MASK) | CV_SEQ_ELTYPE_POINT,
|
||||
header_size, sizeof( CvPoint ), storage, &writer );
|
||||
|
||||
|
||||
if( chain->total == 0 )
|
||||
{
|
||||
{
|
||||
CV_WRITE_SEQ_ELEM( pt, writer );
|
||||
return cvEndWriteSeq( &writer );
|
||||
}
|
||||
@@ -380,13 +380,13 @@ CV_IMPL CvSeq*
|
||||
cvApproxChains( CvSeq* src_seq,
|
||||
CvMemStorage* storage,
|
||||
int method,
|
||||
double /*parameter*/,
|
||||
int minimal_perimeter,
|
||||
double /*parameter*/,
|
||||
int minimal_perimeter,
|
||||
int recursive )
|
||||
{
|
||||
CvSeq *prev_contour = 0, *parent = 0;
|
||||
CvSeq *dst_seq = 0;
|
||||
|
||||
|
||||
if( !src_seq || !storage )
|
||||
CV_Error( CV_StsNullPtr, "" );
|
||||
if( method > CV_CHAIN_APPROX_TC89_KCOS || method <= 0 || minimal_perimeter < 0 )
|
||||
@@ -399,7 +399,7 @@ cvApproxChains( CvSeq* src_seq,
|
||||
if( len >= minimal_perimeter )
|
||||
{
|
||||
CvSeq *contour = 0;
|
||||
|
||||
|
||||
switch( method )
|
||||
{
|
||||
case CV_CHAIN_APPROX_NONE:
|
||||
@@ -471,7 +471,7 @@ cvApproxChains( CvSeq* src_seq,
|
||||
|
||||
/* the version for integer point coordinates */
|
||||
template<typename T> static CvSeq*
|
||||
icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
CvMemStorage* storage, double eps )
|
||||
{
|
||||
typedef cv::Point_<T> PT;
|
||||
@@ -486,7 +486,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
CvMemStorage* temp_storage = 0;
|
||||
CvSeq* stack = 0;
|
||||
CvSeq* dst_contour;
|
||||
|
||||
|
||||
assert( CV_SEQ_ELTYPE(src_contour) == cv::DataType<PT>::type );
|
||||
cvStartWriteSeq( src_contour->flags, header_size, sizeof(pt), storage, &writer );
|
||||
|
||||
@@ -518,7 +518,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
init_iters = 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if( is_closed )
|
||||
{
|
||||
/* 1. Find approximately two farthest points of the contour */
|
||||
@@ -629,10 +629,10 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
CV_WRITE_SEQ_ELEM( end_pt, writer );
|
||||
|
||||
dst_contour = cvEndWriteSeq( &writer );
|
||||
|
||||
|
||||
// last stage: do final clean-up of the approximated contour -
|
||||
// remove extra points on the [almost] stright lines.
|
||||
|
||||
// remove extra points on the [almost] stright lines.
|
||||
|
||||
cvStartReadSeq( dst_contour, &reader, is_closed );
|
||||
CV_READ_SEQ_ELEM( start_pt, reader );
|
||||
|
||||
@@ -675,7 +675,7 @@ icvApproxPolyDP( CvSeq* src_contour, int header_size,
|
||||
|
||||
CV_IMPL CvSeq*
|
||||
cvApproxPoly( const void* array, int header_size,
|
||||
CvMemStorage* storage, int method,
|
||||
CvMemStorage* storage, int method,
|
||||
double parameter, int parameter2 )
|
||||
{
|
||||
CvSeq* dst_seq = 0;
|
||||
|
||||
@@ -159,7 +159,7 @@ typedef struct _CvContourScanner
|
||||
external contours and holes),
|
||||
3 - full hierarchy;
|
||||
4 - connected components of a multi-level image
|
||||
*/
|
||||
*/
|
||||
int subst_flag;
|
||||
int seq_type1; /* type of fetched contours */
|
||||
int header_size1; /* hdr size of fetched contours */
|
||||
@@ -190,7 +190,7 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
|
||||
|
||||
if( CV_MAT_TYPE(mat->type) == CV_32SC1 && mode == CV_RETR_CCOMP )
|
||||
mode = CV_RETR_FLOODFILL;
|
||||
|
||||
|
||||
if( !((CV_IS_MASK_ARR( mat ) && mode < CV_RETR_FLOODFILL) ||
|
||||
(CV_MAT_TYPE(mat->type) == CV_32SC1 && mode == CV_RETR_FLOODFILL)) )
|
||||
CV_Error( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 and 32sC1 images" );
|
||||
@@ -207,7 +207,7 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
|
||||
|
||||
CvContourScanner scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
|
||||
memset( scanner, 0, sizeof(*scanner) );
|
||||
|
||||
|
||||
scanner->storage1 = scanner->storage2 = storage;
|
||||
scanner->img0 = (schar *) img;
|
||||
scanner->img = (schar *) (img + step);
|
||||
@@ -805,13 +805,13 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
|
||||
const int new_flag = (int)((unsigned)INT_MIN >> 1);
|
||||
const int value_mask = ~(right_flag | new_flag);
|
||||
const int ccomp_val = *i0 & value_mask;
|
||||
|
||||
|
||||
/* initialize local state */
|
||||
CV_INIT_3X3_DELTAS( deltas, step, 1 );
|
||||
memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
|
||||
|
||||
|
||||
s_end = s = is_hole ? 0 : 4;
|
||||
|
||||
|
||||
do
|
||||
{
|
||||
s = (s - 1) & 7;
|
||||
@@ -820,9 +820,9 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
|
||||
break;
|
||||
}
|
||||
while( s != s_end );
|
||||
|
||||
|
||||
i3 = i0;
|
||||
|
||||
|
||||
/* check single pixel domain */
|
||||
if( s != s_end )
|
||||
{
|
||||
@@ -830,17 +830,17 @@ icvTraceContour_32s( int *ptr, int step, int *stop_ptr, int is_hole )
|
||||
for( ;; )
|
||||
{
|
||||
s_end = s;
|
||||
|
||||
|
||||
for( ;; )
|
||||
{
|
||||
i4 = i3 + deltas[++s];
|
||||
if( (*i4 & value_mask) == ccomp_val )
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
if( i3 == stop_ptr || (i4 == i0 && i3 == i1) )
|
||||
break;
|
||||
|
||||
|
||||
i3 = i4;
|
||||
s = (s + 4) & 7;
|
||||
} /* end of border following loop */
|
||||
@@ -869,24 +869,24 @@ icvFetchContourEx_32s( int* ptr,
|
||||
const int ccomp_val = *i0 & value_mask;
|
||||
const int nbd0 = ccomp_val | new_flag;
|
||||
const int nbd1 = nbd0 | right_flag;
|
||||
|
||||
|
||||
assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE );
|
||||
|
||||
|
||||
/* initialize local state */
|
||||
CV_INIT_3X3_DELTAS( deltas, step, 1 );
|
||||
memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] ));
|
||||
|
||||
|
||||
/* initialize writer */
|
||||
cvStartAppendToSeq( contour, &writer );
|
||||
|
||||
|
||||
if( method < 0 )
|
||||
((CvChain *)contour)->origin = pt;
|
||||
|
||||
|
||||
rect.x = rect.width = pt.x;
|
||||
rect.y = rect.height = pt.y;
|
||||
|
||||
|
||||
s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4;
|
||||
|
||||
|
||||
do
|
||||
{
|
||||
s = (s - 1) & 7;
|
||||
@@ -895,7 +895,7 @@ icvFetchContourEx_32s( int* ptr,
|
||||
break;
|
||||
}
|
||||
while( s != s_end );
|
||||
|
||||
|
||||
if( s == s_end ) /* single pixel domain */
|
||||
{
|
||||
*i0 = nbd1;
|
||||
@@ -908,12 +908,12 @@ icvFetchContourEx_32s( int* ptr,
|
||||
{
|
||||
i3 = i0;
|
||||
prev_s = s ^ 4;
|
||||
|
||||
|
||||
/* follow border */
|
||||
for( ;; )
|
||||
{
|
||||
s_end = s;
|
||||
|
||||
|
||||
for( ;; )
|
||||
{
|
||||
i4 = i3 + deltas[++s];
|
||||
@@ -921,7 +921,7 @@ icvFetchContourEx_32s( int* ptr,
|
||||
break;
|
||||
}
|
||||
s &= 7;
|
||||
|
||||
|
||||
/* check "right" bound */
|
||||
if( (unsigned) (s - 1) < (unsigned) s_end )
|
||||
{
|
||||
@@ -931,7 +931,7 @@ icvFetchContourEx_32s( int* ptr,
|
||||
{
|
||||
*i3 = nbd0;
|
||||
}
|
||||
|
||||
|
||||
if( method < 0 )
|
||||
{
|
||||
schar _s = (schar) s;
|
||||
@@ -941,7 +941,7 @@ icvFetchContourEx_32s( int* ptr,
|
||||
{
|
||||
CV_WRITE_SEQ_ELEM( pt, writer );
|
||||
}
|
||||
|
||||
|
||||
if( s != prev_s )
|
||||
{
|
||||
/* update bounds */
|
||||
@@ -949,37 +949,37 @@ icvFetchContourEx_32s( int* ptr,
|
||||
rect.x = pt.x;
|
||||
else if( pt.x > rect.width )
|
||||
rect.width = pt.x;
|
||||
|
||||
|
||||
if( pt.y < rect.y )
|
||||
rect.y = pt.y;
|
||||
else if( pt.y > rect.height )
|
||||
rect.height = pt.y;
|
||||
}
|
||||
|
||||
|
||||
prev_s = s;
|
||||
pt.x += icvCodeDeltas[s].x;
|
||||
pt.y += icvCodeDeltas[s].y;
|
||||
|
||||
|
||||
if( i4 == i0 && i3 == i1 ) break;
|
||||
|
||||
|
||||
i3 = i4;
|
||||
s = (s + 4) & 7;
|
||||
} /* end of border following loop */
|
||||
}
|
||||
|
||||
|
||||
rect.width -= rect.x - 1;
|
||||
rect.height -= rect.y - 1;
|
||||
|
||||
|
||||
cvEndWriteSeq( &writer );
|
||||
|
||||
|
||||
if( _method != CV_CHAIN_CODE )
|
||||
((CvContour*)contour)->rect = rect;
|
||||
|
||||
|
||||
assert( (writer.seq->total == 0 && writer.seq->first == 0) ||
|
||||
writer.seq->total > writer.seq->first->count ||
|
||||
(writer.seq->first->prev == writer.seq->first &&
|
||||
writer.seq->first->next == writer.seq->first) );
|
||||
|
||||
|
||||
if( _rect ) *_rect = rect;
|
||||
}
|
||||
|
||||
@@ -1005,7 +1005,7 @@ cvFindNextContour( CvContourScanner scanner )
|
||||
int nbd = scanner->nbd;
|
||||
int prev = img[x - 1];
|
||||
int new_mask = -2;
|
||||
|
||||
|
||||
if( mode == CV_RETR_FLOODFILL )
|
||||
{
|
||||
prev = ((int*)img)[x - 1];
|
||||
@@ -1017,13 +1017,13 @@ cvFindNextContour( CvContourScanner scanner )
|
||||
int* img0_i = 0;
|
||||
int* img_i = 0;
|
||||
int p = 0;
|
||||
|
||||
|
||||
if( mode == CV_RETR_FLOODFILL )
|
||||
{
|
||||
img0_i = (int*)img0;
|
||||
img_i = (int*)img;
|
||||
}
|
||||
|
||||
|
||||
for( ; x < width; x++ )
|
||||
{
|
||||
if( img_i )
|
||||
@@ -1036,10 +1036,10 @@ cvFindNextContour( CvContourScanner scanner )
|
||||
for( ; x < width && (p = img[x]) == prev; x++ )
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
if( x >= width )
|
||||
break;
|
||||
|
||||
|
||||
{
|
||||
_CvContourInfo *par_info = 0;
|
||||
_CvContourInfo *l_cinfo = 0;
|
||||
@@ -1053,7 +1053,7 @@ cvFindNextContour( CvContourScanner scanner )
|
||||
{
|
||||
/* check hole */
|
||||
if( (!img_i && (p != 0 || prev < 1)) ||
|
||||
(img_i && ((prev & new_mask) != 0 || (p & new_mask) != 0)))
|
||||
(img_i && ((prev & new_mask) != 0 || (p & new_mask) != 0)))
|
||||
goto resume_scan;
|
||||
|
||||
if( prev & new_mask )
|
||||
@@ -1219,7 +1219,7 @@ cvFindNextContour( CvContourScanner scanner )
|
||||
return l_cinfo->contour;
|
||||
|
||||
resume_scan:
|
||||
|
||||
|
||||
prev = p;
|
||||
/* update lnbd */
|
||||
if( prev & -2 )
|
||||
@@ -1663,7 +1663,7 @@ cvFindContours( void* img, CvMemStorage* storage,
|
||||
|
||||
if( !firstContour )
|
||||
CV_Error( CV_StsNullPtr, "NULL double CvSeq pointer" );
|
||||
|
||||
|
||||
*firstContour = 0;
|
||||
|
||||
if( method == CV_LINK_RUNS )
|
||||
@@ -1733,7 +1733,7 @@ void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours,
|
||||
{
|
||||
_hierarchy.create(1, total, CV_32SC4, -1, true);
|
||||
Vec4i* hierarchy = _hierarchy.getMat().ptr<Vec4i>();
|
||||
|
||||
|
||||
it = all_contours.begin();
|
||||
for( i = 0; i < total; i++, ++it )
|
||||
{
|
||||
@@ -1934,7 +1934,7 @@ double cv::matchShapes( InputArray _contour1,
|
||||
CV_Assert(contour1.checkVector(2) >= 0 && contour2.checkVector(2) >= 0 &&
|
||||
(contour1.depth() == CV_32F || contour1.depth() == CV_32S) &&
|
||||
contour1.depth() == contour2.depth());
|
||||
|
||||
|
||||
CvMat c1 = Mat(contour1), c2 = Mat(contour2);
|
||||
return cvMatchShapes(&c1, &c2, method, parameter);
|
||||
}
|
||||
@@ -1945,13 +1945,13 @@ void cv::convexHull( InputArray _points, OutputArray _hull, bool clockwise, bool
|
||||
Mat points = _points.getMat();
|
||||
int nelems = points.checkVector(2), depth = points.depth();
|
||||
CV_Assert(nelems >= 0 && (depth == CV_32F || depth == CV_32S));
|
||||
|
||||
|
||||
if( nelems == 0 )
|
||||
{
|
||||
_hull.release();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
returnPoints = !_hull.fixedType() ? returnPoints : _hull.type() != CV_32S;
|
||||
Mat hull(nelems, 1, returnPoints ? CV_MAKETYPE(depth, 2) : CV_32S);
|
||||
CvMat _cpoints = points, _chull = hull;
|
||||
@@ -1970,23 +1970,23 @@ void cv::convexityDefects( InputArray _points, InputArray _hull, OutputArray _de
|
||||
Mat hull = _hull.getMat();
|
||||
CV_Assert( hull.checkVector(1, CV_32S) > 2 );
|
||||
Ptr<CvMemStorage> storage = cvCreateMemStorage();
|
||||
|
||||
|
||||
CvMat c_points = points, c_hull = hull;
|
||||
CvSeq* seq = cvConvexityDefects(&c_points, &c_hull, storage);
|
||||
int i, n = seq->total;
|
||||
|
||||
|
||||
if( n == 0 )
|
||||
{
|
||||
_defects.release();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
_defects.create(n, 1, CV_32SC4);
|
||||
Mat defects = _defects.getMat();
|
||||
|
||||
|
||||
SeqIterator<CvConvexityDefect> it = Seq<CvConvexityDefect>(seq).begin();
|
||||
CvPoint* ptorg = (CvPoint*)points.data;
|
||||
|
||||
|
||||
for( i = 0; i < n; i++, ++it )
|
||||
{
|
||||
CvConvexityDefect& d = *it;
|
||||
@@ -2034,9 +2034,9 @@ void cv::fitLine( InputArray _points, OutputArray _line, int distType,
|
||||
CvMat _cpoints = points.reshape(2 + (int)is3d);
|
||||
float line[6];
|
||||
cvFitLine(&_cpoints, distType, param, reps, aeps, &line[0]);
|
||||
|
||||
|
||||
int out_size = (is2d)?( (is3d)? (points.channels() * points.rows * 2) : 4 ): 6;
|
||||
|
||||
|
||||
_line.create(out_size, 1, CV_32F, -1, true);
|
||||
Mat l = _line.getMat();
|
||||
CV_Assert( l.isContinuous() );
|
||||
|
||||
Reference in New Issue
Block a user