Merge remote-tracking branch 'refs/remotes/upstream/master'
3rdparty/libpng/CMakeLists.txt
@@ -14,7 +14,7 @@ ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIRS})
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)

if(NEON AND CMAKE_SIZEOF_VOID_P EQUAL 4)
if(NEON AND ARM)
list(APPEND lib_srcs arm/filter_neon.S)
add_definitions(-DPNG_ARM_NEON)
endif()
@@ -225,8 +225,8 @@ OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions"
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
@@ -80,11 +80,18 @@ if(CUDA_FOUND)

if(NOT DEFINED __cuda_arch_bin)
if(ANDROID)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
if(ARM)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
elseif(AARCH64)
set(__cuda_arch_bin "5.2")
set(__cuda_arch_ptx "")
endif()
else()
if(${CUDA_VERSION} VERSION_LESS "5.0")
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
elseif(${CUDA_VERSION} VERSION_GREATER "6.5")
set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5")
else()
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0 3.5")
endif()
@@ -107,8 +107,10 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
set(X86_64 1)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
set(X86 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|ARM.*")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
set(ARM 1)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
set(AARCH64 1)
endif()
@@ -21,6 +21,7 @@
# OPENCV_MODULE_${the_module}_IS_PART_OF_WORLD
# OPENCV_MODULE_${the_module}_CUDA_OBJECTS - compiled CUDA objects list
# OPENCV_MODULE_${the_module}_CHILDREN - list of submodules for compound modules (cmake >= 2.8.8)
# OPENCV_MODULE_${the_module}_WRAPPERS - list of wrappers supporting this module
# HAVE_${the_module} - for fast check of module availability

# To control the setup of the module you could also set:
@@ -60,6 +61,7 @@ foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MOD
unset(OPENCV_MODULE_${mod}_PRIVATE_REQ_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_PRIVATE_OPT_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_LINK_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_WRAPPERS CACHE)
endforeach()

# clean modules info which needs to be recalculated
@@ -72,7 +74,7 @@ unset(OPENCV_WORLD_MODULES CACHE)

# adds dependencies to OpenCV module
# Usage:
# add_dependencies(opencv_<name> [REQUIRED] [<list of dependencies>] [OPTIONAL <list of modules>])
# add_dependencies(opencv_<name> [REQUIRED] [<list of dependencies>] [OPTIONAL <list of modules>] [WRAP <list of wrappers>])
# Notes:
# * <list of dependencies> - can include full names of modules or full pathes to shared/static libraries or cmake targets
macro(ocv_add_dependencies full_modname)
@@ -87,16 +89,28 @@ macro(ocv_add_dependencies full_modname)
set(__depsvar OPENCV_MODULE_${full_modname}_PRIVATE_REQ_DEPS)
elseif(d STREQUAL "PRIVATE_OPTIONAL")
set(__depsvar OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS)
elseif(d STREQUAL "WRAP")
set(__depsvar OPENCV_MODULE_${full_modname}_WRAPPERS)
else()
list(APPEND ${__depsvar} "${d}")
endif()
endforeach()
unset(__depsvar)

# hack for python
set(__python_idx)
list(FIND OPENCV_MODULE_${full_modname}_WRAPPERS "python" __python_idx)
if (NOT __python_idx EQUAL -1)
list(REMOVE_ITEM OPENCV_MODULE_${full_modname}_WRAPPERS "python")
list(APPEND OPENCV_MODULE_${full_modname}_WRAPPERS "python2" "python3")
endif()
unset(__python_idx)

ocv_list_unique(OPENCV_MODULE_${full_modname}_REQ_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_OPT_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_PRIVATE_REQ_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_WRAPPERS)

set(OPENCV_MODULE_${full_modname}_REQ_DEPS ${OPENCV_MODULE_${full_modname}_REQ_DEPS}
CACHE INTERNAL "Required dependencies of ${full_modname} module")
@@ -106,11 +120,13 @@ macro(ocv_add_dependencies full_modname)
CACHE INTERNAL "Required private dependencies of ${full_modname} module")
set(OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS ${OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS}
CACHE INTERNAL "Optional private dependencies of ${full_modname} module")
set(OPENCV_MODULE_${full_modname}_WRAPPERS ${OPENCV_MODULE_${full_modname}_WRAPPERS}
CACHE INTERNAL "List of wrappers supporting module ${full_modname}")
endmacro()

# declare new OpenCV module in current folder
# Usage:
# ocv_add_module(<name> [INTERNAL|BINDINGS] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>])
# ocv_add_module(<name> [INTERNAL|BINDINGS] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>] [WRAP <list of wrappers>])
# Example:
# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cudev)
macro(ocv_add_module _name)
@@ -181,6 +197,11 @@ macro(ocv_add_module _name)
# add submodules if any
set(OPENCV_MODULE_${the_module}_CHILDREN "${OPENCV_MODULE_CHILDREN}" CACHE INTERNAL "List of ${the_module} submodules")

# add reverse wrapper dependencies
foreach (wrapper ${OPENCV_MODULE_${the_module}_WRAPPERS})
ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
endforeach()

# stop processing of current file
return()
else()
@@ -796,7 +817,7 @@ endmacro()
# short command for adding simple OpenCV module
# see ocv_add_module for argument details
# Usage:
# ocv_define_module(module_name [INTERNAL] [EXCLUDE_CUDA] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>])
# ocv_define_module(module_name [INTERNAL] [EXCLUDE_CUDA] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>] [WRAP <list of wrappers>])
macro(ocv_define_module module_name)
ocv_debug_message("ocv_define_module(" ${module_name} ${ARGN} ")")
set(_argn ${ARGN})
New files:
data/haarcascades_cuda/haarcascade_eye.xml
data/haarcascades_cuda/haarcascade_eye_tree_eyeglasses.xml
data/haarcascades_cuda/haarcascade_frontalface_alt.xml
data/haarcascades_cuda/haarcascade_frontalface_alt2.xml
data/haarcascades_cuda/haarcascade_frontalface_alt_tree.xml
data/haarcascades_cuda/haarcascade_frontalface_default.xml
data/haarcascades_cuda/haarcascade_fullbody.xml
data/haarcascades_cuda/haarcascade_lefteye_2splits.xml
data/haarcascades_cuda/haarcascade_lowerbody.xml
data/haarcascades_cuda/haarcascade_profileface.xml
data/haarcascades_cuda/haarcascade_righteye_2splits.xml
data/haarcascades_cuda/haarcascade_smile.xml
data/haarcascades_cuda/haarcascade_upperbody.xml
@@ -30,7 +30,7 @@ import cv2
im = cv2.imread('test.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
@endcode
See, there are three arguments in the **cv2.findContours()** function: the first is the source image, the second
is the contour retrieval mode, and the third is the contour approximation method. It outputs the contours and
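The hunk above reflects the OpenCV 3.x Python API, in which **cv2.findContours()** returns three values instead of two. A minimal sketch of the updated unpacking, assuming an OpenCV 3.x build (the synthetic test image is only for illustration, so no file on disk is needed):
@code{.py}
import cv2
import numpy as np

# Synthetic binary image containing a single filled white square.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (50, 50), (150, 150), 255, -1)

# OpenCV 3.x returns (modified image, contours, hierarchy);
# OpenCV 2.4 returned only (contours, hierarchy).
im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(len(contours))  # -> 1
@endcode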
New binary files (images):
doc/tutorials/imgproc/images/Morphology_3_Tutorial_Cover.jpg
doc/tutorials/imgproc/morph_lines_detection/images/binary.png
doc/tutorials/imgproc/morph_lines_detection/images/gray.png
doc/tutorials/imgproc/morph_lines_detection/images/horiz.png
doc/tutorials/imgproc/morph_lines_detection/images/morph12.gif
doc/tutorials/imgproc/morph_lines_detection/images/morph21.gif
doc/tutorials/imgproc/morph_lines_detection/images/morph211.png
doc/tutorials/imgproc/morph_lines_detection/images/morph6.gif
doc/tutorials/imgproc/morph_lines_detection/images/morph61.png
doc/tutorials/imgproc/morph_lines_detection/images/smooth.png
doc/tutorials/imgproc/morph_lines_detection/images/src.png
doc/tutorials/imgproc/morph_lines_detection/images/vert.png
@@ -0,0 +1,86 @@
Extract horizontal and vertical lines by using morphological operations {#tutorial_moprh_lines_detection}
=============

Goal
----

In this tutorial you will learn how to:

- Apply two very common morphology operators (i.e. Dilation and Erosion), with the creation of custom kernels, in order to extract straight lines on the horizontal and vertical axes. For this purpose, you will use the following OpenCV functions:
    - @ref cv::erode
    - @ref cv::dilate
    - @ref cv::getStructuringElement

    in an example where your goal will be to extract the music notes from a music sheet.

Theory
------

### Morphology Operations
Morphology is a set of image processing operations that process images based on predefined *structuring elements*, also known as kernels. The value of each pixel in the output image is based on a comparison of the corresponding pixel in the input image with its neighbors. By choosing the size and shape of the kernel, you can construct a morphological operation that is sensitive to specific shapes in the input image.

Two of the most basic morphological operations are dilation and erosion. Dilation adds pixels to the boundaries of the objects in an image, while erosion does exactly the opposite. The number of pixels added or removed, respectively, depends on the size and shape of the structuring element used to process the image. In general, these two operations follow the rules below:

- __Dilation__: The value of the output pixel is the <b><em>maximum</em></b> value of all the pixels that fall within the structuring element's size and shape. For example, in a binary image, if any of the input pixels falling within the range of the kernel is set to 1, the corresponding output pixel will be set to 1 as well. The same applies to any type of image (e.g. grayscale, RGB, etc.).

    

    

- __Erosion__: The reverse applies for the erosion operation. The value of the output pixel is the <b><em>minimum</em></b> value of all the pixels that fall within the structuring element's size and shape. Look at the example figures below (a short code sketch follows them):

    

    
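To make the maximum/minimum intuition concrete, here is a small, hedged sketch using the Python bindings (the tutorial's own sample is C++; the 5x5 synthetic image and 3x3 kernel here are arbitrary choices for illustration):
@code{.py}
import cv2
import numpy as np

# A 5x5 black image with a single white pixel in the middle.
img = np.zeros((5, 5), dtype=np.uint8)
img[2, 2] = 255

kernel = np.ones((3, 3), dtype=np.uint8)  # 3x3 square structuring element

dilated = cv2.dilate(img, kernel)    # max filter: the single pixel grows into a 3x3 block
eroded = cv2.erode(dilated, kernel)  # min filter: the block shrinks back to a single pixel

print(dilated[1:4, 1:4])            # all 255
print(eroded[2, 2], eroded[1, 1])   # 255 0
@endcode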
### Structuring Elements

As can be seen above, and in general for any morphological operation, the structuring element used to probe the input image is its most important part.

A structuring element is a matrix consisting of only 0's and 1's that can have any arbitrary shape and size. Typically it is much smaller than the image being processed, and the pixels with value 1 define the neighborhood. The center pixel of the structuring element, called the origin, identifies the pixel of interest -- the pixel being processed.

For example, the following illustrates a diamond-shaped structuring element of 7x7 size.



A structuring element can have many common shapes, such as lines, diamonds, disks, periodic lines, and circles, and it can come in various sizes. You typically choose a structuring element the same size and shape as the objects you want to process/extract from the input image. For example, to find lines in an image, create a linear structuring element as you will see later (a small sketch of such kernels follows below).
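As an illustration of linear kernels, here is a brief, hedged Python sketch using cv::getStructuringElement through the cv2 bindings; the kernel sizes are arbitrary choices for this example:
@code{.py}
import cv2

# Long, thin rectangular kernels respond to lines of matching orientation.
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 1))  # 15 wide, 1 tall
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 15))    # 1 wide, 15 tall

# Other common shapes are available as well.
cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))

print(cross)  # a 0/1 matrix; the 1's define the neighborhood, the center is the origin
@endcode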
Code
----

This tutorial's code is shown below. You can also download it from [here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp).
@includelineno samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp

Explanation / Result
--------------------

-# Load the source image and check if it is loaded without any problem, then show it:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp load_image
    

-# Then transform the image to grayscale, if it is not grayscale already:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp gray
    

-# Afterwards, transform the grayscale image to binary. Notice the ~ symbol, which indicates that we use the inverse (i.e. bitwise_not) version of it:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp bin
    

-# Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines and, as a consequence, to separate the music notes from the music sheet. But first let's initialize the output images that we will use for that purpose:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp init

-# As we specified in the theory section, in order to extract the object that we desire, we need to create the corresponding structuring element. Since here we want to extract the horizontal lines, a corresponding structuring element for that purpose will have the following shape:
    
    and in the source code this is represented by the following code snippet:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp horiz
    

-# The same applies for the vertical lines, with the corresponding structuring element:
    
    and again this is represented as follows:
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp vert
    

-# As you can see, we are almost there. However, at this point you will notice that the edges of the notes are a bit rough. For that reason we need to refine the edges in order to obtain a smoother result (a rough Python sketch of the full pipeline follows this list):
    @snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp smooth
    
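For readers who prefer Python, a loose equivalent of the whole pipeline is sketched below. It only mirrors the C++ sample approximately: the file name `notes.png` and the divisor 30 used for the kernel sizes are assumptions for illustration, not values taken from the sample.
@code{.py}
import cv2

src = cv2.imread("notes.png")                  # hypothetical input: the music sheet
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# Inverted adaptive threshold, so the dark sheet content becomes white foreground.
bw = cv2.adaptiveThreshold(cv2.bitwise_not(gray), 255,
                           cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)

horizontal = bw.copy()
vertical = bw.copy()

# Long horizontal kernel: erosion removes everything that is not a long horizontal run,
# dilation restores the surviving staff lines to their original thickness.
h_size = max(horizontal.shape[1] // 30, 1)
h_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (h_size, 1))
horizontal = cv2.dilate(cv2.erode(horizontal, h_kernel), h_kernel)

# Tall vertical kernel: the same idea keeps the note stems and heads instead.
v_size = max(vertical.shape[0] // 30, 1)
v_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, v_size))
vertical = cv2.dilate(cv2.erode(vertical, v_kernel), v_kernel)

cv2.imwrite("horizontal.png", horizontal)
cv2.imwrite("vertical.png", vertical)
@endcode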
@@ -27,6 +27,14 @@ In this section you will learn about the image processing (manipulation) functio

Here we investigate different morphology operators

- @subpage tutorial_moprh_lines_detection

    *Compatibility:* \> OpenCV 2.0

    *Author:* Theodore Tsesmelis

    Here we will show how we can use different morphology operators to extract horizontal and vertical lines

- @subpage tutorial_pyramids

    *Compatibility:* \> OpenCV 2.0
New binary files (images):
doc/tutorials/ml/images/introduction_to_pca_cover.png
doc/tutorials/ml/introduction_to_pca/images/output.png
doc/tutorials/ml/introduction_to_pca/images/pca_eigen.png
doc/tutorials/ml/introduction_to_pca/images/pca_line.png
doc/tutorials/ml/introduction_to_pca/images/pca_test1.jpg
@@ -0,0 +1,133 @@
Introduction to Principal Component Analysis (PCA) {#tutorial_introduction_to_pca}
=======================================

Goal
----

In this tutorial you will learn how to:

- Use the OpenCV class @ref cv::PCA to calculate the orientation of an object.

What is PCA?
--------------

Principal Component Analysis (PCA) is a statistical procedure that extracts the most important features of a dataset.



Consider that you have a set of 2D points, as shown in the figure above. Each dimension corresponds to a feature you are interested in. At first glance, one could argue that the points are placed in a random order. However, if you take a closer look, you will see that there is a linear pattern (indicated by the blue line) which is hard to dismiss. A key point of PCA is Dimensionality Reduction. Dimensionality Reduction is the process of reducing the number of dimensions of the given dataset. For example, in the above case it is possible to approximate the set of points by a single line and therefore reduce the dimensionality of the given points from 2D to 1D.

Moreover, you could also see that the points vary the most along the blue line, more than they vary along the Feature 1 or Feature 2 axes. This means that if you know the position of a point along the blue line, you have more information about the point than if you only knew where it was on the Feature 1 axis or the Feature 2 axis.

Hence, PCA allows us to find the direction along which our data varies the most. In fact, the result of running PCA on the set of points in the diagram consists of 2 vectors called _eigenvectors_, which are the _principal components_ of the data set.



The size of each eigenvector is encoded in the corresponding eigenvalue, which indicates how much the data vary along that principal component. The beginning of the eigenvectors is the center of all points in the data set. Applying PCA to an N-dimensional data set yields N N-dimensional eigenvectors, N eigenvalues and 1 N-dimensional center point. Enough theory, let's see how we can put these ideas into code.

How are the eigenvectors and eigenvalues computed?
--------------------------------------------------

The goal is to transform a given data set __X__ of dimension _p_ to an alternative data set __Y__ of smaller dimension _L_. Equivalently, we are seeking to find the matrix __Y__, where __Y__ is the _Karhunen–Loève transform_ (KLT) of matrix __X__:

\f[ \mathbf{Y} = \mathbb{K} \mathbb{L} \mathbb{T} \{\mathbf{X}\} \f]

__Organize the data set__

Suppose you have data comprising a set of observations of _p_ variables, and you want to reduce the data so that each observation can be described with only _L_ variables, _L_ < _p_. Suppose further that the data are arranged as a set of _n_ data vectors \f$ x_1...x_n \f$ with each \f$ x_i \f$ representing a single grouped observation of the _p_ variables.

- Write \f$ x_1...x_n \f$ as row vectors, each of which has _p_ columns.
- Place the row vectors into a single matrix __X__ of dimensions \f$ n\times p \f$.

__Calculate the empirical mean__

- Find the empirical mean along each dimension \f$ j = 1, ..., p \f$.

- Place the calculated mean values into an empirical mean vector __u__ of dimensions \f$ p\times 1 \f$.

\f[ \mathbf{u[j]} = \frac{1}{n}\sum_{i=1}^{n}\mathbf{X[i,j]} \f]

__Calculate the deviations from the mean__

Mean subtraction is an integral part of the solution towards finding a principal component basis that minimizes the mean square error of approximating the data. Hence, we proceed by centering the data as follows:

- Subtract the empirical mean vector __u__ from each row of the data matrix __X__.

- Store the mean-subtracted data in the \f$ n\times p \f$ matrix __B__.

\f[ \mathbf{B} = \mathbf{X} - \mathbf{h}\mathbf{u^{T}} \f]

where __h__ is an \f$ n\times 1 \f$ column vector of all 1s:

\f[ h[i] = 1, i = 1, ..., n \f]

__Find the covariance matrix__

- Find the \f$ p\times p \f$ empirical covariance matrix __C__ from the outer product of matrix __B__ with itself:

\f[ \mathbf{C} = \frac{1}{n-1} \mathbf{B^{*}} \cdot \mathbf{B} \f]

where * is the conjugate transpose operator. Note that if B consists entirely of real numbers, which is the case in many applications, the "conjugate transpose" is the same as the regular transpose.

__Find the eigenvectors and eigenvalues of the covariance matrix__

- Compute the matrix __V__ of eigenvectors which diagonalizes the covariance matrix __C__:

\f[ \mathbf{V^{-1}} \mathbf{C} \mathbf{V} = \mathbf{D} \f]

where __D__ is the diagonal matrix of eigenvalues of __C__.

- Matrix __D__ will take the form of a \f$ p \times p \f$ diagonal matrix:

\f[ D[k,l] = \left\{\begin{matrix} \lambda_k, k = l \\ 0, k \neq l \end{matrix}\right. \f]

here, \f$ \lambda_j \f$ is the _j_-th eigenvalue of the covariance matrix __C__.

- Matrix __V__, also of dimension _p_ x _p_, contains _p_ column vectors, each of length _p_, which represent the _p_ eigenvectors of the covariance matrix __C__.
- The eigenvalues and eigenvectors are ordered and paired: the _j_-th eigenvalue corresponds to the _j_-th eigenvector. (A short numerical sketch of these steps follows below.)
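The steps above can be reproduced numerically in a few lines. The following is a hedged NumPy sketch on synthetic 2D data (the random data and its correlation structure are invented purely for illustration); it follows the same mean-subtraction, covariance and eigen-decomposition recipe:
@code{.py}
import numpy as np

rng = np.random.default_rng(0)
# n = 500 observations of p = 2 correlated features, so one direction dominates.
X = rng.normal(size=(500, 2)) @ np.array([[3.0, 0.0], [1.5, 0.5]])

u = X.mean(axis=0)            # empirical mean vector u (length p)
B = X - u                     # mean-subtracted data B
C = (B.T @ B) / (len(X) - 1)  # p x p covariance matrix, same as np.cov(X, rowvar=False)

eigenvalues, V = np.linalg.eigh(C)     # C is symmetric, so eigh is appropriate
order = np.argsort(eigenvalues)[::-1]  # sort components by decreasing eigenvalue
eigenvalues, V = eigenvalues[order], V[:, order]

print(eigenvalues)  # variance of the data along each principal component
print(V[:, 0])      # the direction along which the data varies the most
@endcode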
@note sources [[1]](https://robospace.wordpress.com/2013/10/09/object-orientation-principal-component-analysis-opencv/), [[2]](http://en.wikipedia.org/wiki/Principal_component_analysis) and special thanks to Svetlin Penkov for the original tutorial.

Source Code
-----------

This tutorial's code is shown below. You can also download it from
[here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp).
@includelineno cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp

@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/Itseez/opencv/tree/master/samples/cpp/pca.cpp)

Explanation
-----------

-# __Read image and convert it to binary__

    Here we apply the necessary pre-processing procedures in order to be able to detect the objects of interest.
    @snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp pre-process

-# __Extract objects of interest__

    Then find and filter contours by size and obtain the orientation of the remaining ones.
    @snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp contours

-# __Extract orientation__

    Orientation is extracted by calling the getOrientation() function, which performs the whole PCA procedure.
    @snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp pca

    First the data need to be arranged in a matrix of size n x 2, where n is the number of data points we have. Then we can perform the PCA analysis. The calculated mean (i.e. center of mass) is stored in the _cntr_ variable, and the eigenvectors and eigenvalues are stored in the corresponding std::vector's. (A hedged Python sketch of this step follows below.)
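For reference, a rough Python counterpart of that step is sketched below. The contour points are made up for illustration, and cv2.PCACompute2 is assumed to be available (newer OpenCV builds expose it; older ones only provide cv2.PCACompute, which does not return the eigenvalues):
@code{.py}
import cv2
import numpy as np

# Hypothetical contour of one object, in the (n, 1, 2) layout that cv2.findContours produces.
pts = np.array([[[10, 10]], [[60, 12]], [[62, 30]], [[12, 28]]], dtype=np.int32)

# Arrange the data as an n x 2 matrix, one row per point, as described above.
data_pts = pts.reshape(-1, 2).astype(np.float64)

mean = np.empty((0))
mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)

cntr = (int(mean[0, 0]), int(mean[0, 1]))                   # center of mass of the object
angle = np.arctan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation of the first component
print(cntr, np.degrees(angle))
@endcode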
-# __Visualize result__

    The final result is visualized through the drawAxis() function, where the principal components are drawn as lines, and each eigenvector is multiplied by its eigenvalue and translated to the mean position.
    @snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp visualization
    @snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp visualization1

Results
-------

The code opens an image, finds the orientation of the detected objects of interest, and then visualizes the result by drawing the contours of the detected objects of interest, the center point, and the x and y axes of the extracted orientation.




@@ -1,7 +1,7 @@
Machine Learning (ml module) {#tutorial_table_of_content_ml}
============================

Use the powerfull machine learning classes for statistical classification, regression and clustering
Use the powerful machine learning classes for statistical classification, regression and clustering
of data.

- @subpage tutorial_introduction_to_svm
@@ -20,3 +20,11 @@ of data.

Here you will learn how to define the optimization problem for SVMs when it is not possible to
separate linearly the training data.

- @subpage tutorial_introduction_to_pca

    *Compatibility:* \> OpenCV 2.0

    *Author:* Theodore Tsesmelis

    Learn what a Principal Component Analysis (PCA) is.
@@ -1,2 +1,2 @@
set(the_description "Camera Calibration and 3D Reconstruction")
ocv_define_module(calib3d opencv_imgproc opencv_features2d)
ocv_define_module(calib3d opencv_imgproc opencv_features2d WRAP java python)
@@ -69,7 +69,7 @@ void drawPoints(const std::vector<Point2f> &points, Mat &outImage, int radius =
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> &points, const Size &patternSz, std::vector<Point2f> &patternPoints)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::hierarchicalClustering(points, patternSz, patternPoints))
if(tegra::useTegra() && tegra::hierarchicalClustering(points, patternSz, patternPoints))
return;
#endif
int j, n = (int)points.size();
@@ -504,7 +504,7 @@ private:
H[n1][n1 - 1] = 0.0;
H[n1][n1] = 1.0;
for (int i = n1 - 2; i >= 0; i--) {
double ra, sa, vr, vi;
double ra, sa;
ra = 0.0;
sa = 0.0;
for (int j = l; j <= n1; j++) {
@@ -529,8 +529,8 @@ private:

x = H[i][i + 1];
y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
vi = (d[i] - p) * 2.0 * q;
double vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
double vi = (d[i] - p) * 2.0 * q;
if (vr == 0.0 && vi == 0.0) {
vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
+ std::abs(y) + std::abs(z));
@@ -116,7 +116,7 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
{
int i, j, k;
int count = 0, converged = 0;
float inorm, jnorm, invInorm, invJnorm, invScale, scale = 0, inv_Z = 0;
float scale = 0, inv_Z = 0;
float diff = (float)criteria.epsilon;

/* Check bad arguments */
@@ -195,16 +195,18 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
}
}

inorm = rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
float inorm =
rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
rotation[1] /*[0][1]*/ * rotation[1] /*[0][1]*/ +
rotation[2] /*[0][2]*/ * rotation[2] /*[0][2]*/;

jnorm = rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
float jnorm =
rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
rotation[4] /*[1][1]*/ * rotation[4] /*[1][1]*/ +
rotation[5] /*[1][2]*/ * rotation[5] /*[1][2]*/;

invInorm = cvInvSqrt( inorm );
invJnorm = cvInvSqrt( jnorm );
const float invInorm = cvInvSqrt( inorm );
const float invJnorm = cvInvSqrt( jnorm );

inorm *= invInorm;
jnorm *= invJnorm;
@@ -234,7 +236,7 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
converged = ((criteria.type & CV_TERMCRIT_EPS) && (diff < criteria.epsilon));
converged |= ((criteria.type & CV_TERMCRIT_ITER) && (count == criteria.max_iter));
}
invScale = 1 / scale;
const float invScale = 1 / scale;
translation[0] = imagePoints[0].x * invScale;
translation[1] = imagePoints[0].y * invScale;
translation[2] = 1 / inv_Z;
@@ -266,8 +268,6 @@ static CvStatus icvReleasePOSITObject( CvPOSITObject ** ppObject )
void
icvPseudoInverse3D( float *a, float *b, int n, int method )
{
int k;

if( method == 0 )
{
float ata00 = 0;
@@ -276,8 +276,8 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
float ata01 = 0;
float ata02 = 0;
float ata12 = 0;
float det = 0;

int k;
/* compute matrix ata = transpose(a) * a */
for( k = 0; k < n; k++ )
{
@@ -295,7 +295,6 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
}
/* inverse matrix ata */
{
float inv_det;
float p00 = ata11 * ata22 - ata12 * ata12;
float p01 = -(ata01 * ata22 - ata12 * ata02);
float p02 = ata12 * ata01 - ata11 * ata02;
@@ -304,11 +303,12 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
float p12 = -(ata00 * ata12 - ata01 * ata02);
float p22 = ata00 * ata11 - ata01 * ata01;

float det = 0;
det += ata00 * p00;
det += ata01 * p01;
det += ata02 * p02;

inv_det = 1 / det;
const float inv_det = 1 / det;

/* compute resultant matrix */
for( k = 0; k < n; k++ )
@@ -1,5 +1,7 @@
set(the_description "The Core Functionality")
ocv_add_module(core PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" OPTIONAL opencv_cudev)
ocv_add_module(core PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}"
OPTIONAL opencv_cudev
WRAP java python)

if(HAVE_WINRT_CX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
@@ -191,7 +191,7 @@
# include "arm_neon.h"
# define CV_NEON 1
# define CPU_HAS_NEON_FEATURE (true)
#elif defined(__ARM_NEON__)
#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
# include <arm_neon.h>
# define CV_NEON 1
#endif
@@ -269,6 +269,15 @@ typedef enum CvStatus
}
CvStatus;

#ifdef HAVE_TEGRA_OPTIMIZATION
namespace tegra {

CV_EXPORTS bool useTegra();
CV_EXPORTS void setUseTegra(bool flag);

}
#endif

//! @endcond

#endif // __OPENCV_CORE_PRIVATE_HPP__
@@ -1,4 +1,4 @@
include/opencv2/core/base.hpp
include/opencv2/core.hpp
include/opencv2/core/utility.hpp
../java/generator/src/cpp/core_manual.hpp
misc/java/src/cpp/core_manual.hpp
@@ -1,6 +1,6 @@
#define LOG_TAG "org.opencv.core.Core"
#include "common.h"

#include "core_manual.hpp"
#include "opencv2/core/utility.hpp"

static int quietCallback( int, const char*, const char*, const char*, int, void* )
@@ -8,10 +8,14 @@ static int quietCallback( int, const char*, const char*, const char*, int, void*
return 0;
}

void cv::setErrorVerbosity(bool verbose)
namespace cv {

void setErrorVerbosity(bool verbose)
{
if(verbose)
cv::redirectError(0);
else
cv::redirectError((cv::ErrorCallback)quietCallback);
}

}
@@ -2256,51 +2256,54 @@ void cv::subtract( InputArray _src1, InputArray _src2, OutputArray _dst,
InputArray mask, int dtype )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
int kind1 = _src1.kind(), kind2 = _src2.kind();
Mat src1 = _src1.getMat(), src2 = _src2.getMat();
bool src1Scalar = checkScalar(src1, _src2.type(), kind1, kind2);
bool src2Scalar = checkScalar(src2, _src1.type(), kind2, kind1);

if (!src1Scalar && !src2Scalar &&
src1.depth() == CV_8U && src2.type() == src1.type() &&
src1.dims == 2 && src2.size() == src1.size() &&
mask.empty())
if (tegra::useTegra())
{
if (dtype < 0)
int kind1 = _src1.kind(), kind2 = _src2.kind();
Mat src1 = _src1.getMat(), src2 = _src2.getMat();
bool src1Scalar = checkScalar(src1, _src2.type(), kind1, kind2);
bool src2Scalar = checkScalar(src2, _src1.type(), kind2, kind1);

if (!src1Scalar && !src2Scalar &&
src1.depth() == CV_8U && src2.type() == src1.type() &&
src1.dims == 2 && src2.size() == src1.size() &&
mask.empty())
{
if (_dst.fixedType())
if (dtype < 0)
{
dtype = _dst.depth();
if (_dst.fixedType())
{
dtype = _dst.depth();
}
else
{
dtype = src1.depth();
}
}
else
{
dtype = src1.depth();
}
}

dtype = CV_MAT_DEPTH(dtype);
dtype = CV_MAT_DEPTH(dtype);

if (!_dst.fixedType() || dtype == _dst.depth())
{
_dst.create(src1.size(), CV_MAKE_TYPE(dtype, src1.channels()));
if (!_dst.fixedType() || dtype == _dst.depth())
{
_dst.create(src1.size(), CV_MAKE_TYPE(dtype, src1.channels()));

if (dtype == CV_16S)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u16s(src1, src2, dst))
return;
}
else if (dtype == CV_32F)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u32f(src1, src2, dst))
return;
}
else if (dtype == CV_8S)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u8s(src1, src2, dst))
return;
if (dtype == CV_16S)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u16s(src1, src2, dst))
return;
}
else if (dtype == CV_32F)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u32f(src1, src2, dst))
return;
}
else if (dtype == CV_8S)
{
Mat dst = _dst.getMat();
if(tegra::subtract_8u8u8s(src1, src2, dst))
return;
}
}
}
}
@@ -127,7 +127,7 @@ static void FastAtan2_32f(const float *Y, const float *X, float *angle, int len,
float scale = angleInDegrees ? 1 : (float)(CV_PI/180);

#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::FastAtan2_32f(Y, X, angle, len, scale))
if (tegra::useTegra() && tegra::FastAtan2_32f(Y, X, angle, len, scale))
return;
#endif
@@ -236,6 +236,9 @@ struct CoreTLSData
{
CoreTLSData() : device(0), useOpenCL(-1), useIPP(-1), useCollection(false)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
useTegra = -1;
#endif
#ifdef CV_COLLECT_IMPL_DATA
implFlags = 0;
#endif
@@ -246,6 +249,9 @@ struct CoreTLSData
ocl::Queue oclQueue;
int useOpenCL; // 1 - use, 0 - do not use, -1 - auto/not initialized
int useIPP; // 1 - use, 0 - do not use, -1 - auto/not initialized
#ifdef HAVE_TEGRA_OPTIMIZATION
int useTegra; // 1 - use, 0 - do not use, -1 - auto/not initialized
#endif
bool useCollection; // enable/disable impl data collection

#ifdef CV_COLLECT_IMPL_DATA
@@ -517,9 +517,11 @@ static const uchar * initPopcountTable()
unsigned int j = 0u;
#if CV_POPCNT
if (checkHardwareSupport(CV_CPU_POPCNT))
{
for( ; j < 256u; j++ )
tab[j] = (uchar)(8 - _mm_popcnt_u32(j));
#else
}
#endif
for( ; j < 256u; j++ )
{
int val = 0;
@@ -527,7 +529,6 @@ static const uchar * initPopcountTable()
val += (j & mask) == 0;
tab[j] = (uchar)val;
}
#endif
initialized = true;
}