Merge branch 'master' of code.opencv.org:opencv

Conflicts:
	samples/python2/common.py
	samples/python2/feature_homography.py
	samples/python2/plane_ar.py
	samples/python2/plane_tracker.py
commit 7db1f711f6
Author: Alexander Mordvintsev
Date:   2012-08-07 23:21:56 +03:00
132 changed files with 9929 additions and 1613 deletions

.gitignore

@@ -1,3 +1,4 @@
*.pyc
.DS_Store
refman.rst
OpenCV4Tegra/

3rdparty/tbb/.gitignore

@@ -0,0 +1 @@
tbb*.tgz


@@ -140,6 +140,9 @@ OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF
OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF)
OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" OFF IF (NOT ANDROID AND NOT IOS) )
# OpenCV build components
# ===================================================
@@ -282,25 +285,37 @@ ocv_include_directories(${OPENCV_CONFIG_FILE_INCLUDE_DIR})
# ----------------------------------------------------------------------------
# Autodetect if we are in a SVN repository
# Autodetect if we are in a GIT repository
# ----------------------------------------------------------------------------
find_host_program(SVNVERSION_PATH svnversion)
mark_as_advanced(force SVNVERSION_PATH)
if(SVNVERSION_PATH)
message(STATUS "Extracting svn version, please wait...")
execute_process(COMMAND ${SVNVERSION_PATH} -n ${OpenCV_SOURCE_DIR} OUTPUT_VARIABLE SVNVERSION_RESULT)
if(SVNVERSION_RESULT MATCHES "exported")
# This is NOT a svn repository:
set(OPENCV_SVNVERSION "")
message(STATUS "SVNVERSION: exported")
else()
set(OPENCV_SVNVERSION " svn:${SVNVERSION_RESULT}")
message(STATUS "SVNVERSION: ${OPENCV_SVNVERSION}")
endif()
# don't use FindGit because it requires CMake 2.8.2
set(git_names git eg) # eg = easy git
# Prefer .cmd variants on Windows unless running in a Makefile in the MSYS shell
if(WIN32)
if(NOT CMAKE_GENERATOR MATCHES "MSYS")
set(git_names git.cmd git eg.cmd eg)
endif()
endif()
find_host_program(GIT_EXECUTABLE NAMES ${git_names} PATH_SUFFIXES Git/cmd Git/bin DOC "git command line client")
mark_as_advanced(GIT_EXECUTABLE)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
WORKING_DIRECTORY "${OpenCV_SOURCE_DIR}"
OUTPUT_VARIABLE OPENCV_GIT_HASH_SHORT
RESULT_VARIABLE GIT_RESULT
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if(GIT_RESULT EQUAL 0)
set(OPENCV_VCSVERSION "commit:${OPENCV_GIT_HASH_SHORT}")
else()
set(OPENCV_VCSVERSION "exported")
endif()
else()
# We don't have svnversion:
set(OPENCV_SVNVERSION "")
# We don't have git:
set(OPENCV_VCSVERSION "")
endif()
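For reference, the version string assembled here later surfaces at runtime through cv::getBuildInformation(); a minimal sketch, assuming a build recent enough to provide that call:

#include <iostream>
#include <opencv2/core/core.hpp>

int main()
{
    // The configuration summary includes a "Version control:" line
    // populated from OPENCV_VCSVERSION when the build found git.
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}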
@@ -396,6 +411,12 @@ if(WITH_OPENCL)
if(OPENCL_FOUND)
set(HAVE_OPENCL 1)
endif()
if(WITH_OPENCLAMDFFT)
set(HAVE_CLAMDFFT 1)
endif()
if(WITH_OPENCLAMDBLAS)
set(HAVE_CLAMDBLAS 1)
endif()
endif()
# ----------------------------------------------------------------------------
@@ -465,8 +486,8 @@ include(cmake/OpenCVGenConfig.cmake REQUIRED)
# ----------------------------------------------------------------------------
status("")
status("General configuration for OpenCV ${OPENCV_VERSION} =====================================")
if(OPENCV_SVNVERSION)
status("Version control:" ${OPENCV_SVNVERSION})
if(OPENCV_VCSVERSION)
status(" Version control:" ${OPENCV_VCSVERSION})
endif()
# ========================== build platform ==========================


@@ -4,7 +4,7 @@
# See home page: http://code.google.com/p/android-cmake/
#
# The file is maintained by the OpenCV project. It can also be found at
# http://code.opencv.org/svn/opencv/trunk/opencv/android/android.toolchain.cmake
# http://code.opencv.org/projects/opencv/repository/revisions/master/changes/android/android.toolchain.cmake
#
# Usage Linux:
# $ export ANDROID_NDK=/absolute/path/to/the/android-ndk
@@ -182,6 +182,7 @@
# [+] added mips architecture support
# - modified August 2012
# [+] updated for NDK r8b
# [~] all intermediate files generated by toolchain are moved into CMakeFiles
# ------------------------------------------------------------------------------
cmake_minimum_required( VERSION 2.6.3 )
@@ -854,45 +855,48 @@ elseif( X86 )
endif()
#linker flags
list( APPEND ANDROID_SYSTEM_LIB_DIRS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}" "${CMAKE_INSTALL_PREFIX}/libs/${ANDROID_NDK_ABI_NAME}" )
if( NOT DEFINED __ndklibspath )
set( __ndklibspath "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/ndklibs/${ANDROID_NDK_ABI_NAME}" )
endif()
list( APPEND ANDROID_SYSTEM_LIB_DIRS "${__ndklibspath}" "${CMAKE_INSTALL_PREFIX}/libs/${ANDROID_NDK_ABI_NAME}" )
set( ANDROID_LINKER_FLAGS "" )
#STL
if( ANDROID_USE_STLPORT )
if( EXISTS "${__stlLibPath}/libstlport_static.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstlport_static.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstlport_static.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstlport_static.a" "${__ndklibspath}/libstlport_static.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstlport_static.a" )
if( EXISTS "${__ndklibspath}/libstlport_static.a" )
set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--start-group -lstlport_static" )
endif()
else( ANDROID_USE_STLPORT )
if( EXISTS "${__stlLibPath}/libgnustl_static.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libgnustl_static.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libgnustl_static.a" "${__ndklibspath}/libstdc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/thumb/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/thumb/libstdc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/thumb/libstdc++.a" "${__ndklibspath}/libstdc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/libstdc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/${CMAKE_SYSTEM_PROCESSOR}/libstdc++.a" "${__ndklibspath}/libstdc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${__stlLibPath}/thumb/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/thumb/libstdc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/thumb/libstdc++.a" "${__ndklibspath}/libstdc++.a" )
elseif( EXISTS "${__stlLibPath}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstdc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstdc++.a" "${__ndklibspath}/libstdc++.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
if( EXISTS "${__ndklibspath}/libstdc++.a" )
set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -lstdc++" )
endif()
#gcc exception & rtti support
if( EXISTS "${__stlLibPath}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libsupc++.a" "${__ndklibspath}/libsupc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb/libsupc++.a" "${__ndklibspath}/libsupc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libsupc++.a" "${__ndklibspath}/libsupc++.a" )
elseif( ANDROID_ARCH_NAME STREQUAL "arm" AND EXISTS "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libsupc++.a" "${__ndklibspath}/libsupc++.a" )
elseif( EXISTS "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libsupc++.a" "${__ndklibspath}/libsupc++.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
if( EXISTS "${__ndklibspath}/libsupc++.a" )
set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -lsupc++" )
endif()
endif( ANDROID_USE_STLPORT )
@@ -1038,13 +1042,14 @@ endmacro()
# export toolchain settings for the try_compile() command
if( NOT PROJECT_NAME STREQUAL "CMAKE_TRY_COMPILE" )
set( __toolchain_config "")
foreach( __var ANDROID_ABI ANDROID_FORCE_ARM_BUILD ANDROID_NATIVE_API_LEVEL ANDROID_NO_UNDEFINED ANDROID_SO_UNDEFINED ANDROID_SET_OBSOLETE_VARIABLES LIBRARY_OUTPUT_PATH_ROOT ANDROID_USE_STLPORT ANDROID_FORBID_SYGWIN ANDROID_NDK ANDROID_STANDALONE_TOOLCHAIN ANDROID_FUNCTION_LEVEL_LINKING )
foreach( __var ANDROID_ABI ANDROID_FORCE_ARM_BUILD ANDROID_NATIVE_API_LEVEL ANDROID_NO_UNDEFINED ANDROID_SO_UNDEFINED ANDROID_SET_OBSOLETE_VARIABLES LIBRARY_OUTPUT_PATH_ROOT ANDROID_USE_STLPORT ANDROID_FORBID_SYGWIN ANDROID_NDK ANDROID_STANDALONE_TOOLCHAIN ANDROID_FUNCTION_LEVEL_LINKING __ndklibspath )
if( DEFINED ${__var} )
set( __toolchain_config "${__toolchain_config}set( ${__var} \"${${__var}}\" )\n" )
endif()
endforeach()
file( WRITE "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/android.toolchain.config.cmake" "${__toolchain_config}" )
file( WRITE "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/android.toolchain.config.cmake" "${__toolchain_config}" )
unset( __toolchain_config )
unset( __ndklibspath )
endif()
@@ -1073,6 +1078,7 @@ endif()
# Can be set only at the first run:
# ANDROID_NDK
# ANDROID_STANDALONE_TOOLCHAIN
# ANDROID_TOOLCHAIN_NAME : "arm-linux-androideabi-4.4.3" or "arm-linux-androideabi-4.6" or "mipsel-linux-android-4.4.3" or "mipsel-linux-android-4.6" or "x86-4.4.3" or "x86-4.6"
# Obsolete:
# ANDROID_API_LEVEL : superseded by ANDROID_NATIVE_API_LEVEL
# ARM_TARGET : superseded by ANDROID_ABI
@@ -1105,7 +1111,6 @@ endif()
# ANDROID_COMPILER_VERSION : GCC version used
# ANDROID_CXX_FLAGS : C/C++ compiler flags required by Android platform
# ANDROID_SUPPORTED_ABIS : list of currently allowed values for ANDROID_ABI
# ANDROID_TOOLCHAIN_NAME : "standalone", "arm-linux-androideabi-4.4.3" or "x86-4.4.3" or something similar.
# ANDROID_TOOLCHAIN_MACHINE_NAME : "arm-linux-androideabi", "arm-eabi" or "i686-android-linux"
# ANDROID_TOOLCHAIN_ROOT : path to the top level of toolchain (standalone or placed inside NDK)
# ANDROID_SUPPORTED_NATIVE_API_LEVELS : list of native API levels found inside NDK


@@ -1,125 +0,0 @@
#!/bin/sh
cd `dirname $0`/..
ANDROID_DIR=`pwd`
rm -rf package
mkdir -p package
cd package
PRG_DIR=`pwd`
mkdir opencv
# neon-enabled build
#cd $PRG_DIR
#mkdir build-neon
#cd build-neon
#cmake -DANDROID_ABI="armeabi-v7a with NEON" -DBUILD_DOCS=OFF -DBUILD_TESTS=OFF -DBUILD_EXAMPLES=OFF -DBUILD_ANDROID_EXAMPLES=OFF -DCMAKE_TOOLCHAIN_FILE="$ANDROID_DIR/android.toolchain.cmake" -DCMAKE_INSTALL_PREFIX="$PRG_DIR/opencv" "$ANDROID_DIR/.." || exit 1
#make -j8 install/strip || exit 1
#cd "$PRG_DIR/opencv"
#rm -rf doc include src .classpath .project AndroidManifest.xml default.properties share/OpenCV/haarcascades share/OpenCV/lbpcascades share/OpenCV/*.cmake share/OpenCV/OpenCV.mk
#mv libs/armeabi-v7a libs/armeabi-v7a-neon
#mv share/OpenCV/3rdparty/libs/armeabi-v7a share/OpenCV/3rdparty/libs/armeabi-v7a-neon
# armeabi-v7a build
cd "$PRG_DIR"
mkdir build
cd build
cmake -DANDROID_ABI="armeabi-v7a" -DBUILD_DOCS=OFF -DBUILD_TESTS=ON -DBUILD_EXAMPLES=OFF -DBUILD_ANDROID_EXAMPLES=ON -DCMAKE_TOOLCHAIN_FILE="$ANDROID_DIR/android.toolchain.cmake" -DCMAKE_INSTALL_PREFIX="$PRG_DIR/opencv" "$ANDROID_DIR/.." || exit 1
make -j8 install/strip || exit 1
cd "$PRG_DIR/opencv"
rm -rf doc include src .classpath .project AndroidManifest.xml default.properties project.properties share/OpenCV/haarcascades share/OpenCV/lbpcascades share/OpenCV/*.cmake share/OpenCV/OpenCV.mk
# armeabi build
cd "$PRG_DIR/build"
rm -rf CMakeCache.txt
cmake -DANDROID_ABI="armeabi" -DBUILD_DOCS=ON -DBUILD_TESTS=ON -DBUILD_EXAMPLES=OFF -DBUILD_ANDROID_EXAMPLES=ON -DINSTALL_ANDROID_EXAMPLES=ON -DCMAKE_TOOLCHAIN_FILE="$ANDROID_DIR/android.toolchain.cmake" -DCMAKE_INSTALL_PREFIX="$PRG_DIR/opencv" "$ANDROID_DIR/.." || exit 1
make -j8 install/strip docs || exit 1
find doc -name "*.pdf" -exec cp {} $PRG_DIR/opencv/doc \;
cd $PRG_DIR
rm -rf opencv/doc/CMakeLists.txt
cp "$ANDROID_DIR/README.android" opencv/
cp "$ANDROID_DIR/../README" opencv/
# get opencv version
CV_VERSION=`grep -o "[0-9]\+\.[0-9]\+\.[0-9]\+" opencv/share/OpenCV/OpenCVConfig-version.cmake`
OPENCV_NAME=OpenCV-$CV_VERSION
mv opencv $OPENCV_NAME
#samples
cp -r "$ANDROID_DIR/../samples/android" "$PRG_DIR/samples"
cd "$PRG_DIR/samples"
#enable for loops over items with spaces in their name
IFS="
"
for dir in `ls -1`
do
if [ -f "$dir/project.properties" ]
then
HAS_REFERENCE=`cat "$dir/project.properties" | grep -c android.library.reference.1`
if [ $HAS_REFERENCE = 1 ]
then
echo -n > "$dir/project.properties"
android update project --name "$dir" --target "android-8" --library "../../$OPENCV_NAME" --path "$dir"
#echo 'android update project --name "$dir" --target "android-8" --library "../opencv$CV_VERSION" --path "$dir"'
fi
else
if [ -f "$dir/default.properties" ]
then
HAS_REFERENCE=`cat "$dir/default.properties" | grep -c android.library.reference.1`
if [ $HAS_REFERENCE = 1 ]
then
echo -n > "$dir/default.properties"
android update project --name "$dir" --target "android-8" --library "../../$OPENCV_NAME" --path "$dir"
#echo 'android update project --name "$dir" --target "android-8" --library "../opencv$CV_VERSION" --path "$dir"'
fi
else
rm -rf "$dir"
fi
fi
done
echo "OPENCV_MK_PATH:=../../$OPENCV_NAME/share/OpenCV/OpenCV.mk" > includeOpenCV.mk
#clean samples
cd "$PRG_DIR/samples"
#remove ignored files/folders
svn status --no-ignore | grep ^I | cut -c9- | xargs -d \\n rm -rf
#remove unversioned files/folders
svn status | grep ^\? | cut -c9- | xargs -d \\n rm -rf
#generate "gen" folders to eliminate eclipse warnings
cd "$PRG_DIR/samples"
for dir in `ls -1`
do
if [ -d "$dir" ]
then
mkdir "$dir/gen"
fi
done
#generate folders "gen" and "res" for opencv (dummy eclipse stuff)
cd $PRG_DIR
mkdir "$OPENCV_NAME/gen"
mkdir "$OPENCV_NAME/res"
# pack all files
cd $PRG_DIR
PRG_NAME=OpenCV-$CV_VERSION-tp-android-bin.tar.bz2
tar cjpf $PRG_NAME --exclude-vcs $OPENCV_NAME samples || exit 1
echo
echo "Package $PRG_NAME is successfully created"


@@ -8,7 +8,7 @@ Loader Callback Interface
Interface for callback object in case of asynchronous initialization of OpenCV
void onManagerConnected()
------------------------
-------------------------
.. method:: void onManagerConnected(int status)


@@ -2,8 +2,19 @@ if(APPLE)
set(OPENCL_FOUND YES)
set(OPENCL_LIBRARIES "-framework OpenCL")
else()
find_package(OpenCL QUIET)
#find_package(OpenCL QUIET)
if(WITH_OPENCLAMDFFT)
find_path(CLAMDFFT_INCLUDE_DIR
NAMES clAmdFft.h)
find_library(CLAMDFFT_LIBRARIES
NAMES clAmdFft.Runtime)
endif()
if(WITH_OPENCLAMDBLAS)
find_path(CLAMDBLAS_INCLUDE_DIR
NAMES clAmdBlas.h)
find_library(CLAMDBLAS_LIBRARIES
NAMES clAmdBlas)
endif()
# Try AMD/ATI Stream SDK
if (NOT OPENCL_FOUND)
set(ENV_AMDSTREAMSDKROOT $ENV{AMDAPPSDKROOT})


@@ -10,7 +10,7 @@ ADD_CUSTOM_TARGET(uninstall "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/c
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(uninstall PROPERTIES FOLDER "CMakeTargets")
endif()
# ----------------------------------------------------------------------------
# Source package, for "make package_source"
@@ -26,11 +26,11 @@ if(BUILD_PACKAGE)
set(TAR_TRANSFORM "\"s,^,${TARBALL_NAME}/,\"")
add_custom_target(package_source
#TODO: maybe we should not remove dll's
COMMAND ${TAR_CMD} --transform ${TAR_TRANSFORM} -cjpf ${CMAKE_CURRENT_BINARY_DIR}/${TARBALL_NAME}.tar.bz2 --exclude=".svn" --exclude="*.pyc" --exclude="*.vcproj" --exclude="*/lib/*" --exclude="*.dll" ./
COMMAND ${TAR_CMD} --transform ${TAR_TRANSFORM} -cjpf ${CMAKE_CURRENT_BINARY_DIR}/${TARBALL_NAME}.tar.bz2 --exclude=".svn" --exclude=".git" --exclude="*.pyc" --exclude="*.vcproj" --exclude="*/lib/*" --exclude="*.dll" ./
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
else()
add_custom_target(package_source
COMMAND zip -9 -r ${CMAKE_CURRENT_BINARY_DIR}/${TARBALL_NAME}.zip . -x '*/.svn/*' '*.vcproj' '*.pyc'
COMMAND zip -9 -r ${CMAKE_CURRENT_BINARY_DIR}/${TARBALL_NAME}.zip . -x '*/.svn/*' '*/.git/*' '*.vcproj' '*.pyc'
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if(ENABLE_SOLUTION_FOLDERS)


@@ -175,6 +175,12 @@
/* OpenCL Support */
#cmakedefine HAVE_OPENCL
/* AMD's OpenCL Fast Fourier Transform Library*/
#cmakedefine HAVE_CLAMDFFT
/* AMD's Basic Linear Algebra Subprograms Library*/
#cmakedefine HAVE_CLAMDBLAS
/* NVidia Cuda Fast Fourier Transform (FFT) API*/
#cmakedefine HAVE_CUFFT
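A hedged sketch of how such generated flags are typically consumed in source files; the printed messages are placeholders, not OpenCV's actual dispatch logic:

#include "cvconfig.h"
#include <cstdio>

int main()
{
#ifdef HAVE_CLAMDFFT
    std::puts("clAmdFft runtime found: OpenCL FFT path compiled in");
#else
    std::puts("clAmdFft absent: FFT falls back to other backends");
#endif
#ifdef HAVE_CLAMDBLAS
    std::puts("clAmdBlas runtime found: OpenCL BLAS path compiled in");
#endif
    return 0;
}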


@@ -1,4 +1,4 @@
IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,


@@ -7,7 +7,7 @@
%
% creating matrices
% from scratch
% from previously allocated data: plain arrays, vectors
% from previously allocated data: plain arrays, vectors
% converting to/from old-style structures
%
% element access, iteration through matrix elements
@@ -30,7 +30,7 @@
% color space transformations
% histograms & back projections
% contours
%
%
% i/o:
% displaying images
% saving/loading to/from file (XML/YAML & image file formats)
@@ -40,19 +40,19 @@
% findcontours, bounding box, convex hull, min area rect,
% transformations, to/from homogeneous coordinates
% matching point sets: homography, fundamental matrix, rigid transforms
%
%
% 3d:
% camera calibration, pose estimation.
% uncalibrated case
% stereo: rectification, running stereo correspondence, obtaining the depth.
%
%
% feature detection:
% features2d toolbox
%
%
% object detection:
% using a classifier running on a sliding window: cascadeclassifier + hog.
% using salient point features: features2d -> matching
%
%
% statistical data processing:
% clustering (k-means),
% classification + regression (SVM, boosting, k-nearest),
@@ -148,22 +148,22 @@
%\texttt{\href{http://www.ros.org/wiki/Stack Manifest}{stack manifest}} & Description of a ROS stack.
%\end{tabular}
\emph{The OpenCV C++ reference manual is here: \url{http://opencv.itseez.com}. Use \textbf{Quick Search} to find descriptions of the particular functions and classes}
\emph{The OpenCV C++ reference manual is here: \url{http://docs.opencv.org}. Use \textbf{Quick Search} to find descriptions of the particular functions and classes}
\section{Key OpenCV Classes}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Point_}{Point\_}} & Template 2D point class \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Point3_}{Point3\_}} & Template 3D point class \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Size_}{Size\_}} & Template size (width, height) class \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Vec}{Vec}} & Template short vector class \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Matx}{Matx}} & Template small matrix class \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Scalar_}{Scalar}} & 4-element vector \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Rect_}{Rect}} & Rectangle \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Range}{Range}} & Integer value range \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Mat}{Mat}} & 2D or multi-dimensional dense array (can be used to store matrices, images, histograms, feature descriptors, voxel volumes etc.)\\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#sparsemat}{SparseMat}} & Multi-dimensional sparse array \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Ptr}{Ptr}} & Template smart pointer class
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Point_}{Point\_}} & Template 2D point class \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Point3_}{Point3\_}} & Template 3D point class \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Size_}{Size\_}} & Template size (width, height) class \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Vec}{Vec}} & Template short vector class \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Matx}{Matx}} & Template small matrix class \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Scalar_}{Scalar}} & 4-element vector \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Rect_}{Rect}} & Rectangle \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Range}{Range}} & Integer value range \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Mat}{Mat}} & 2D or multi-dimensional dense array (can be used to store matrices, images, histograms, feature descriptors, voxel volumes etc.)\\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#sparsemat}{SparseMat}} & Multi-dimensional sparse array \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Ptr}{Ptr}} & Template smart pointer class
\end{tabular}
\section{Matrix Basics}
@@ -173,7 +173,7 @@
\> \texttt{Mat image(240, 320, CV\_8UC3);} \\
\textbf{[Re]allocate a pre-declared matrix}\\
\> \texttt{image.\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-create}{create}(480, 640, CV\_8UC3);}\\
\> \texttt{image.\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-create}{create}(480, 640, CV\_8UC3);}\\
\textbf{Create a matrix initialized with a constant}\\
\> \texttt{Mat A33(3, 3, CV\_32F, Scalar(5));} \\
@@ -189,8 +189,8 @@
\> \texttt{Mat B22 = Mat(2, 2, CV\_32F, B22data).clone();}\\
\textbf{Initialize a random matrix}\\
\> \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randu}{randu}(image, Scalar(0), Scalar(256)); }\textit{// uniform dist}\\
\> \texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randn}{randn}(image, Scalar(128), Scalar(10)); }\textit{// Gaussian dist}\\
\> \texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#randu}{randu}(image, Scalar(0), Scalar(256)); }\textit{// uniform dist}\\
\> \texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#randn}{randn}(image, Scalar(128), Scalar(10)); }\textit{// Gaussian dist}\\
\textbf{Convert matrix to/from other structures}\\
\>\textbf{(without copying the data)}\\
@@ -230,32 +230,32 @@
\section{Matrix Manipulations: Copying, Shuffling, Part Access}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-copyto}{src.copyTo(dst)}} & Copy matrix to another one \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-convertto}{src.convertTo(dst,type,scale,shift)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Scale and convert to another datatype \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-clone}{m.clone()}} & Make deep copy of a matrix \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-reshape}{m.reshape(nch,nrows)}} & Change matrix dimensions and/or number of channels without copying data \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-copyto}{src.copyTo(dst)}} & Copy matrix to another one \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-convertto}{src.convertTo(dst,type,scale,shift)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Scale and convert to another datatype \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-clone}{m.clone()}} & Make deep copy of a matrix \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-reshape}{m.reshape(nch,nrows)}} & Change matrix dimensions and/or number of channels without copying data \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-row}{m.row(i)}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-col}{m.col(i)}} & Take a matrix row/column \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-row}{m.row(i)}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-col}{m.col(i)}} & Take a matrix row/column \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-rowrange}{m.rowRange(Range(i1,i2))}}
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-colrange}{m.colRange(Range(j1,j2))}} & \ \ \ \ \ \ \ Take a matrix row/column span \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-rowrange}{m.rowRange(Range(i1,i2))}}
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-colrange}{m.colRange(Range(j1,j2))}} & \ \ \ \ \ \ \ Take a matrix row/column span \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#mat-diag}{m.diag(i)}} & Take a matrix diagonal \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#mat-diag}{m.diag(i)}} & Take a matrix diagonal \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#Mat}{m(Range(i1,i2),Range(j1,j2)), m(roi)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ Take a submatrix \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#Mat}{m(Range(i1,i2),Range(j1,j2)), m(roi)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ Take a submatrix \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#repeat}{m.repeat(ny,nx)}} & Make a bigger matrix from a smaller one \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#repeat}{m.repeat(ny,nx)}} & Make a bigger matrix from a smaller one \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#flip}{flip(src,dst,dir)}} & Reverse the order of matrix rows and/or columns \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#flip}{flip(src,dst,dir)}} & Reverse the order of matrix rows and/or columns \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#split}{split(...)}} & Split multi-channel matrix into separate channels \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#split}{split(...)}} & Split multi-channel matrix into separate channels \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#merge}{merge(...)}} & Make a multi-channel matrix out of the separate channels \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#merge}{merge(...)}} & Make a multi-channel matrix out of the separate channels \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#mixchannels}{mixChannels(...)}} & Generalized form of split() and merge() \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#mixchannels}{mixChannels(...)}} & Generalized form of split() and merge() \\
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#randshuffle}{randShuffle(...)}} & Randomly shuffle matrix elements \\
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#randshuffle}{randShuffle(...)}} & Randomly shuffle matrix elements \\
\end{tabular}
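A short, illustrative combination of several of the calls above (not part of the cheat sheet itself; sizes and ROI are arbitrary):

#include <opencv2/core/core.hpp>
#include <vector>
using namespace cv;

void roiDemo()
{
    Mat img(240, 320, CV_8UC3, Scalar::all(0));
    Mat roi = img(Rect(10, 10, 100, 80));  // submatrix: no data copied
    Mat patch = roi.clone();               // deep copy of the ROI
    Mat flat = patch.reshape(1);           // view 3 channels as 1 channel
    std::vector<Mat> planes;
    split(patch, planes);                  // separate B, G, R planes
}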
@@ -278,17 +278,17 @@ other matrix operations, such as
\begin{itemize}
\item
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#add}{add()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#subtract}{subtract()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#multiply}{multiply()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#divide}{divide()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#absdiff}{absdiff()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-and}{bitwise\_and()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-or}{bitwise\_or()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#bitwise-xor}{bitwise\_xor()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#max}{max()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#min}{min()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#compare}{compare()}}
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#add}{add()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#subtract}{subtract()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#multiply}{multiply()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#divide}{divide()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#absdiff}{absdiff()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#bitwise-and}{bitwise\_and()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#bitwise-or}{bitwise\_or()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#bitwise-xor}{bitwise\_xor()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#max}{max()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#min}{min()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#compare}{compare()}}
-- correspondingly, addition, subtraction, element-wise multiplication ... comparison of two matrices or a matrix and a scalar.
@@ -314,49 +314,49 @@ Exa\=mple. \href{http://en.wikipedia.org/wiki/Alpha_compositing}{Alpha compositi
\item
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#sum}{sum()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#mean}{mean()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#meanstddev}{meanStdDev()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#norm}{norm()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#countnonzero}{countNonZero()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#minmaxloc}{minMaxLoc()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#sum}{sum()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#mean}{mean()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#meanstddev}{meanStdDev()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#norm}{norm()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#countnonzero}{countNonZero()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#minmaxloc}{minMaxLoc()}},
-- various statistics of matrix elements.
\item
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#exp}{exp()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#log}{log()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#pow}{pow()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#sqrt}{sqrt()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#carttopolar}{cartToPolar()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#polartocart}{polarToCart()}}
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#exp}{exp()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#log}{log()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#pow}{pow()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#sqrt}{sqrt()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#carttopolar}{cartToPolar()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#polartocart}{polarToCart()}}
-- the classical math functions.
\item
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#scaleadd}{scaleAdd()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#transpose}{transpose()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#gemm}{gemm()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#invert}{invert()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#solve}{solve()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#determinant}{determinant()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#trace}{trace()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#eigen}{eigen()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#SVD}{SVD}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#scaleadd}{scaleAdd()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#transpose}{transpose()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#gemm}{gemm()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#invert}{invert()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#solve}{solve()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#determinant}{determinant()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#trace}{trace()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#eigen}{eigen()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#SVD}{SVD}},
-- the algebraic functions + SVD class.
\item
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#dft}{dft()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#idft}{idft()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#dct}{dct()}},
\texttt{\href{http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html\#idct}{idct()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#dft}{dft()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#idft}{idft()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#dct}{dct()}},
\texttt{\href{http://docs.opencv.org/modules/core/doc/operations_on_arrays.html\#idct}{idct()}},
-- discrete Fourier and cosine transformations
-- discrete Fourier and cosine transformations
\end{itemize}
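For example, a minimal forward/inverse DFT round trip (an illustrative sketch; the input image is assumed to be 8-bit):

#include <opencv2/core/core.hpp>
using namespace cv;

void dftRoundTrip(const Mat& img8u)
{
    Mat f, spectrum, restored;
    img8u.convertTo(f, CV_32F);                             // dft() needs float input
    dft(f, spectrum, DFT_COMPLEX_OUTPUT);                   // forward transform
    idft(spectrum, restored, DFT_REAL_OUTPUT | DFT_SCALE);  // inverse, rescaled
}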
For some operations a more convenient \href{http://opencv.itseez.com/modules/core/doc/basic_structures.html\#matrix-expressions}{algebraic notation} can be used, for example:
For some operations a more convenient \href{http://docs.opencv.org/modules/core/doc/basic_structures.html\#matrix-expressions}{algebraic notation} can be used, for example:
\begin{tabbing}
\texttt{Mat}\={} \texttt{delta = (J.t()*J + lambda*}\\
\>\texttt{Mat::eye(J.cols, J.cols, J.type()))}\\
@@ -370,20 +370,20 @@ implements the core of Levenberg-Marquardt optimization algorithm.
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#filter2d}{filter2D()}} & Non-separable linear filter \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#filter2d}{filter2D()}} & Non-separable linear filter \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#sepfilter2d}{sepFilter2D()}} & Separable linear filter \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#sepfilter2d}{sepFilter2D()}} & Separable linear filter \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#blur}{boxFilter()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#gaussianblur}{GaussianBlur()}},
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#medianblur}{medianBlur()}},
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#bilateralfilter}{bilateralFilter()}}
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#blur}{boxFilter()}}, \texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#gaussianblur}{GaussianBlur()}},
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#medianblur}{medianBlur()}},
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#bilateralfilter}{bilateralFilter()}}
& Smooth the image with one of the linear or non-linear filters \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#sobel}{Sobel()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#scharr}{Scharr()}}
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#sobel}{Sobel()}}, \texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#scharr}{Scharr()}}
& Compute the spatial image derivatives \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#laplacian}{Laplacian()}} & compute Laplacian: $\Delta I = \frac{\partial ^ 2 I}{\partial x^2} + \frac{\partial ^ 2 I}{\partial y^2}$ \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#laplacian}{Laplacian()}} & compute Laplacian: $\Delta I = \frac{\partial ^ 2 I}{\partial x^2} + \frac{\partial ^ 2 I}{\partial y^2}$ \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#erode}{erode()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/filtering.html\#dilate}{dilate()}} & Morphological operations \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#erode}{erode()}}, \texttt{\href{http://docs.opencv.org/modules/imgproc/doc/filtering.html\#dilate}{dilate()}} & Morphological operations \\
\end{tabular}
@@ -398,17 +398,17 @@ Exa\=mple. Filter image in-place with a 3x3 high-pass kernel\\
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#resize}{resize()}} & Resize image \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#resize}{resize()}} & Resize image \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#getrectsubpix}{getRectSubPix()}} & Extract an image patch \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#getrectsubpix}{getRectSubPix()}} & Extract an image patch \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#warpaffine}{warpAffine()}} & Warp image affinely\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#warpaffine}{warpAffine()}} & Warp image affinely\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#warpperspective}{warpPerspective()}} & Warp image perspectively\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#warpperspective}{warpPerspective()}} & Warp image perspectively\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#remap}{remap()}} & Generic image warping\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#remap}{remap()}} & Generic image warping\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#convertmaps}{convertMaps()}} & Optimize maps for a faster remap() execution\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#convertmaps}{convertMaps()}} & Optimize maps for a faster remap() execution\\
\end{tabular}
@@ -422,21 +422,21 @@ Example. Decimate image by factor of $\sqrt{2}$:\\
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#cvtcolor}{cvtColor()}} & Convert image from one color space to another \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#cvtcolor}{cvtColor()}} & Convert image from one color space to another \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#threshold}{threshold()}}, \texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#adaptivethreshold}{adaptivethreshold()}} & Convert grayscale image to binary image using a fixed or a variable threshold \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#threshold}{threshold()}}, \texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#adaptivethreshold}{adaptivethreshold()}} & Convert grayscale image to binary image using a fixed or a variable threshold \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#floodfill}{floodFill()}} & Find a connected component using region growing algorithm\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#floodfill}{floodFill()}} & Find a connected component using region growing algorithm\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#integral}{integral()}} & Compute integral image \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#integral}{integral()}} & Compute integral image \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#distancetransform}{distanceTransform()}}
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#distancetransform}{distanceTransform()}}
& build distance map or discrete Voronoi diagram for a binary image. \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#watershed}{watershed()}},
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html\#grabcut}{grabCut()}}
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#watershed}{watershed()}},
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#grabcut}{grabCut()}}
& marker-based image segmentation algorithms.
See the samples \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
See the samples \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
\end{tabular}
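A small sketch tying a few of these together (file name, threshold, and seed point are arbitrary assumptions):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

void segmentDemo()
{
    Mat gray = imread("input.png", 0), bw;        // load as grayscale
    threshold(gray, bw, 128, 255, THRESH_BINARY); // fixed-threshold binarization
    // grow a connected region from the image center
    floodFill(bw, Point(bw.cols / 2, bw.rows / 2), Scalar(160));
}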
@@ -445,13 +445,13 @@ Example. Decimate image by factor of $\sqrt{2}$:\\
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#calchist}{calcHist()}} & Compute image(s) histogram \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/histograms.html\#calchist}{calcHist()}} & Compute image(s) histogram \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#calcbackproject}{calcBackProject()}} & Back-project the histogram \\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/histograms.html\#calcbackproject}{calcBackProject()}} & Back-project the histogram \\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#equalizehist}{equalizeHist()}} & Normalize image brightness and contrast\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/histograms.html\#equalizehist}{equalizeHist()}} & Normalize image brightness and contrast\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/histograms.html\#comparehist}{compareHist()}} & Compare two histograms\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/histograms.html\#comparehist}{compareHist()}} & Compare two histograms\\
\end{tabular}
@@ -464,12 +464,12 @@ Example. Compute Hue-Saturation histogram of an image:\\
\end{tabbing}
\subsection{Contours}
See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/contours2.cpp}{contours2.cpp}} and \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/squares.cpp}{squares.cpp}}
See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours2.cpp}{contours2.cpp}} and \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/squares.cpp}{squares.cpp}}
samples on what contours are and how to use them.
\section{Data I/O}
\href{http://opencv.itseez.com/modules/core/doc/xml_yaml_persistence.html\#xml-yaml-file-storages-writing-to-a-file-storage}{XML/YAML storages} are collections (possibly nested) of scalar values, structures and heterogeneous lists.
\href{http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html\#xml-yaml-file-storages-writing-to-a-file-storage}{XML/YAML storages} are collections (possibly nested) of scalar values, structures and heterogeneous lists.
\begin{tabbing}
\textbf{Wr}\=\textbf{iting data to YAML (or XML)}\\
@@ -509,7 +509,7 @@ samples on what contours are and how to use them.
\texttt{Rect r; r.x = (int)tm["x"], r.y = (int)tm["y"];}\\
\texttt{r.width = (int)tm["width"], r.height = (int)tm["height"];}\\
\texttt{int lbp\_val = 0;}\\
\texttt{FileNodeIterator it = tm["lbp"].begin();}\\
@@ -521,9 +521,9 @@ samples on what contours are and how to use them.
\begin{tabbing}
\textbf{Wr}\=\textbf{iting and reading raster images}\\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imwrite}{imwrite}("myimage.jpg", image);}\\
\texttt{Mat image\_color\_copy = \href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 1);}\\
\texttt{Mat image\_grayscale\_copy = \href{http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 0);}\\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imwrite}{imwrite}("myimage.jpg", image);}\\
\texttt{Mat image\_color\_copy = \href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 1);}\\
\texttt{Mat image\_grayscale\_copy = \href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 0);}\\
\end{tabbing}
\emph{The functions can read/write images in the following formats: \textbf{BMP (.bmp), JPEG (.jpg, .jpeg), TIFF (.tif, .tiff), PNG (.png), PBM/PGM/PPM (.p?m), Sun Raster (.sr), JPEG 2000 (.jp2)}. Every format supports 8-bit, 1- or 3-channel images. Some formats (PNG, JPEG 2000) support 16 bits per channel.}
@@ -544,72 +544,72 @@ samples on what contours are and how to use them.
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#namedwindow}{namedWindow(winname,flags)}} & \ \ \ \ \ \ \ \ \ \ Create named highgui window \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#namedwindow}{namedWindow(winname,flags)}} & \ \ \ \ \ \ \ \ \ \ Create named highgui window \\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#destroywindow}{destroyWindow(winname)}} & \ \ \ Destroy the specified window \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#destroywindow}{destroyWindow(winname)}} & \ \ \ Destroy the specified window \\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#imshow}{imshow(winname, mtx)}} & Show image in the window \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#imshow}{imshow(winname, mtx)}} & Show image in the window \\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#waitkey}{waitKey(delay)}} & Wait for a key press during the specified time interval (or forever). Process events while waiting. \emph{Do not forget to call this function several times a second in your code.} \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#waitkey}{waitKey(delay)}} & Wait for a key press during the specified time interval (or forever). Process events while waiting. \emph{Do not forget to call this function several times a second in your code.} \\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#createtrackbar}{createTrackbar(...)}} & Add trackbar (slider) to the specified window \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#createtrackbar}{createTrackbar(...)}} & Add trackbar (slider) to the specified window \\
\texttt{\href{http://opencv.itseez.com/modules/highgui/doc/user_interface.html\#setmousecallback}{setMouseCallback(...)}} & \ \ Set the callback on mouse clicks and movements in the specified window \\
\texttt{\href{http://docs.opencv.org/modules/highgui/doc/user_interface.html\#setmousecallback}{setMouseCallback(...)}} & \ \ Set the callback on mouse clicks and movements in the specified window \\
\end{tabular}
See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/}{OpenCV samples} on how to use the GUI functions.
See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/}{OpenCV samples} on how to use the GUI functions.
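A minimal capture-and-display loop using these calls (a sketch; camera index 0 and the 30 ms delay are assumptions):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    VideoCapture cap(0);             // default camera
    Mat frame;
    namedWindow("view");
    while (cap.read(frame))
    {
        imshow("view", frame);
        if (waitKey(30) >= 0) break; // pump GUI events ~30x per second
    }
    return 0;
}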
\section{Camera Calibration, Pose Estimation and Depth Estimation}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#calibratecamera}{calibrateCamera()}} & Calibrate camera from several views of a calibration pattern. \\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#calibratecamera}{calibrateCamera()}} & Calibrate camera from several views of a calibration pattern. \\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findchessboardcorners}{findChessboardCorners()}} & \ \ \ \ \ \ Find feature points on the checkerboard calibration pattern. \\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findchessboardcorners}{findChessboardCorners()}} & \ \ \ \ \ \ Find feature points on the checkerboard calibration pattern. \\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#solvepnp}{solvePnP()}} & Find the object pose from the known projections of its feature points. \\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#solvepnp}{solvePnP()}} & Find the object pose from the known projections of its feature points. \\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereocalibrate}{stereoCalibrate()}} & Calibrate stereo camera. \\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereocalibrate}{stereoCalibrate()}} & Calibrate stereo camera. \\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereorectify}{stereoRectify()}} & Compute the rectification transforms for a calibrated stereo camera.\\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#stereorectify}{stereoRectify()}} & Compute the rectification transforms for a calibrated stereo camera.\\
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html\#initundistortrectifymap}{initUndistortRectifyMap()}} & \ \ \ \ \ \ Compute rectification map (for \texttt{remap()}) for each stereo camera head.\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html\#initundistortrectifymap}{initUndistortRectifyMap()}} & \ \ \ \ \ \ Compute rectification map (for \texttt{remap()}) for each stereo camera head.\\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoBM}{StereoBM}}, \texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoSGBM}{StereoSGBM}} & The stereo correspondence engines to be run on rectified stereo pairs.\\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoBM}{StereoBM}}, \texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#StereoSGBM}{StereoSGBM}} & The stereo correspondence engines to be run on rectified stereo pairs.\\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#reprojectimageto3d}{reprojectImageTo3D()}} & Convert disparity map to 3D point cloud.\\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#reprojectimageto3d}{reprojectImageTo3D()}} & Convert disparity map to 3D point cloud.\\
\texttt{\href{http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findhomography}{findHomography()}} & Find best-fit perspective transformation between two 2D point sets. \\
\texttt{\href{http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\#findhomography}{findHomography()}} & Find best-fit perspective transformation between two 2D point sets. \\
\end{tabular}
To calibrate a camera, you can use \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/calibration.cpp}{calibration.cpp}} or
\texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To calibrate a camera, you can use \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/calibration.cpp}{calibration.cpp}} or
\texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To get disparity maps and point clouds, use the
\texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
\texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
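
A minimal sketch of the post-calibration stereo pipeline (assuming rectified images \texttt{left} and \texttt{right} and the matrix \texttt{Q} returned by \texttt{stereoRectify()}):
\begin{verbatim}
Mat disp, xyz;
StereoSGBM sgbm(0, 64, 11);   // minDisparity, numDisparities, SADWindowSize
sgbm(left, right, disp);      // disparity map for the rectified pair
reprojectImageTo3D(disp, xyz, Q, true);  // disparity -> 3D point cloud
\end{verbatim}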
\section{Object Detection}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.itseez.com/modules/imgproc/doc/object_detection.html\#matchtemplate}{matchTemplate}} & Compute proximity map for given template.\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/object_detection.html\#matchtemplate}{matchTemplate}} & Compute proximity map for given template.\\
\texttt{\href{http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html\#cascadeclassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suited for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/facedetect.cpp}{facedetect.cpp}}\\
\texttt{\href{http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html\#cascadeclassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suited for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/c/facedetect.cpp}{facedetect.cpp}}\\
\texttt{{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suited for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\texttt{{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suited for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\end{tabular}
%
%
% feature detection:
% features2d toolbox
%
%
% object detection:
% using a classifier running on a sliding window: cascadeclassifier + hog.
% using salient point features: features2d -> matching
%
%
% statistical data processing:
% clustering (k-means),
% classification + regression (SVM, boosting, k-nearest),

View File

@ -9,7 +9,7 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use :point:`Point <>` to define 2D points in an image.
* Use :scalar:`Scalar <>` and learn why it is useful
* Draw a **line** by using the OpenCV function :line:`line <>`
* Draw an **ellipse** by using the OpenCV function :ellipse:`ellipse <>`
@ -30,15 +30,15 @@ Point
It represents a 2D point, specified by its image coordinates :math:`x` and :math:`y`. We can define it as:
.. code-block:: cpp
Point pt;
pt.x = 10;
pt.y = 8;
or
.. code-block:: cpp
Point pt = Point(10, 8);
Scalar
@ -48,7 +48,7 @@ Scalar
* Let's see an example: if we are asked for a color argument and we give:
.. code-block:: cpp
Scalar( a, b, c )
We would be defining a BGR color: *Blue = a*, *Green = b* and *Red = c*
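
For instance, a quick sketch (the channel order shown is an assumption that holds for the default BGR images used throughout these tutorials):

.. code-block:: cpp

   Scalar blue( 255, 0, 0 );  // Blue = 255, Green = 0, Red = 0
   Scalar red ( 0, 0, 255 );  // Blue = 0,   Green = 0, Red = 255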
@ -56,12 +56,12 @@ Scalar
Code
=====
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
Explanation
=============
#. Since we plan to draw two examples (an atom and a rook), we have to create two images and two windows to display them.
.. code-block:: cpp
@ -69,7 +69,7 @@ Explanation
char atom_window[] = "Drawing 1: Atom";
char rook_window[] = "Drawing 2: Rook";
/// Create black empty images
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
@ -79,7 +79,7 @@ Explanation
/// 1. Draw a simple atom:
/// 1.a. Creating ellipses
MyEllipse( atom_image, 90 );
MyEllipse( atom_image, 0 );
MyEllipse( atom_image, 45 );
@ -105,7 +105,7 @@ Explanation
-1,
8 );
/// 2.c. Create a few lines
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
@ -113,15 +113,15 @@ Explanation
#. Let's check what is inside each of these functions:
* *MyLine*
.. code-block:: cpp
void MyLine( Mat img, Point start, Point end )
{
int thickness = 2;
int lineType = 8;
line( img,
start,
end,
Scalar( 0, 0, 0 ),
@ -136,12 +136,12 @@ Explanation
* Draw a line from Point **start** to Point **end**
* The line is displayed in the image **img**
* The line color is defined by **Scalar( 0, 0, 0)**, which is the BGR value corresponding to **Black**
* The line thickness is set to **thickness** (in this case 2)
* The line is an 8-connected one (**lineType** = 8)
* *MyEllipse*
.. code-block:: cpp
void MyEllipse( Mat img, double angle )
{
@ -152,15 +152,15 @@ Explanation
Point( w/2.0, w/2.0 ),
Size( w/4.0, w/16.0 ),
angle,
0,
360,
Scalar( 255, 0, 0 ),
thickness,
lineType );
}
From the code above, we can observe that the function :ellipse:`ellipse <>` draws an ellipse such that:
.. container:: enumeratevisibleitemswithsquare
* The ellipse is displayed in the image **img**
@ -169,7 +169,7 @@ Explanation
* The ellipse extends an arc between **0** and **360** degrees
* The color of the figure will be **Scalar( 255, 0, 0 )**, which means blue in BGR.
* The ellipse's **thickness** is 2.
* *MyFilledCircle*
@ -180,11 +180,11 @@ Explanation
int thickness = -1;
int lineType = 8;
circle( img,
center,
w/32.0,
Scalar( 0, 0, 255 ),
thickness,
lineType );
}
@ -193,9 +193,9 @@ Explanation
.. container:: enumeratevisibleitemswithsquare
* The image where the circle will be displayed (**img**)
* The center of the circle denoted as the Point **center**
* The radius of the circle: **w/32.0**
* The color of the circle: **Scalar(0, 0, 255)** which means *Red* in BGR
* Since **thickness** = -1, the circle will be drawn filled.
* *MyPolygon*
@ -237,18 +237,18 @@ Explanation
npt,
1,
Scalar( 255, 255, 255 ),
lineType );
}
To draw a filled polygon we use the function :fill_poly:`fillPoly <>`. We note that:
.. container:: enumeratevisibleitemswithsquare
* The polygon will be drawn on **img**
* The vertices of the polygon are the set of points in **ppt**
* The total number of vertices to be drawn is **npt**
* The number of polygons to be drawn is only **1**
* The color of the polygon is defined by **Scalar( 255, 255, 255)**, which is the BGR value for *white*
* *rectangle*
@ -277,4 +277,4 @@ Compiling and running your program should give you a result like this:
.. image:: images/Drawing_1_Tutorial_Result_0.png
:alt: Drawing Tutorial 1 - Final Result
:align: center

View File

@ -19,10 +19,10 @@ Code
.. container:: enumeratevisibleitemswithsquare
* In the previous tutorial (:ref:`Drawing_1`) we drew diverse geometric figures, giving as input parameters such as coordinates (in the form of :point:`Points <>`), color, thickness, etc. You might have noticed that we gave specific values for these arguments.
* In this tutorial, we intend to use *random* values for the drawing parameters. Also, we intend to populate our image with a large number of geometric figures. Since we will be initializing them in a random fashion, this process will be automatic and done by using *loops* .
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp>`_ .
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp>`_ .
Explanation
============
@ -43,7 +43,7 @@ Explanation
Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
/// Show it in a window during DELAY ms
imshow( window_name, image );
#. Then we proceed to draw crazy stuff. After taking a look at the code, you can see that it is mainly divided into 8 sections, defined as functions:
@ -110,22 +110,22 @@ Explanation
* The *for* loop will repeat **NUMBER** times. Since the function :line:`line <>` is inside this loop, that means that **NUMBER** lines will be generated.
* The line extremes are given by *pt1* and *pt2*. For *pt1* we can see that:
.. code-block:: cpp
pt1.x = rng.uniform( x_1, x_2 );
pt1.y = rng.uniform( y_1, y_2 );
* We know that **rng** is a *Random number generator* object. In the code above we are calling **rng.uniform(a,b)**. This generates a random value from a uniform distribution between **a** and **b** (inclusive of **a**, exclusive of **b**).
* From the explanation above, we deduce that the extremes *pt1* and *pt2* will be random values, so the line positions will be quite unpredictable, giving a nice visual effect (check out the Result section below).
* As another observation, we notice that in the :line:`line <>` arguments, for the *color* input we enter:
.. code-block:: cpp
randomColor(rng)
Let's check the function implementation:
.. code-block:: cpp
@ -138,7 +138,7 @@ Explanation
As we can see, the return value is a *Scalar* with 3 randomly initialized values, which are used as the *B*, *G* and *R* parameters for the line color. Hence, the color of the lines will be random too!
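
A minimal sketch of such a helper, consistent with the description above (the exact implementation in the sample may differ slightly):

.. code-block:: cpp

   static Scalar randomColor( RNG& rng )
   {
     int icolor = (unsigned) rng;  // one random 32-bit value
     // split it into three random 8-bit channel values (B, G, R)
     return Scalar( icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255 );
   }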
#. The explanation above applies to the other functions generating circles, ellipses, polygons, etc. The parameters such as *center* and *vertices* are also generated randomly.
#. Before finishing, we should also take a look at the functions *Display_Random_Text* and *Displaying_Big_End*, since they both have a few interesting features:
@ -158,7 +158,7 @@ Explanation
putText( image, "Testing text rendering", org, rng.uniform(0,8),
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@ -172,7 +172,7 @@ Explanation
.. code-block:: cpp
putText( image, "Testing text rendering", org, rng.uniform(0,8),
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
So, what does the function :put_text:`putText <>` do? In our example:
@ -197,7 +197,7 @@ Explanation
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
int lineType = 8;
Mat image2;
for( int i = 0; i < 255; i += 2 )
@ -205,7 +205,7 @@ Explanation
image2 = image - Scalar::all(i);
putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType );
imshow( window_name, image2 );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@ -222,8 +222,8 @@ Explanation
So, **image2** is the subtraction of **image** and **Scalar::all(i)**. In fact, what happens here is that every pixel of **image2** will be the result of subtracting the value of **i** from every pixel of **image** (remember that for each pixel we are considering three values such as R, G and B, so each of them will be affected)
Also remember that the subtraction operation *always* performs a **saturate** operation internally, which means that the result obtained will always be inside the allowed range (no negative values, and between 0 and 255 for our example).
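
A small illustration of this saturation with hypothetical values:

.. code-block:: cpp

   uchar p = 30;
   uchar q = saturate_cast<uchar>( p - 100 );  // -70 is clamped to 0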
Result
========
@ -234,7 +234,7 @@ As you just saw in the Code section, the program will sequentially execute diver
.. image:: images/Drawing_2_Tutorial_Result_0.jpg
:alt: Drawing Tutorial 2 - Final Result 0
:align: center
#. Then, a new set of figures, this time *rectangles*, will follow.
@ -242,13 +242,13 @@ As you just saw in the Code section, the program will sequentially execute diver
.. image:: images/Drawing_2_Tutorial_Result_2.jpg
:alt: Drawing Tutorial 2 - Final Result 2
:align: center
#. Now, *polylines* with three segments will appear on screen, again in random configurations.
.. image:: images/Drawing_2_Tutorial_Result_3.jpg
:alt: Drawing Tutorial 2 - Final Result 3
:align: center
#. Filled polygons (in this example triangles) will follow.
@ -256,7 +256,7 @@ As you just saw in the Code section, the program will sequentially execute diver
.. image:: images/Drawing_2_Tutorial_Result_5.jpg
:alt: Drawing Tutorial 2 - Final Result 5
:align: center
#. Near the end, the text *"Testing Text Rendering"* will appear in a variety of fonts, sizes, colors and positions.
@ -264,4 +264,4 @@ As you just saw in the Code section, the program will sequentially execute diver
.. image:: images/Drawing_2_Tutorial_Result_7.jpg
:alt: Drawing Tutorial 2 - Final Result 7
:align: center

View File

@ -2,6 +2,7 @@
.. |Author_BernatG| unicode:: Bern U+00E1 t U+0020 G U+00E1 bor
.. |Author_AndreyK| unicode:: Andrey U+0020 Kamaev
.. |Author_LeonidBLB| unicode:: Leonid U+0020 Beynenson
.. |Author_VsevolodG| unicode:: Vsevolod U+0020 Glumov
.. |Author_VictorE| unicode:: Victor U+0020 Eruhimov
.. |Author_ArtemM| unicode:: Artem U+0020 Myagkov
.. |Author_FernandoI| unicode:: Fernando U+0020 Iglesias U+0020 Garc U+00ED a

View File

@ -15,7 +15,7 @@ In this tutorial you will learn how to:
* Use :surf_descriptor_extractor:`SurfDescriptorExtractor<>` and its function :descriptor_extractor:`compute<>` to perform the required calculations.
* Use a :brute_force_matcher:`BruteForceMatcher<>` to match the feature vectors
* Use the function :draw_matches:`drawMatches<>` to draw the detected matches.
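
Taken together, these steps form a short pipeline. A minimal sketch, using the same 2.x-era API as the code below and assuming two grayscale images ``img_1`` and ``img_2`` are already loaded:

.. code-block:: cpp

   SurfFeatureDetector detector( 400 );  // hessian threshold
   std::vector<KeyPoint> kp_1, kp_2;
   detector.detect( img_1, kp_1 );
   detector.detect( img_2, kp_2 );

   SurfDescriptorExtractor extractor;
   Mat desc_1, desc_2;
   extractor.compute( img_1, kp_1, desc_1 );
   extractor.compute( img_2, kp_2, desc_2 );

   BruteForceMatcher< L2<float> > matcher;
   std::vector< DMatch > matches;
   matcher.match( desc_1, desc_2, matches );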
Theory
======
@ -23,9 +23,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
.. code-block:: cpp
#include <stdio.h>
#include <iostream>
@ -45,7 +45,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
@ -74,7 +74,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
@ -93,9 +93,9 @@ Explanation
Result
======
#. Here is the result after applying the BruteForce matcher between the two original images:
.. image:: images/Feature_Description_BruteForce_Result.jpg
:align: center
:height: 200pt

View File

@ -14,7 +14,7 @@ In this tutorial you will learn how to:
* Use the :surf_feature_detector:`SurfFeatureDetector<>` and its function :feature_detector_detect:`detect<>` to perform the detection process
* Use the function :draw_keypoints:`drawKeypoints<>` to draw the detected keypoints
Theory
======
@ -22,14 +22,14 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
.. code-block:: cpp
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
@ -44,7 +44,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@ -61,8 +61,8 @@ This tutorial code's is shown lines below. You can also download it from `here <
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
@ -82,9 +82,9 @@ Explanation
Result
======
#. Here is the result of the feature detection applied to the first image:
.. image:: images/Feature_Detection_Result_a.jpg
:align: center
:height: 125pt
@ -92,6 +92,6 @@ Result
#. And here is the result for the second image:
.. image:: images/Feature_Detection_Result_b.jpg
:align: center
:height: 200pt

View File

@ -19,9 +19,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
.. code-block:: cpp
#include <stdio.h>
#include <iostream>
@ -41,7 +41,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@ -79,7 +79,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
@ -87,13 +87,13 @@ This tutorial code's is shown lines below. You can also download it from `here <
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance < 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
@ -115,9 +115,9 @@ Explanation
Result
======
#. Here is the result of the feature detection applied to the first image:
.. image:: images/Featur_FlannMatcher_Result.jpg
:align: center
:height: 250pt

View File

@ -12,7 +12,7 @@ In this tutorial you will learn how to:
* Use the function :find_homography:`findHomography<>` to find the transform between matched keypoints.
* Use the function :perspective_transform:`perspectiveTransform<>` to map the points.
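
A minimal sketch of how the two functions cooperate (assuming ``H`` has already been computed from matched keypoints, as in the code below):

.. code-block:: cpp

   std::vector<Point2f> obj_corners(4), scene_corners(4);
   obj_corners[0] = Point2f( 0, 0 );
   obj_corners[1] = Point2f( img_object.cols, 0 );
   obj_corners[2] = Point2f( img_object.cols, img_object.rows );
   obj_corners[3] = Point2f( 0, img_object.rows );
   perspectiveTransform( obj_corners, scene_corners, H );  // object corners -> scene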
Theory
======
@ -20,9 +20,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
.. code-block:: cpp
#include <stdio.h>
#include <iostream>
@ -43,7 +43,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
@ -81,21 +81,21 @@ This tutorial code's is shown lines below. You can also download it from `here <
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
@ -103,7 +103,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
@ -143,6 +143,6 @@ Result
#. And here is the result for the detected object (highlighted in green)
.. image:: images/Feature_Homography_Result.jpg
:align: center
:height: 200pt

View File

@ -19,9 +19,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -55,7 +55,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
@ -72,7 +72,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 ) { maxCorners = 1; }
/// Parameters for Shi-Tomasi algorithm
vector<Point2f> corners;
double qualityLevel = 0.01;
@ -86,7 +86,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
copy = src.clone();
/// Apply corner detection
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
@ -95,18 +95,18 @@ This tutorial code's is shown lines below. You can also download it from `here <
blockSize,
useHarrisDetector,
k );
/// Draw corners detected
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),
rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
/// Set the needed parameters to find the refined corners
Size winSize = Size( 5, 5 );
@ -118,7 +118,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Write them down
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
@ -129,10 +129,10 @@ Result
======
.. image:: images/Corner_Subpixeles_Original_Image.jpg
:align: center
Here is the result:
.. image:: images/Corner_Subpixeles_Result.jpg
:align: center

View File

@ -11,7 +11,7 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :corner_eigenvals_and_vecs:`cornerEigenValsAndVecs <>` to find the eigenvalues and eigenvectors to determine if a pixel is a corner.
* Use the OpenCV function :corner_min_eigenval:`cornerMinEigenVal <>` to find the minimum eigenvalues for corner detection.
* Implement our own version of the Harris detector as well as the Shi-Tomasi detector, by using the two functions above.
Theory
@ -20,9 +20,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -34,9 +34,9 @@ This tutorial code's is shown lines below. You can also download it from `here <
using namespace std;
/// Global variables
Mat src, src_gray;
Mat myHarris_dst; Mat myHarris_copy; Mat Mc;
Mat myShiTomasi_dst; Mat myShiTomasi_copy;
int myShiTomasi_qualityLevel = 50;
int myHarris_qualityLevel = 50;
@ -70,7 +70,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize, BORDER_DEFAULT );
/* calculate Mc */
for( int j = 0; j < src_gray.rows; j++ )
{ for( int i = 0; i < src_gray.cols; i++ )
{
@ -81,25 +81,25 @@ This tutorial code's is shown lines below. You can also download it from `here <
}
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel,
myHarris_function );
createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel,
myHarris_function );
myHarris_function( 0, 0 );
/// My Shi-Tomasi -- Using cornerMinEigenVal
myShiTomasi_dst = Mat::zeros( src_gray.size(), CV_32FC1 );
cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize, BORDER_DEFAULT );
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel,
myShiTomasi_function );
myShiTomasi_function( 0, 0 );
waitKey(0);
return(0);
}
@ -114,9 +114,9 @@ This tutorial code's is shown lines below. You can also download it from `here <
for( int j = 0; j < src_gray.rows; j++ )
{ for( int i = 0; i < src_gray.cols; i++ )
{
if( myShiTomasi_dst.at<float>(j,i) > myShiTomasi_minVal + ( myShiTomasi_maxVal -
myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
{ circle( myShiTomasi_copy, Point(i,j), 4, Scalar( rng.uniform(0,255),
rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); }
}
}
@ -135,9 +135,9 @@ This tutorial code's is shown lines below. You can also download it from `here <
{
if( Mc.at<float>(j,i) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )
*myHarris_qualityLevel/max_qualityLevel )
{ circle( myHarris_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255),
rng.uniform(0,255) ), -1, 8, 0 ); }
}
}
imshow( myHarris_window, myHarris_copy );
}
@ -151,9 +151,9 @@ Result
======
.. image:: images/My_Harris_corner_detector_Result.jpg
:align: center
.. image:: images/My_Shi_Tomasi_corner_detector_Result.jpg
:align: center

View File

@ -18,9 +18,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -56,7 +56,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
imshow( source_window, src );
@ -70,10 +70,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
* @function goodFeaturesToTrack_Demo.cpp
* @brief Apply Shi-Tomasi corner detector
*/
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 ) { maxCorners = 1; }
/// Parameters for Shi-Tomasi algorithm
vector<Point2f> corners;
double qualityLevel = 0.01;
@ -87,7 +87,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
copy = src.clone();
/// Apply corner detection
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
@ -96,18 +96,18 @@ This tutorial code's is shown lines below. You can also download it from `here <
blockSize,
useHarrisDetector,
k );
/// Draw corners detected
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),
rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
}
Explanation
@ -117,6 +117,6 @@ Result
======
.. image:: images/Feature_Detection_Result_a.jpg
:align: center

View File

@ -10,7 +10,7 @@ In this tutorial you will learn:
.. container:: enumeratevisibleitemswithsquare
* What features are and why they are important
* Use the function :corner_harris:`cornerHarris <>` to detect corners using the Harris-Stephens method.
Theory
@ -56,7 +56,7 @@ How does it work?
.. container:: enumeratevisibleitemswithsquare
* Let's look for corners. Since corners represent a variation in the gradient of the image, we will look for this "variation".
* Consider a grayscale image :math:`I`. We are going to sweep a window :math:`w(x,y)` (with displacements :math:`u` in the x direction and :math:`v` in the y direction) over :math:`I` and calculate the variation of intensity.
@ -66,10 +66,10 @@ How does it work?
where:
* :math:`w(x,y)` is the window at position :math:`(x,y)`
* :math:`I(x,y)` is the intensity at :math:`(x,y)`
* :math:`I(x+u,y+v)` is the intensity at the moved window :math:`(x+u,y+v)`
* Since we are looking for windows with corners, we are looking for windows with a large variation in intensity. Hence, we have to maximize the equation above, specifically the term:
.. math::
@ -89,36 +89,36 @@ How does it work?
.. math::
E(u,v) \approx \sum _{x,y} u^{2}I_{x}^{2} + 2uvI_{x}I_{y} + v^{2}I_{y}^{2}
* Which can be expressed in a matrix form as:
.. math::
E(u,v) \approx \begin{bmatrix}
u & v
\end{bmatrix}
\left (
\displaystyle \sum_{x,y}
w(x,y)
\begin{bmatrix}
I_x^{2} & I_{x}I_{y} \\
I_xI_{y} & I_{y}^{2}
\end{bmatrix}
\right )
\begin{bmatrix}
u \\
v
\end{bmatrix}
* Let's denote:
.. math::
M = \displaystyle \sum_{x,y}
w(x,y)
\begin{bmatrix}
I_x^{2} & I_{x}I_{y} \\
I_xI_{y} & I_{y}^{2}
\end{bmatrix}
* So, our equation now is:
@ -126,34 +126,34 @@ How does it work?
.. math::
E(u,v) \approx \begin{bmatrix}
u & v
\end{bmatrix}
M
\begin{bmatrix}
u \\
v
\end{bmatrix}
* A score is calculated for each window, to determine if it can possibly contain a corner:
.. math::
R = det(M) - k(trace(M))^{2}
where:
* det(M) = :math:`\lambda_{1}\lambda_{2}`
* trace(M) = :math:`\lambda_{1}+\lambda_{2}`
A window with a score :math:`R` greater than a certain value is considered a "corner".
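
For instance, with hypothetical eigenvalues:

.. code-block:: cpp

   double l1 = 0.9, l2 = 0.8, k = 0.04;        // two large eigenvalues
   double R  = l1*l2 - k*(l1 + l2)*(l1 + l2);  // det(M) - k*trace(M)^2
   // R = 0.72 - 0.04*2.89 = 0.6044 > 0, so this window is corner-like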
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -161,7 +161,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
@ -186,7 +186,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
cornerHarris_demo( 0, 0 );
waitKey(0);
@ -204,25 +204,25 @@ This tutorial code's is shown lines below. You can also download it from `here <
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
/// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
/// Drawing a circle around corners
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
}
}
}
/// Showing the result
namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}
@ -237,11 +237,11 @@ Result
The original image:
.. image:: images/Harris_Detector_Original_Image.jpg
:align: center
The detected corners are surrounded by a small black circle
.. image:: images/Harris_Detector_Result.jpg
:align: center

View File

@ -22,7 +22,7 @@ Cool Theory
Morphological Operations
--------------------------
* In short: A set of operations that process images based on shapes. Morphological operations apply a *structuring element* to an input image and generate an output image.
* The two most basic morphological operations are Erosion and Dilation. They have a wide array of uses, e.g.:
@ -36,7 +36,7 @@ Morphological Operations
.. image:: images/Morphology_1_Tutorial_Theory_Original_Image.png
:alt: Original image
:align: center
Dilation
^^^^^^^^^
@ -49,7 +49,7 @@ Dilation
.. image:: images/Morphology_1_Tutorial_Theory_Dilation.png
:alt: Dilation result - Theory example
:align: center
The background (bright) dilates around the black regions of the letter.
@ -58,21 +58,21 @@ Erosion
* This operation is the sister of dilation. What this does is to compute a local minimum over the area of the kernel.
* As the kernel :math:`B` is scanned over the image, we compute the minimal pixel value overlapped by :math:`B` and replace the image pixel under the anchor point with that minimal value.
* Analogously to the example for dilation, we can apply the erosion operator to the original image (shown above). You can see in the result below that the bright areas of the image (the background, apparently) get thinner, whereas the dark zones (the "writing") get bigger.
.. image:: images/Morphology_1_Tutorial_Theory_Erosion.png
:alt: Erosion result - Theory example
:align: center
Code
======
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -104,29 +104,29 @@ This tutorial code's is shown lines below. You can also download it from `here <
if( !src.data )
{ return -1; }
/// Create windows
namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
cvMoveWindow( "Dilation Demo", src.cols, 0 );
/// Create Erosion Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
Erosion );
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
&erosion_size, max_kernel_size,
Erosion );
/// Create Dilation Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation );
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation );
/// Default start
Erosion( 0, 0 );
@ -144,13 +144,13 @@ This tutorial code's is shown lines below. You can also download it from `here <
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
/// Apply the erosion operation
erode( src, erosion_dst, element );
imshow( "Erosion Demo", erosion_dst );
imshow( "Erosion Demo", erosion_dst );
}
/** @function Dilation */
@ -161,12 +161,12 @@ This tutorial code's is shown lines below. You can also download it from `here <
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( dilation_type,
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
Point( dilation_size, dilation_size ) );
/// Apply the dilation operation
dilate( src, dilation_dst, element );
imshow( "Dilation Demo", dilation_dst );
imshow( "Dilation Demo", dilation_dst );
}
@ -182,12 +182,12 @@ Explanation
* Create a set of two Trackbars for each operation:
* The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
* The second trackbar "Kernel size" returns **erosion_size** or **dilation_size** for the corresponding operation.
* Every time we move any slider, the user's function **Erosion** or **Dilation** will be called and it will update the output image based on the current trackbar values.
Let's analyze these two functions:
#. **erosion:**
.. code-block:: cpp
@ -200,32 +200,32 @@ Explanation
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
/// Apply the erosion operation
erode( src, erosion_dst, element );
imshow( "Erosion Demo", erosion_dst );
imshow( "Erosion Demo", erosion_dst );
}
* The function that performs the *erosion* operation is :erode:`erode <>`. As we can see, it receives three arguments:
* *src*: The source image
* *erosion_dst*: The output image
* *element*: This is the kernel we will use to perform the operation. If we do not specify, the default is a simple :math:`3 \times 3` matrix. Otherwise, we can specify its shape. For this, we need to use the function :get_structuring_element:`getStructuringElement <>`:
.. code-block:: cpp
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
We can choose any of three shapes for our kernel:
.. container:: enumeratevisibleitemswithsquare
+ Rectangular box: MORPH_RECT
+ Cross: MORPH_CROSS
+ Ellipse: MORPH_ELLIPSE
Then, we just have to specify the size of our kernel and the *anchor point*. If not specified, it is assumed to be in the center.
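
For example, a :math:`3 \times 3` cross-shaped kernel (a small sketch; the matrix in the comment is the kernel this call produces):

.. code-block:: cpp

   Mat cross = getStructuringElement( MORPH_CROSS, Size( 3, 3 ) );
   // cross = [ 0 1 0 ]
   //         [ 1 1 1 ]
   //         [ 0 1 0 ]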
@ -233,8 +233,8 @@ Explanation
* That is all. We are ready to perform the erosion of our image.
.. note::
Additionally, there is another parameter that allows you to perform multiple erosions (iterations) at once. We are not using it in this simple tutorial, though. You can check out the Reference for more details.
#. **dilation:**
@ -250,12 +250,12 @@ The code is below. As you can see, it is completely similar to the snippet of co
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( dilation_type,
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
Point( dilation_size, dilation_size ) );
/// Apply the dilation operation
dilate( src, dilation_dst, element );
imshow( "Dilation Demo", dilation_dst );
imshow( "Dilation Demo", dilation_dst );
}
@ -267,10 +267,10 @@ Results
.. image:: images/Morphology_1_Tutorial_Original_Image.jpg
:alt: Original image
:align: center
We get the results below. Varying the indices in the Trackbars gives different output images, naturally. Try them out! You can even try to add a third Trackbar to control the number of iterations.
.. image:: images/Morphology_1_Tutorial_Cover.jpg
:alt: Dilation and Erosion application
:align: center

View File

@ -19,11 +19,11 @@ Theory
======
.. note::
The explanation below belongs to the book `Computer Vision: Algorithms and Applications <http://szeliski.org/Book/>`_ by Richard Szeliski and to *LearningOpenCV*
.. container:: enumeratevisibleitemswithsquare
* *Smoothing*, also called *blurring*, is a simple and frequently used image processing operation.
* There are many reasons for smoothing. In this tutorial we will focus on smoothing in order to reduce noise (other uses will be seen in the following tutorials).
@ -33,7 +33,7 @@ Theory
g(i,j) = \sum_{k,l} f(i+k, j+l) h(k,l)
:math:`h(k,l)` is called the *kernel*, which is nothing more than the coefficients of the filter.
It helps to visualize a *filter* as a window of coefficients sliding across the image.
@ -44,19 +44,19 @@ Normalized Box Filter
.. container:: enumeratevisibleitemswithsquare
* This filter is the simplest of all! Each output pixel is the *mean* of its kernel neighbors ( all of them contribute with equal weights)
* The kernel is below:
.. math::
K = \dfrac{1}{K_{width} \cdot K_{height}} \begin{bmatrix}
1 & 1 & 1 & ... & 1 \\
1 & 1 & 1 & ... & 1 \\
. & . & . & ... & 1 \\
. & . & . & ... & 1 \\
1 & 1 & 1 & ... & 1
\end{bmatrix}
Gaussian Filter
@ -69,16 +69,16 @@ Gaussian Filter
* Just to make the picture clearer, remember what a 1D Gaussian kernel looks like?
.. image:: images/Smoothing_Tutorial_theory_gaussian_0.jpg
:align: center
Assuming that an image is 1D, you can notice that the pixel located in the middle would have the biggest weight. The weight of its neighbors decreases as the spatial distance between them and the center pixel increases.
.. note::
Remember that a 2D Gaussian can be represented as :
.. math::
G_{0}(x, y) = A e^{ \dfrac{ -(x - \mu_{x})^{2} }{ 2\sigma^{2}_{x} } + \dfrac{ -(y - \mu_{y})^{2} }{ 2\sigma^{2}_{y} } }
where :math:`\mu` is the mean (the peak) and :math:`\sigma^{2}` represents the variance (one per each of the variables :math:`x` and :math:`y`)
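
As a small illustration, the 1D kernel OpenCV derives for a 5-tap filter when :math:`\sigma` is left unspecified (values quoted from the fixed small-kernel table OpenCV uses for such sizes; treat them as an assumption):

.. code-block:: cpp

   Mat g = getGaussianKernel( 5, -1, CV_32F );  // negative sigma: derive it from ksize
   // g = [ 0.0625, 0.25, 0.375, 0.25, 0.0625 ]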
@ -97,9 +97,9 @@ Bilateral Filter
* So far, we have explained some filters whose main goal is to *smooth* an input image. However, sometimes the filters do not only dissolve the noise, but also smooth away the *edges*. To avoid this (to a certain extent at least), we can use a bilateral filter.
* In an analogous way to the Gaussian filter, the bilateral filter also considers the neighboring pixels with weights assigned to each of them. These weights have two components, the first of which is the same weighting used by the Gaussian filter. The second component takes into account the difference in intensity between the neighboring pixels and the evaluated one.
* For a more detailed explanation you can check `this link <http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html>`_
Code
@ -108,14 +108,14 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads an image
* Applies 4 different kinds of filters (explained in Theory) and shows the filtered images sequentially
* **Downloadable code**:
Click `here <http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
* **Code at a glance:**
@ -140,29 +140,29 @@ Code
int display_caption( char* caption );
int display_dst( int delay );
/**
* function main
*/
int main( int argc, char** argv )
{
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Load the source image
src = imread( "../images/lena.jpg", 1 );
if( display_caption( "Original Image" ) != 0 ) { return 0; }
dst = src.clone();
if( display_dst( DELAY_CAPTION ) != 0 ) { return 0; }
/// Applying Homogeneous blur
if( display_caption( "Homogeneous Blur" ) != 0 ) { return 0; }
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{ blur( src, dst, Size( i, i ), Point(-1,-1) );
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
/// Applying Gaussian blur
if( display_caption( "Gaussian Blur" ) != 0 ) { return 0; }
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
@ -193,8 +193,8 @@ Code
int display_caption( char* caption )
{
dst = Mat::zeros( src.size(), src.type() );
putText( dst, caption,
Point( src.cols/4, src.rows/2),
CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
imshow( window_name, dst );
@ -208,7 +208,7 @@ Code
imshow( window_name, dst );
int c = waitKey ( delay );
if( c >= 0 ) { return -1; }
return 0;
}
@ -216,7 +216,7 @@ Code
Explanation
=============
#. Let's check the OpenCV functions that involve only the smoothing procedure, since the rest is already known by now.
#. **Normalized Block Filter:**
@ -237,10 +237,10 @@ Explanation
+ *dst*: Destination image
+ *Size( w,h )*: Defines the size of the kernel to be used ( of width *w* pixels and height *h* pixels)
+ *Point(-1, -1)*: Indicates where the anchor point (the pixel evaluated) is located with respect to the neighborhood. If there is a negative value, then the center of the kernel is considered the anchor point.
#. **Gaussian Filter:**
It is performed by the function :gaussian_blur:`GaussianBlur <>` :
@ -262,9 +262,9 @@ Explanation
+ *Size(w, h)*: The size of the kernel to be used (the neighbors to be considered). :math:`w` and :math:`h` have to be odd and positive numbers, otherwise the size will be calculated using the :math:`\sigma_{x}` and :math:`\sigma_{y}` arguments.
+ :math:`\sigma_{x}`: The standard deviation in x. Writing :math:`0` implies that :math:`\sigma_{x}` is calculated using kernel size.
+ :math:`\sigma_{y}`: The standard deviation in y. Writing :math:`0` implies that :math:`\sigma_{y}` is calculated using kernel size.
#. **Median Filter:**
@ -283,12 +283,12 @@ Explanation
+ *src*: Source image
+ *dst*: Destination image, must be the same type as *src*
+ *i*: Size of the kernel (only one because we use a square window). Must be odd.
#. **Bilateral Filter**
Provided by OpenCV function :bilateral_filter:`bilateralFilter <>`
.. code-block:: cpp
@ -296,7 +296,7 @@ Explanation
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{ bilateralFilter ( src, dst, i, i*2, i/2 );
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
We use 5 arguments:
.. container:: enumeratevisibleitemswithsquare
@ -306,9 +306,9 @@ Explanation
+ *dst*: Destination image
+ *d*: The diameter of each pixel neighborhood.
+ :math:`\sigma_{Color}`: Standard deviation in the color space.
+ :math:`\sigma_{Space}`: Standard deviation in the coordinate space (in pixel terms)
@ -317,10 +317,10 @@ Results
.. container:: enumeratevisibleitemswithsquare
* The code opens an image (in this case *lena.jpg*) and displays it under the effects of the 4 filters explained.
* Here is a snapshot of the image smoothed using *medianBlur*:
.. image:: images/Smoothing_Tutorial_Result_Median_Filter.jpg
:alt: Smoothing with a median filter
:align: center

View File

@ -14,7 +14,7 @@ In this tutorial you will learn:
* What is Back Projection and why it is useful
* How to use the OpenCV function :calc_back_project:`calcBackProject <>` to calculate Back Projection
* How to mix different channels of an image by using the OpenCV function :mix_channels:`mixChannels <>`
@ -27,8 +27,8 @@ What is Back Projection?
.. container:: enumeratevisibleitemswithsquare
* Back Projection is a way of recording how well the pixels of a given image fit the distribution of pixels in a histogram model.
* To make it simpler: For Back Projection, you calculate the histogram model of a feature and then use it to find this feature in an image.
* Application example: If you have a histogram of flesh color (say, a Hue-Saturation histogram ), then you can use it to find flesh color areas in an image:
@ -42,9 +42,9 @@ How does it work?
* Let's say you have gotten a skin histogram (Hue-Saturation) based on the image below. The histogram beside it is going to be our *model histogram* (which we know represents a sample of skin tonality). You applied some mask to capture only the histogram of the skin area:
====== ======
|T0| |T1|
====== ======
.. |T0| image:: images/Back_Projection_Theory0.jpg
:align: middle
@ -55,9 +55,9 @@ How does it work?
* Now, let's imagine that you get another hand image (Test Image) like the one below (with its respective histogram):
====== ======
|T2| |T3|
====== ======
.. |T2| image:: images/Back_Projection_Theory2.jpg
:align: middle
@ -70,7 +70,7 @@ How does it work?
a. In each pixel of our Test Image (i.e. :math:`p(i,j)` ), collect the data and find the corresponding bin location for that pixel (i.e. :math:`( h_{i,j}, s_{i,j} )` ).
b. Lookup the *model histogram* in the corresponding bin - :math:`( h_{i,j}, s_{i,j} )` - and read the bin value.
c. Store this bin value in a new image (*BackProjection*). Also, you may consider normalizing the *model histogram* first, so the output for the Test Image can be visible to you.
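The three steps above can be sketched in a few lines of C++. The names here are illustrative and not part of the tutorial program: ``hist`` is an H-S *model histogram* with *h_bins* x *s_bins* bins, normalized to the range [0,255] as step c suggests, and ``hsv_test`` is the Test Image already converted to HSV:

.. code-block:: cpp

   Mat backproj( hsv_test.size(), CV_8UC1 );
   for( int i = 0; i < hsv_test.rows; i++ )
     for( int j = 0; j < hsv_test.cols; j++ )
     {
       Vec3b pix = hsv_test.at<Vec3b>(i, j);
       int h_bin = pix[0] * h_bins / 180;   // Hue lives in [0,180) in OpenCV
       int s_bin = pix[1] * s_bins / 256;   // Saturation lives in [0,256)
       // steps a and b: look up the model histogram at this pixel's bin;
       // step c: store the bin value in the BackProjection image
       backproj.at<uchar>(i, j) = saturate_cast<uchar>( hist.at<float>(h_bin, s_bin) );
     }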
@ -88,7 +88,7 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads an image
@ -99,9 +99,9 @@ Code
* **Downloadable code**:
a. Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp>`_ for the basic version (explained in this tutorial).
b. For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the skin area) you can check the `improved demo <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp>`_
c. ...or you can always check out the classical `camshiftdemo <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/camshiftdemo.cpp>`_ in samples.
* **Code at glance:**
@ -116,7 +116,7 @@ Code
using namespace std;
/// Global Variables
Mat src; Mat hsv; Mat hue;
int bins = 25;
/// Function Headers
@ -133,7 +133,7 @@ Code
/// Use only the Hue value
hue.create( hsv.size(), hsv.depth() );
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
/// Create Trackbar to enter the number of bins
char* window_image = "Source image";
@ -146,7 +146,7 @@ Code
/// Wait until user exits the program
waitKey(0);
return 0;
}
@ -157,7 +157,7 @@ Code
void Hist_and_Backproj(int, void* )
{
MatND hist;
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
@ -168,16 +168,16 @@ Code
/// Get Backprojection
MatND backproj;
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
/// Draw the backproj
imshow( "BackProj", backproj );
/// Draw the histogram
int w = 400; int h = 400;
int bin_w = cvRound( (double) w / histSize );
Mat histImg = Mat::zeros( w, h, CV_8UC3 );
for( int i = 0; i < bins; i ++ )
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); }
imshow( "Histogram", histImg );
@ -190,7 +190,7 @@ Explanation
.. code-block:: cpp
Mat src; Mat hsv; Mat hue;
int bins = 25;
#. Read the input image and transform it to HSV format:
@ -206,7 +206,7 @@ Explanation
hue.create( hsv.size(), hsv.depth() );
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
As you see, we use the function :mix_channels:`mixChannels` to get only channel 0 (Hue) from the hsv image. It gets the following parameters:
@ -214,15 +214,15 @@ Explanation
+ **&hsv:** The source array from which the channels will be copied
+ **1:** The number of source arrays
+ **&hue:** The destination array of the copied channels
+ **1:** The number of destination arrays
+ **ch[] = {0,0}:** The array of index pairs indicating how the channels are copied. In this case, the Hue(0) channel of &hsv is being copied to the 0 channel of &hue (1-channel)
+ **1:** Number of index pairs
#. Create a Trackbar for the user to enter the bin values. Any change on the Trackbar means a call to the **Hist_and_Backproj** callback function.
.. code-block:: cpp
char* window_image = "Source image";
namedWindow( window_image, CV_WINDOW_AUTOSIZE );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
@ -235,7 +235,7 @@ Explanation
imshow( window_image, src );
waitKey(0);
return 0;
#. **Hist_and_Backproj function:** Initialize the arguments needed for :calc_hist:`calcHist <>`. The number of bins comes from the Trackbar:
@ -245,7 +245,7 @@ Explanation
void Hist_and_Backproj(int, void* )
{
MatND hist;
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
@ -264,7 +264,7 @@ Explanation
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
All the arguments are known (the same as those used to calculate the histogram); we only add the backproj matrix, which will store the backprojection of the source image (&hue)
#. Display backproj:
.. code-block:: cpp
@ -276,10 +276,10 @@ Explanation
.. code-block:: cpp
int w = 400; int h = 400;
int bin_w = cvRound( (double) w / histSize );
Mat histImg = Mat::zeros( w, h, CV_8UC3 );
for( int i = 0; i < bins; i ++ )
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); }
imshow( "Histogram", histImg );
@ -291,9 +291,9 @@ Results
#. Here is the output using a sample image ( guess what? Another hand ). You can play with the bin values and you will observe how it affects the results:
====== ====== ======
|R0| |R1| |R2|
====== ====== ======
.. |R0| image:: images/Back_Projection1_Source_Image.jpg
:align: middle
View File
@ -13,7 +13,7 @@ In this tutorial you will learn how to:
* Use the OpenCV function :split:`split <>` to divide an image into its correspondent planes.
* To calculate histograms of arrays of images by using the OpenCV function :calc_hist:`calcHist <>`
* To normalize an array by using the function :normalize:`normalize <>`
@ -34,7 +34,7 @@ What are histograms?
.. image:: images/Histogram_Calculation_Theory_Hist0.jpg
:align: center
* What happens if we want to *count* this data in an organized way? Since we know that the *range* of information value for this case is 256 values, we can segment our range in subparts (called **bins**) like:
@ -42,22 +42,22 @@ What are histograms?
\begin{array}{l}
[0, 255] = { [0, 15] \cup [16, 31] \cup ....\cup [240,255] } \\
range = { bin_{1} \cup bin_{2} \cup ....\cup bin_{n = 15} }
\end{array}
and we can keep count of the number of pixels that fall in the range of each :math:`bin_{i}`. Applying this to the example above we get the image below ( axis x represents the bins and axis y the number of pixels in each of them).
.. image:: images/Histogram_Calculation_Theory_Hist1.jpg
:align: center
* This was just a simple example of how a histogram works and why it is useful. A histogram can keep count not only of color intensities, but of whatever image features we want to measure (i.e. gradients, directions, etc).
* Let's identify some parts of the histogram:
a. **dims**: The number of parameters you want to collect data of. In our example, **dims = 1** because we are only counting the intensity values of each pixel (in a greyscale image).
b. **bins**: It is the number of **subdivisions** in each dim. In our example, **bins = 16**
c. **range**: The limits for the values to be measured. In this case: **range = [0,255]**
* What if you want to count two features? In this case your resulting histogram would be a 3D plot (in which x and y would be :math:`bin_{x}` and :math:`bin_{y}` for each feature and z would be the number of counts for each combination of :math:`(bin_{x}, bin_{y})`). The same would apply for more features (of course it gets trickier).
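For instance, a two-feature (Hue-Saturation) histogram could be set up as follows (a sketch, assuming an already converted HSV image ``hsv``; the bin counts are arbitrary):

.. code-block:: cpp

   int channels[] = { 0, 1 };                    // measure Hue and Saturation
   int histSize[] = { 30, 32 };                  // bins per dimension
   float h_range[] = { 0, 180 }; float s_range[] = { 0, 256 };
   const float* ranges[] = { h_range, s_range };
   MatND hist;
   calcHist( &hsv, 1, channels, Mat(), hist, 2, histSize, ranges, true, false );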
@ -65,7 +65,7 @@ What OpenCV offers you
-----------------------
For simple purposes, OpenCV implements the function :calc_hist:`calcHist <>`, which calculates the histogram of a set of arrays (usually images or image planes). It can operate with up to 32 dimensions. We will see it in the code below!
Code
====
@ -73,7 +73,7 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads an image
@ -82,7 +82,7 @@ Code
* Plots the three histograms in a window
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp>`_
* **Code at glance:**
@ -181,7 +181,7 @@ Explanation
if( !src.data )
{ return -1; }
#. Separate the source image into its three R, G and B planes. For this we use the OpenCV function :split:`split <>`:
.. code-block:: cpp
@ -195,7 +195,7 @@ Explanation
a. Establish number of bins (5, 10...):
.. code-block:: cpp
int histSize = 256; //from 0 to 255
b. Set the range of values (as we said, between 0 and 255 )
@ -219,25 +219,25 @@ Explanation
Mat b_hist, g_hist, r_hist;
e. We proceed to calculate the histograms by using the OpenCV function :calc_hist:`calcHist <>`:
.. code-block:: cpp
/// Compute the histograms:
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
where the arguments are:
.. container:: enumeratevisibleitemswithsquare
+ **&bgr_planes[0]:** The source array(s)
+ **1**: The number of source arrays (in this case we are using 1. We can enter here also a list of arrays )
+ **0**: The channel (*dim*) to be measured. In this case it is just the intensity (each array is single-channel) so we just write 0.
+ **Mat()**: A mask to be used on the source array ( zeros indicating pixels to be ignored ). If not defined it is not used
+ **b_hist**: The Mat object where the histogram will be stored
+ **1**: The histogram dimensionality.
+ **histSize:** The number of bins per each used dimension
+ **histRange:** The range of values to be measured per each dimension
+ **uniform** and **accumulate**: The bin sizes are the same and the histogram is cleared at the beginning.
@ -264,7 +264,7 @@ Explanation
this function receives these arguments:
.. container:: enumeratevisibleitemswithsquare
+ **b_hist:** Input array
+ **b_hist:** Output normalized array (can be the same)
+ **0** and **histImage.rows**: For this example, they are the lower and upper limits to normalize the values of **b_hist**
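Put together, the call looks like this (sketched here for the blue channel; *histImage* is the canvas created earlier in the program):

.. code-block:: cpp

   normalize( b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );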
@ -291,7 +291,7 @@ Explanation
}
we use the expression:
.. code-block:: cpp
@ -315,7 +315,7 @@ Explanation
waitKey(0);
return 0;
Result
======
@ -323,10 +323,10 @@ Result
#. Using as input argument an image like the shown below:
.. image:: images/Histogram_Calculation_Original_Image.jpg
:align: center
#. Produces the following histogram:
.. image:: images/Histogram_Calculation_Result.jpg
:align: center
View File
@ -25,43 +25,43 @@ Theory
a. **Correlation ( CV\_COMP\_CORREL )**
.. math::
d(H_1,H_2) = \frac{\sum_I (H_1(I) - \bar{H_1}) (H_2(I) - \bar{H_2})}{\sqrt{\sum_I(H_1(I) - \bar{H_1})^2 \sum_I(H_2(I) - \bar{H_2})^2}}
where
.. math::
\bar{H_k} = \frac{1}{N} \sum _J H_k(J)
and :math:`N` is the total number of histogram bins.
b. **Chi-Square ( CV\_COMP\_CHISQR )**
.. math::
d(H_1,H_2) = \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)}
c. **Intersection ( method=CV\_COMP\_INTERSECT )**
.. math::
d(H_1,H_2) = \sum _I \min (H_1(I), H_2(I))
d. **Bhattacharyya distance ( CV\_COMP\_BHATTACHARYYA )**
.. math::
d(H_1,H_2) = \sqrt{1 - \frac{1}{\sqrt{\bar{H_1} \bar{H_2} N^2}} \sum_I \sqrt{H_1(I) \cdot H_2(I)}}
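All four metrics are computed by the same OpenCV function, *compareHist*, selecting one through its *method* argument. A minimal sketch, assuming two already calculated and normalized histograms ``H1`` and ``H2``:

.. code-block:: cpp

   // lower is better for CV_COMP_CHISQR and CV_COMP_BHATTACHARYYA;
   // higher is better for CV_COMP_CORREL and CV_COMP_INTERSECT
   double d = compareHist( H1, H2, CV_COMP_BHATTACHARYYA );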
Code
====
@ -69,7 +69,7 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads a *base image* and 2 *test images* to be compared with it.
@ -79,8 +79,8 @@ Code
* Compare the histogram of the *base image* with respect to the 2 test histograms, the histogram of the lower half base image and with the same base image histogram.
* Display the numerical matching parameters obtained.
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp>`_
* **Code at glance:**
@ -105,7 +105,7 @@ Code
/// Load three images with different environment settings
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], 1 );
@ -117,7 +117,7 @@ Code
cvtColor( src_test1, hsv_test1, CV_BGR2HSV );
cvtColor( src_test2, hsv_test2, CV_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 50 bins for hue and 60 for saturation
int h_bins = 50; int s_bins = 60;
@ -153,14 +153,14 @@ Code
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( "Done \n" );
@ -171,7 +171,7 @@ Code
Explanation
===========
#. Declare variables such as the matrices to store the base image and the two other images to compare ( RGB and HSV )
.. code-block:: cpp
@ -186,7 +186,7 @@ Explanation
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], 1 );
@ -205,7 +205,7 @@ Explanation
.. code-block:: cpp
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
#. Initialize the arguments to calculate the histograms (bins, ranges and channels H and S ).
@ -233,7 +233,7 @@ Explanation
#. Calculate the Histograms for the base image, the 2 test images and the half-down base image:
.. code-block:: cpp
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
@ -252,24 +252,24 @@ Explanation
.. code-block:: cpp
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
Results
========
#. We use as input the following images:
============ ============ ============
|Base_0| |Test_1| |Test_2|
============ ============ ============
.. |Base_0| image:: images/Histogram_Comparison_Source_0.jpg
:align: middle
@ -289,10 +289,10 @@ Results
=============== =============== =============== =============== ===============
*Method* Base - Base Base - Half Base - Test 1 Base - Test 2
=============== =============== =============== =============== ===============
*Correlation* 1.000000 0.930766 0.182073 0.120447
*Chi-square* 0.000000 4.940466 21.184536 49.273437
*Intersection* 24.391548 14.959809 3.889029 5.775088
*Bhattacharyya* 0.000000 0.222609 0.646576 0.801869
=============== =============== =============== =============== ===============
View File
@ -12,7 +12,7 @@ In this tutorial you will learn:
* What an image histogram is and why it is useful
* To equalize histograms of images by using the OpenCV function :equalize_hist:`equalizeHist <>`
@ -24,12 +24,12 @@ What is an Image Histogram?
.. container:: enumeratevisibleitemswithsquare
* It is a graphical representation of the intensity distribution of an image.
* It quantifies the number of pixels for each intensity value considered.
.. image:: images/Histogram_Equalization_Theory_0.jpg
:align: center
What is Histogram Equalization?
@ -42,30 +42,30 @@ What is Histogram Equalization?
* To make it clearer, from the image above, you can see that the pixels seem clustered around the middle of the available range of intensities. What Histogram Equalization does is *stretch out* this range. Take a look at the figure below: the green circles indicate the *underpopulated* intensities. After applying the equalization, we get a histogram like the figure in the center. The resulting image is shown in the picture at right.
.. image:: images/Histogram_Equalization_Theory_1.jpg
:align: center
How does it work?
-----------------
.. container:: enumeratevisibleitemswithsquare
* Equalization implies *mapping* one distribution (the given histogram) to another distribution (a wider and more uniform distribution of intensity values) so the intensity values are spread over the whole range.
* To accomplish the equalization effect, the remapping should be the *cumulative distribution function (cdf)* (more details, refer to *Learning OpenCV*). For the histogram :math:`H(i)`, its *cumulative distribution* :math:`H^{'}(i)` is:
.. math::
H^{'}(i) = \sum_{0 \le j < i} H(j)
To use this as a remapping function, we have to normalize :math:`H^{'}(i)` such that the maximum value is 255 ( or the maximum value for the intensity of the image ). From the example above, the cumulative function is:
.. image:: images/Histogram_Equalization_Theory_2.jpg
:align: center
* Finally, we use a simple remapping procedure to obtain the intensity values of the equalized image:
.. math::
equalized( x, y ) = H^{'}( src(x,y) )
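The whole remapping can be sketched by hand for a grayscale image ``src`` (an illustrative fragment only; the tutorial itself lets :equalize_hist:`equalizeHist <>` do all of this internally):

.. code-block:: cpp

   int hist[256] = {0};
   for( int i = 0; i < src.rows; i++ )
     for( int j = 0; j < src.cols; j++ )
       hist[ src.at<uchar>(i,j) ]++;                    // H(i)

   int cdf[256]; cdf[0] = hist[0];
   for( int k = 1; k < 256; k++ )
     cdf[k] = cdf[k-1] + hist[k];                       // H'(i)

   float scale = 255.0f / (src.rows * src.cols);        // normalize H' to [0,255]
   Mat equalized( src.size(), CV_8UC1 );
   for( int i = 0; i < src.rows; i++ )
     for( int j = 0; j < src.cols; j++ )
       equalized.at<uchar>(i,j) = saturate_cast<uchar>( cdf[ src.at<uchar>(i,j) ] * scale );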
Code
@ -74,16 +74,16 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads an image
* Converts the original image to grayscale
* Equalizes the histogram by using the OpenCV function :equalize_hist:`equalizeHist <>`
* Displays the source and equalized images in a window.
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp>`_
* **Code at glance:**
@ -117,15 +117,15 @@ Code
/// Apply Histogram Equalization
equalizeHist( src, dst );
/// Display results
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( equalized_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );
/// Wait until user exits the program
waitKey(0);
return 0;
@ -134,7 +134,7 @@ Code
Explanation
===========
#. Declare the source and destination images as well as the window names:
.. code-block:: cpp
@ -144,7 +144,7 @@ Explanation
char* equalized_window = "Equalized Image";
#. Load the source image:
.. code-block:: cpp
src = imread( argv[1], 1 );
@ -164,7 +164,7 @@ Explanation
.. code-block:: cpp
equalizeHist( src, dst );
As can be easily seen, the only arguments are the original image and the output (equalized) image.
#. Display both images (original and equalized) :
@ -176,9 +176,9 @@ Explanation
imshow( source_window, src );
imshow( equalized_window, dst );
#. Wait until the user exits the program
.. code-block:: cpp
waitKey(0);
@ -191,19 +191,19 @@ Results
#. To better appreciate the results of equalization, let's introduce an image with not much contrast, such as:
.. image:: images/Histogram_Equalization_Original_Image.jpg
:align: center
which, by the way, has this histogram:
.. image:: images/Histogram_Equalization_Original_Histogram.jpg
:align: center
notice that the pixels are clustered around the center of the histogram.
#. After applying the equalization with our program, we get this result:
.. image:: images/Histogram_Equalization_Equalized_Image.jpg
:align: center
this image certainly has more contrast. Check out its new histogram:
View File
@ -31,81 +31,81 @@ How does it work?
* We need two primary components:
a. **Source image (I):** The image in which we expect to find a match to the template image
b. **Template image (T):** The patch image which will be compared against the source image
our goal is to detect the highest matching area:
.. image:: images/Template_Matching_Template_Theory_Summary.jpg
:align: center
* To identify the matching area, we have to *compare* the template image against the source image by sliding it:
.. image:: images/Template_Matching_Template_Theory_Sliding.jpg
:align: center
* By **sliding**, we mean moving the patch one pixel at a time (left to right, top to bottom). At each location, a metric is calculated so it represents how "good" or "bad" the match at that location is (or how similar the patch is to that particular area of the source image).
* For each location of **T** over **I**, you *store* the metric in the *result matrix* **(R)**. Each location :math:`(x,y)` in **R** contains the match metric:
.. image:: images/Template_Matching_Template_Theory_Result.jpg
:align: center
the image above is the result **R** of sliding the patch with a metric **TM_CCORR_NORMED**. The brightest locations indicate the highest matches. As you can see, the location marked by the red circle is probably the one with the highest value, so that location (the rectangle formed by that point as a corner and width and height equal to the patch image) is considered the match.
* In practice, we use the function :min_max_loc:`minMaxLoc <>` to locate the highest value (or lowest, depending on the type of matching method) in the *R* matrix.
Which are the matching methods available in OpenCV?
----------------------------------------------------
Good question. OpenCV implements template matching in the function :match_template:`matchTemplate <>`. Six methods are available:
a. **method=CV\_TM\_SQDIFF**
.. math::
R(x,y)= \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2
b. **method=CV\_TM\_SQDIFF\_NORMED**
.. math::
R(x,y)= \frac{\sum_{x',y'} (T(x',y')-I(x+x',y+y'))^2}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
c. **method=CV\_TM\_CCORR**
.. math::
R(x,y)= \sum _{x',y'} (T(x',y') \cdot I(x+x',y+y'))
d. **method=CV\_TM\_CCORR\_NORMED**
.. math::
R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I'(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
e. **method=CV\_TM\_CCOEFF**
.. math::
R(x,y)= \sum _{x',y'} (T'(x',y') \cdot I(x+x',y+y'))
where
.. math::
\begin{array}{l} T'(x',y')=T(x',y') - 1/(w \cdot h) \cdot \sum _{x'',y''} T(x'',y'') \\ I'(x+x',y+y')=I(x+x',y+y') - 1/(w \cdot h) \cdot \sum _{x'',y''} I(x+x'',y+y'') \end{array}
f. **method=CV\_TM\_CCOEFF\_NORMED**
.. math::
R(x,y)= \frac{ \sum_{x',y'} (T'(x',y') \cdot I'(x+x',y+y')) }{ \sqrt{\sum_{x',y'}T'(x',y')^2 \cdot \sum_{x',y'} I'(x+x',y+y')^2} }
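Whichever method is chosen, the whole sliding-and-scoring procedure is a single call (shown here with the variable names used in the code below):

.. code-block:: cpp

   matchTemplate( img, templ, result, match_method );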
Code
@ -115,7 +115,7 @@ Code
.. container:: enumeratevisibleitemswithsquare
* **What does this program do?**
.. container:: enumeratevisibleitemswithsquare
* Loads an input image and an image patch (*template*)
@ -125,13 +125,13 @@ Code
* Draw a rectangle around the area corresponding to the highest match
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp>`_
* **Code at glance:**
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@ -160,7 +160,7 @@ Code
/// Create windows
namedWindow( image_window, CV_WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar
char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
@ -180,11 +180,11 @@ Code
/// Source image to display
Mat img_display;
img.copyTo( img_display );
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_cols, result_rows, CV_32FC1 );
/// Do the Matching and Normalize
@ -194,18 +194,18 @@ Code
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
/// Show me what you got
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
imshow( image_window, img_display );
imshow( result_window, result );
@ -241,7 +241,7 @@ Explanation
namedWindow( image_window, CV_WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE );
#. Create the Trackbar to enter the kind of matching method to be used. When a change is detected the callback function **MatchingMethod** is called.
.. code-block:: cpp
@ -255,7 +255,7 @@ Explanation
waitKey(0);
return 0;
#. Let's check out the callback function. First, it makes a copy of the source image:
.. code-block:: cpp
@ -267,12 +267,12 @@ Explanation
#. Next, it creates the result matrix that will store the matching results for each template location. Observe in detail the size of the result matrix (which matches all possible locations for it)
.. code-block:: cpp
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_cols, result_rows, CV_32FC1 );
#. Perform the template matching operation:
.. code-block:: cpp
@ -287,18 +287,18 @@ Explanation
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
#. We localize the minimum and maximum values in the result matrix **R** by using :min_max_loc:`minMaxLoc <>`.
.. code-block:: cpp
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
the function calls as arguments:
.. container:: enumeratevisibleitemswithsquare
+ **result:** The source array
+ **&minVal** and **&maxVal:** Variables to save the minimum and maximum values in **result**
@ -309,18 +309,18 @@ Explanation
#. For the first two methods ( CV\_SQDIFF and CV\_SQDIFF\_NORMED ) the best matches are the lowest values. For all the others, higher values represent better matches. So, we save the corresponding value in the **matchLoc** variable:
.. code-block:: cpp
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
#. Display the source image and the result matrix. Draw a rectangle around the highest possible matching area:
.. code-block:: cpp
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
imshow( image_window, img_display );
imshow( result_window, result );
@ -333,19 +333,19 @@ Results
.. image:: images/Template_Matching_Original_Image.jpg
:align: center
and a template image:
.. image:: images/Template_Matching_Template_Image.jpg
:align: center
#. Generate the following result matrices (first row are the standard methods SQDIFF, CCORR and CCOEFF, second row are the same methods in their normalized version). In the first column, the darker a location, the better the match; for the other two columns, the brighter a location, the higher the match.
============ ============ ============
|Result_0| |Result_2| |Result_4|
============ ============ ============
|Result_1| |Result_3| |Result_5|
============ ============ ============
.. |Result_0| image:: images/Template_Matching_Correl_Result_0.jpg
:align: middle
View File
@ -19,7 +19,7 @@ Theory
* **Low error rate:** Meaning a good detection of only existent edges.
* **Good localization:** The distance between edge pixels detected and real edge pixels have to be minimized.
* **Minimal response:** Only one detector response per edge.
Steps
------
@ -27,39 +27,39 @@ Steps
#. Filter out any noise. The Gaussian filter is used for this purpose. An example of a Gaussian kernel of :math:`size = 5` that might be used is shown below:
.. math::
K = \dfrac{1}{159}\begin{bmatrix}
2 & 4 & 5 & 4 & 2 \\
4 & 9 & 12 & 9 & 4 \\
5 & 12 & 15 & 12 & 5 \\
4 & 9 & 12 & 9 & 4 \\
2 & 4 & 5 & 4 & 2
\end{bmatrix}
#. Find the intensity gradient of the image. For this, we follow a procedure analogous to Sobel:
a. Apply a pair of convolution masks (in the :math:`x` and :math:`y` directions):
.. math::
G_{x} = \begin{bmatrix}
-1 & 0 & +1 \\
-2 & 0 & +2 \\
-1 & 0 & +1
\end{bmatrix}
G_{y} = \begin{bmatrix}
-1 & -2 & -1 \\
0 & 0 & 0 \\
+1 & +2 & +1
\end{bmatrix}
b. Find the gradient strength and direction with:
.. math::
\begin{array}{l}
G = \sqrt{ G_{x}^{2} + G_{y}^{2} } \\
\theta = \arctan(\dfrac{ G_{y} }{ G_{x} })
\end{array}
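In OpenCV terms, this gradient step could be sketched as follows (an illustrative fragment; the tutorial program does not need it, since the Canny function computes the gradients internally):

.. code-block:: cpp

   Mat grad_x, grad_y, magnitude, angle;
   Sobel( src_gray, grad_x, CV_32F, 1, 0, 3 );             // G_x
   Sobel( src_gray, grad_y, CV_32F, 0, 1, 3 );             // G_y
   cartToPolar( grad_x, grad_y, magnitude, angle, true );  // G and theta (in degrees)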
@ -71,22 +71,22 @@ Steps
a. If a pixel gradient is higher than the *upper* threshold, the pixel is accepted as an edge
b. If a pixel gradient value is below the *lower* threshold, then it is rejected.
c. If the pixel gradient is between the two thresholds, then it will be accepted only if it is connected to a pixel that is above the *upper* threshold.
Canny recommended an *upper*:*lower* ratio between 2:1 and 3:1.
#. For more details, you can always consult your favorite Computer Vision book.
Code
=====
#. **What does this program do?**
* Asks the user to enter a numerical value to set the lower threshold for our *Canny Edge Detector* (by means of a Trackbar)
* Applies the *Canny Detector* and generates a **mask** (bright lines representing the edges on a black background).
* Applies the obtained mask to the original image and displays it in a window.
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp>`_
.. code-block:: cpp
@ -123,7 +123,7 @@ Code
/// Using Canny's output as a mask, we display our result
dst = Scalar::all(0);
src.copyTo( dst, detected_edges);
imshow( window_name, dst );
}
@ -194,7 +194,7 @@ Explanation
{ return -1; }
#. Create a matrix of the same type and size as *src* (to be *dst*)
.. code-block:: cpp
dst.create( src.size(), src.type() );
@ -249,9 +249,9 @@ Explanation
.. code-block:: cpp
dst = Scalar::all(0);
#. Finally, we will use the function :copy_to:`copyTo <>` to map only the areas of the image that are identified as edges (on a black background).
.. code-block:: cpp
src.copyTo( dst, detected_edges);
@ -280,8 +280,8 @@ Result
:alt: Result after running Canny
:width: 200pt
:align: center
* Notice how the image is superimposed on the black background in the edge regions.
View File
@ -10,8 +10,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :copy_make_border:`copyMakeBorder <>` to set the borders (extra padding to your image).
Theory
========
@ -19,14 +19,14 @@ Theory
The explanation below belongs to the book **Learning OpenCV** by Bradski and Kaehler.
#. In our previous tutorial we learned to use convolution to operate on images. One problem that naturally arises is how to handle the boundaries. How can we convolve them if the evaluated points are at the edge of the image?
#. What most OpenCV functions do is copy a given image onto another, slightly larger image and then automatically pad the boundary (by any of the methods explained in the sample code just below). This way the convolution can be performed over the needed pixels without problems (the extra padding is cut after the operation is done).
#. In this tutorial, we will briefly explore two ways of defining the extra padding (border) for an image:
a. **BORDER_CONSTANT**: Pad the image with a constant value (i.e. black or :math:`0`)
b. **BORDER_REPLICATE**: The row or column at the very edge of the original is replicated to the extra border.
This will be seen more clearly in the Code section.
@ -37,20 +37,20 @@ Code
======
#. **What does this program do?**
* Load an image
* Let the user choose what kind of padding to use in the input image. There are two options:
#. *Constant value border*: Applies a padding of a constant value for the whole border. This value will be updated randomly every 0.5 seconds.
#. *Replicated border*: The border will be replicated from the pixel values at the edges of the original image.
The user chooses either option by pressing 'c' (constant) or 'r' (replicate)
* The program finishes when the user presses 'ESC'
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -59,7 +59,7 @@ Code
using namespace cv;
/// Global Variables
Mat src, dst;
int top, bottom, left, right;
int borderType;
@ -75,10 +75,10 @@ Code
/// Load an image
src = imread( argv[1] );
if( !src.data )
{ printf(" No data entered, please enter the path to an image file \n");
return -1;
}
/// Brief how-to for this program
@ -92,12 +92,12 @@ Code
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
left = (int) (0.05*src.cols); right = (int) (0.05*src.cols);
dst = src;
imshow( window_name, dst );
while( true )
{
c = waitKey(500);
@ -140,14 +140,14 @@ Explanation
.. code-block:: cpp
src = imread( argv[1] );
if( !src.data )
{ printf(" No data entered, please enter the path to an image file \n");
return -1;
}
#. After giving a short intro of how to use the program, we create a window:
.. code-block:: cpp
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
@ -156,13 +156,13 @@ Explanation
.. code-block:: cpp
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
left = (int) (0.05*src.cols); right = (int) (0.05*src.cols);
#. The program begins a *while* loop. If the user presses 'c' or 'r', the *borderType* variable takes the value of *BORDER_CONSTANT* or *BORDER_REPLICATE* respectively:
.. code-block:: cpp
while( true )
{
c = waitKey(500);
@ -185,7 +185,7 @@ Explanation
#. Finally, we call the function :copy_make_border:`copyMakeBorder <>` to apply the respective padding:
.. code-block:: cpp
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
The arguments are:
@ -199,7 +199,7 @@ Explanation
#. We display our output image in the window created previously
.. code-block:: cpp
imshow( window_name, dst );
@ -213,12 +213,12 @@ Results
.. container:: enumeratevisibleitemswithsquare
* By default, it begins with the border set to BORDER_CONSTANT. Hence, a succession of random colored borders will be shown.
* If you press 'r', the border will become a replica of the edge pixels.
* If you press 'c', the random colored borders will appear again
* If you press 'ESC' the program will exit.
Below are some screenshots showing how the border changes color and how the *BORDER_REPLICATE* option looks:
.. image:: images/CopyMakeBorder_Tutorial_Results.jpg
:alt: Final result after copyMakeBorder application
View File
@ -10,8 +10,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :filter2d:`filter2D <>` to create your own linear filters.
Theory
=======
@ -21,15 +21,15 @@ Theory
Convolution
------------
In a very general sense, convolution is an operation between every part of an image and an operator (kernel).
What is a kernel?
------------------
A kernel is essentially a fixed size array of numerical coefficients along with an *anchor point* in that array, which is typically located at the center.
.. image:: images/filter_2d_tutorial_kernel_theory.png
:alt: kernel example
:align: center
How does convolution with a kernel work?
-----------------------------------------
@ -38,7 +38,7 @@ Assume you want to know the resulting value of a particular location in the imag
#. Place the kernel anchor on top of a determined pixel, with the rest of the kernel overlaying the corresponding local pixels in the image.
#. Multiply the kernel coefficients by the corresponding image pixel values and sum the result.
#. Place the result at the location of the *anchor* in the input image.
@ -47,35 +47,35 @@ Assume you want to know the resulting value of a particular location in the imag
Expressing the procedure above in the form of an equation we would have:
.. math::
H(x,y) = \sum_{i=0}^{M_{i} - 1} \sum_{j=0}^{M_{j}-1} I(x+i - a_{i}, y + j - a_{j})K(i,j)
Fortunately, OpenCV provides you with the function :filter2d:`filter2D <>` so you do not have to code all these operations.
Code
======
#. **What does this program do?**
* Loads an image
* Performs a *normalized box filter*. For instance, for a kernel of size :math:`size = 3`, the kernel would be:
.. math::
K = \dfrac{1}{3 \cdot 3} \begin{bmatrix}
1 & 1 & 1 \\
1 & 1 & 1 \\
1 & 1 & 1
\end{bmatrix}
The program will perform the filter operation with kernels of sizes 3, 5, 7, 9 and 11.
* The filter output (with each kernel) will be shown during 500 milliseconds
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -93,7 +93,7 @@ Code
Mat kernel;
Point anchor;
double delta;
int ddepth;
int kernel_size;
char* window_name = "filter2D Demo";
@ -107,7 +107,7 @@ Code
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
anchor = Point( -1, -1 );
delta = 0;
@ -131,7 +131,7 @@ Code
imshow( window_name, dst );
ind++;
}
return 0;
}
@ -171,12 +171,12 @@ Explanation
kernel_size = 3 + 2*( ind%5 );
kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
The first line updates *kernel_size* to odd values in the range :math:`[3,11]`. The second line actually builds the kernel by setting its value to a matrix filled with :math:`1's` and normalizing it by dividing by the total number of elements.
#. After setting the kernel, we can generate the filter by using the function :filter2d:`filter2D <>`:
.. code-block:: cpp
filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT );
The arguments denote:
View File
@ -17,16 +17,16 @@ Hough Circle Transform
* The Hough Circle Transform works in a *roughly* analogous way to the Hough Line Transform explained in the previous tutorial.
* In the line detection case, a line was defined by two parameters :math:`(r, \theta)`. In the circle case, we need three parameters to define a circle:
.. math::
C : ( x_{center}, y_{center}, r )
where :math:`(x_{center}, y_{center})` define the center position (green point) and :math:`r` is the radius, which allows us to completely define a circle, as can be seen below:
.. image:: images/Hough_Circle_Tutorial_Theory_0.jpg
:alt: Result of detecting circles with Hough Transform
:align: center
* For the sake of efficiency, OpenCV implements a detection method slightly trickier than the standard Hough Transform: *the Hough gradient method*. For more details, please check the book *Learning OpenCV* or your favorite Computer Vision bibliography
@ -34,19 +34,19 @@ Code
======
#. **What does this program do?**
* Loads an image and blurs it to reduce the noise
* Applies the *Hough Circle Transform* to the blurred image.
* Displays the detected circle in a window.
.. |TutorialHoughCirclesSimpleDownload| replace:: here
.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghlines.cpp
.. |TutorialHoughCirclesFancyDownload| replace:: here
.. _TutorialHoughCirclesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
#. The sample code that we will explain can be downloaded from |TutorialHoughCirclesSimpleDownload|_. A slightly fancier version (which shows both Hough standard and probabilistic with trackbars for changing the threshold values) can be found |TutorialHoughCirclesFancyDownload|_.
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -66,7 +66,7 @@ Code
if( !src.data )
{ return -1; }
/// Convert it to gray
cvtColor( src, src_gray, CV_BGR2GRAY );
/// Reduce the noise so we avoid false circle detection
@ -88,7 +88,7 @@ Code
circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
/// Show your results
namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform Demo", src );
@ -117,7 +117,7 @@ Explanation
cvtColor( src, src_gray, CV_BGR2GRAY );
#. Apply a Gaussian blur to reduce noise and avoid false circle detection:
.. code-block:: cpp
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
@ -138,10 +138,10 @@ Explanation
* *dp = 1*: The inverse ratio of resolution
* *min_dist = src_gray.rows/8*: Minimum distance between detected centers
* *param_1 = 200*: Upper threshold for the internal Canny edge detector
* *param_2 = 100*: Threshold for center detection.
* *min_radius = 0*: Minimum radius to be detected. If unknown, put zero as default.
* *max_radius = 0*: Maximum radius to be detected. If unknown, put zero as default
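These arguments correspond to a call like the following (reconstructed here for reference; ``circles`` is the ``vector<Vec3f>`` used in the code above):

.. code-block:: cpp

   HoughCircles( src_gray, circles, CV_HOUGH_GRADIENT, 1, src_gray.rows/8, 200, 100, 0, 0 );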
#. Draw the detected circles:
.. code-block:: cpp
@ -154,14 +154,14 @@ Explanation
circle( src, center, 3, Scalar(0,255,0), -1, 8, 0 );
// circle outline
circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
You can see that we will draw the circle(s) in red and the center(s) with a small green dot
#. Display the detected circle(s):
.. code-block:: cpp
namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform Demo", src );
@ -175,8 +175,8 @@ Explanation
Result
=======
The result of running the code above with a test image is shown below:
.. image:: images/Hough_Circle_Tutorial_Result.jpg
:alt: Result of detecting circles with Hough Transform
:align: center
View File
@ -9,7 +9,7 @@ Goal
In this tutorial you will learn how to:
* Use the OpenCV functions :hough_lines:`HoughLines <>` and :hough_lines_p:`HoughLinesP <>` to detect lines in an image.
Theory
=======
@ -18,60 +18,60 @@ Theory
Hough Line Transform
---------------------
#. The Hough Line Transform is a transform used to detect straight lines.
#. To apply the Transform, first an edge detection pre-processing is desirable.
How does it work?
^^^^^^^^^^^^^^^^^^
#. As you know, a line in the image space can be expressed with two variables. For example:
a. In the **Cartesian coordinate system:** Parameters: :math:`(m,b)`.
b. In the **Polar coordinate system:** Parameters: :math:`(r,\theta)`
.. image:: images/Hough_Lines_Tutorial_Theory_0.jpg
:alt: Line variables
:align: center
For Hough Transforms, we will express lines in the *Polar system*. Hence, a line equation can be written as:
.. math::
y = \left ( -\dfrac{\cos \theta}{\sin \theta} \right ) x + \left ( \dfrac{r}{\sin \theta} \right )
Arranging the terms: :math:`r = x \cos \theta + y \sin \theta`
#. In general for each point :math:`(x_{0}, y_{0})`, we can define the family of lines that goes through that point as:
.. math::
r_{\theta} = x_{0} \cdot \cos \theta + y_{0} \cdot \sin \theta
Meaning that each pair :math:`(r_{\theta},\theta)` represents each line that passes through :math:`(x_{0}, y_{0})`.
#. If for a given :math:`(x_{0}, y_{0})` we plot the family of lines that goes through it, we get a sinusoid. For instance, for :math:`x_{0} = 8` and :math:`y_{0} = 6` we get the following plot (in a plane :math:`\theta` - :math:`r`):
.. image:: images/Hough_Lines_Tutorial_Theory_1.jpg
:alt: Polar plot of a the family of lines of a point
:align: center
We consider only points such that :math:`r > 0` and :math:`0< \theta < 2 \pi`.
#. We can do the same operation above for all the points in an image. If the curves of two different points intersect in the plane :math:`\theta` - :math:`r`, that means that both points belong to a same line. For instance, following with the example above and drawing the plot for two more points: :math:`x_{1} = 9`, :math:`y_{1} = 4` and :math:`x_{2} = 12`, :math:`y_{2} = 3`, we get:
.. image:: images/Hough_Lines_Tutorial_Theory_2.jpg
:alt: Polar plot of the family of lines for three points
:align: center
The three plots intersect in one single point :math:`(0.925, 9.6)`; these coordinates are the parameters (:math:`\theta, r`) of the line on which :math:`(x_{0}, y_{0})`, :math:`(x_{1}, y_{1})` and :math:`(x_{2}, y_{2})` lie.
#. What does all the stuff above mean? It means that, in general, a line can be *detected* by finding the number of intersections between curves. The more curves intersect, the more points the line represented by that intersection has. In general, we can define a *threshold* of the minimum number of intersections needed to *detect* a line.
#. This is what the Hough Line Transform does. It keeps track of the intersection between curves of every point in the image. If the number of intersections is above some *threshold*, then it declares it as a line with the parameters :math:`(\theta, r_{\theta})` of the intersection point.
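To make the voting idea concrete, here is a minimal sketch of a naive accumulator (illustrative only; this is not how OpenCV implements :hough_lines:`HoughLines <>` internally). It assumes a binary edge image *edges* of type *CV_8U*:

.. code-block:: cpp

   // A naive (theta, r) voting scheme: every edge pixel votes for every
   // line (r, theta) passing through it. Assumes OpenCV core headers,
   // <cmath>, <vector> and "using namespace cv".
   std::vector<Vec2f> naiveHoughLines( const Mat& edges, int votesNeeded )
   {
     const int thetaBins = 180;  // 1 degree resolution
     const int rMax = cvRound( sqrt( (double)( edges.rows*edges.rows +
                                               edges.cols*edges.cols ) ) );
     Mat acc = Mat::zeros( 2*rMax + 1, thetaBins, CV_32S );

     for( int y = 0; y < edges.rows; y++ )
       for( int x = 0; x < edges.cols; x++ )
         if( edges.at<uchar>(y,x) > 0 )
           for( int t = 0; t < thetaBins; t++ )
           {
             double theta = t * CV_PI / thetaBins;
             int r = cvRound( x*cos(theta) + y*sin(theta) ) + rMax; // shift so r >= 0
             acc.at<int>(r,t)++;
           }

     std::vector<Vec2f> result;  // every cell above the threshold is a detected line
     for( int r = 0; r < acc.rows; r++ )
       for( int t = 0; t < thetaBins; t++ )
         if( acc.at<int>(r,t) >= votesNeeded )
           result.push_back( Vec2f( (float)(r - rMax), (float)(t * CV_PI / thetaBins) ) );
     return result;
   }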
Standard and Probabilistic Hough Line Transform
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
OpenCV implements two kinds of Hough Line Transforms:
a. **The Standard Hough Transform**
@ -88,21 +88,21 @@ b. **The Probabilistic Hough Line Transform**
Code
======
.. |TutorialHoughLinesSimpleDownload| replace:: here
.. _TutorialHoughLinesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghlines.cpp
.. |TutorialHoughLinesFancyDownload| replace:: here
.. _TutorialHoughLinesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp
#. **What does this program do?**
* Loads an image
* Applies either a *Standard Hough Line Transform* or a *Probabilistic Line Transform*.
* Displays the original image and the detected lines in two windows.
#. The sample code that we will explain can be downloaded from |TutorialHoughLinesSimpleDownload|_. A slightly fancier version (which shows both Hough standard and probabilistic with trackbars for changing the threshold values) can be found |TutorialHoughLinesFancyDownload|_.
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -207,9 +207,9 @@ Explanation
* *rho* : The resolution of the parameter :math:`r` in pixels. We use **1** pixel.
* *theta*: The resolution of the parameter :math:`\theta` in radians. We use **1 degree** (CV_PI/180)
* *threshold*: The minimum number of intersections to "*detect*" a line
* *srn* and *stn*: Default parameters set to zero. Check the OpenCV reference for more info.
b. And then you display the result by drawing the lines.
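For reference, here is a minimal sketch of that drawing step, in the spirit of the houghlines.cpp sample (*cdst* is an assumed output image): each :math:`(r, \theta)` pair is converted into two far-apart points lying on the line.

.. code-block:: cpp

   for( size_t i = 0; i < lines.size(); i++ )
   {
     float rho = lines[i][0], theta = lines[i][1];
     // (x0, y0) is the point of the line closest to the origin
     double a = cos(theta), b = sin(theta);
     double x0 = a*rho, y0 = b*rho;
     // walk 1000 pixels along the direction (-b, a) in both senses
     Point pt1( cvRound(x0 + 1000*(-b)), cvRound(y0 + 1000*(a)) );
     Point pt2( cvRound(x0 - 1000*(-b)), cvRound(y0 - 1000*(a)) );
     line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA );
   }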
.. code-block:: cpp
@ -236,14 +236,14 @@ Explanation
HoughLinesP(dst, lines, 1, CV_PI/180, 50, 50, 10 );
with the arguments:
* *dst*: Output of the edge detector. It should be a grayscale image (although in fact it is a binary one)
* *lines*: A vector that will store the parameters :math:`(x_{start}, y_{start}, x_{end}, y_{end})` of the detected lines
* *rho* : The resolution of the parameter :math:`r` in pixels. We use **1** pixel.
* *theta*: The resolution of the parameter :math:`\theta` in radians. We use **1 degree** (CV_PI/180)
* *threshold*: The minimum number of intersections to "*detect*" a line
* *minLineLength*: The minimum number of points that can form a line. Lines with less than this number of points are disregarded.
* *maxLineGap*: The maximum gap between two points to be considered in the same line.
b. And then you display the result by drawing the lines.
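Since the Probabilistic Transform returns the segment endpoints directly, this drawing loop is simpler (again a sketch; *cdst* is an assumed output image):

.. code-block:: cpp

   for( size_t i = 0; i < lines.size(); i++ )
   {
     Vec4i l = lines[i]; // (x_start, y_start, x_end, y_end)
     line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA );
   }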
@ -256,7 +256,7 @@ Explanation
}
#. Display the original image and the detected lines:
.. code-block:: cpp
@ -274,20 +274,20 @@ Result
=======
.. note::
The results below are obtained using the slightly fancier version we mentioned in the *Code* section. It implements the same functionality as above, only adding a Trackbar for the Threshold.
Using an input image such as:
.. image:: images/Hough_Lines_Tutorial_Original_Image.jpg
:alt: Result of detecting lines with Hough Transform
:align: center
We get the following result by using the Probabilistic Hough Line Transform:
.. image:: images/Hough_Lines_Tutorial_Result.jpg
:alt: Result of detecting lines with Hough Transform
:align: center
You may observe that the number of lines detected varies as you change the *threshold*. The explanation is simple: if you establish a higher threshold, fewer lines will be detected (since more points are needed to declare a line detected).
@ -36,7 +36,7 @@ Laplacian Operator
-------------------
#. From the explanation above, we deduce that the second derivative can be used to *detect edges*. Since images are "*2D*", we would need to take the derivative in both dimensions. Here, the Laplacian operator comes in handy.
#. The *Laplacian operator* is defined by:
.. math::
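   Laplace(f) = \dfrac{\partial^{2} f}{\partial x^{2}} + \dfrac{\partial^{2} f}{\partial y^{2}}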
@ -49,13 +49,13 @@ Code
======
#. **What does this program do?**
* Loads an image
* Removes noise by applying a Gaussian blur and then converts the original image to grayscale
* Applies a Laplacian operator to the grayscale image and stores the output image
* Displays the result in a window
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp>`_
.. code-block:: cpp
@ -70,7 +70,7 @@ Code
int main( int argc, char** argv )
{
Mat src, src_gray, dst;
int kernel_size = 3;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
@ -116,7 +116,7 @@ Explanation
.. code-block:: cpp
Mat src, src_gray, dst;
int kernel_size = 3;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
@ -136,7 +136,7 @@ Explanation
.. code-block:: cpp
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
#. Convert the image to grayscale using :cvt_color:`cvtColor <>`
.. code-block:: cpp
@ -16,14 +16,14 @@ Theory
What is remapping?
------------------
* It is the process of taking pixels from one place in the image and locating them in another position in a new image.
* To accomplish the mapping process, it might be necessary to do some interpolation for non-integer pixel locations, since there will not always be a one-to-one-pixel correspondence between source and destination images.
* We can express the remap for every pixel location :math:`(x,y)` as:
.. math::
g(x,y) = f ( h(x,y) )
where :math:`g()` is the remapped image, :math:`f()` the source image and :math:`h(x,y)` is the mapping function that operates on :math:`(x,y)`.
@ -34,7 +34,7 @@ What is remapping?
h(x,y) = (I.cols - x, y )
What would happen? It is easily seen that the image would flip in the :math:`x` direction. For instance, consider the input image:
.. image:: images/Remap_Tutorial_Theory_0.jpg
:alt: Original test image
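As a quick illustration (a minimal sketch, assuming an already loaded *Mat src*), this particular mapping can be fed to :remap:`remap <>` directly:

.. code-block:: cpp

   Mat map_x( src.size(), CV_32FC1 );
   Mat map_y( src.size(), CV_32FC1 );
   for( int y = 0; y < src.rows; y++ )
     for( int x = 0; x < src.cols; x++ )
     {
       // h(x,y) = (I.cols - x, y): flip in the x direction
       map_x.at<float>(y,x) = (float)(src.cols - x);
       map_y.at<float>(y,x) = (float)y;
     }
   Mat flipped;
   remap( src, flipped, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0,0) );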
@ -54,12 +54,12 @@ Code
====
#. **What does this program do?**
* Loads an image
* Each second, applies one of 4 different remapping processes to the image and displays it indefinitely in a window.
* Waits for the user to exit the program
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp>`_
.. code-block:: cpp
@ -91,7 +91,7 @@ Code
dst.create( src.size(), src.type() );
map_x.create( src.size(), CV_32FC1 );
map_y.create( src.size(), CV_32FC1 );
/// Create window
namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
@ -106,7 +106,7 @@ Code
/// Update map_x & map_y. Then apply remap
update_map();
remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0, 0) );
/// Display results
imshow( remap_window, dst );
@ -126,7 +126,7 @@ Code
{ for( int i = 0; i < src.cols; i++ )
{
switch( ind )
{
case 0:
if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
{
@ -169,7 +169,7 @@ Explanation
int ind = 0;
#. Load an image:
.. code-block:: cpp
src = imread( argv[1], 1 );
@ -181,7 +181,7 @@ Explanation
dst.create( src.size(), src.type() );
map_x.create( src.size(), CV_32FC1 );
map_y.create( src.size(), CV_32FC1 );
#. Create a window to display results
.. code-block:: cpp
@ -189,7 +189,7 @@ Explanation
namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
#. Establish a loop. Each 1000 ms we update our mapping matrices (*map_x* and *map_y*) and apply them to our source image:
.. code-block:: cpp
while( true )
@ -202,19 +202,19 @@ Explanation
/// Update map_x & map_y. Then apply remap
update_map();
remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0, 0) );
/// Display results
imshow( remap_window, dst );
}
The function that applies the remapping is :remap:`remap <>`. We give the following arguments:
* **src**: Source image
* **dst**: Destination image of same size as *src*
* **map_x**: The mapping function in the x direction. It is equivalent to the first component of :math:`h(i,j)`
* **map_y**: Same as above, but in y direction. Note that *map_y* and *map_x* are both of the same size as *src*
* **CV_INTER_LINEAR**: The type of interpolation to use for non-integer pixels. This is the default.
* **BORDER_CONSTANT**: Default
How do we update our mapping matrices *map_x* and *map_y*? Go on reading:
@ -225,25 +225,25 @@ Explanation
.. math::
h(i,j) = ( 2*i - src.cols/2 + 0.5, 2*j - src.rows/2 + 0.5)
for all pairs :math:`(i,j)` such that: :math:`\dfrac{src.cols}{4}<i<\dfrac{3 \cdot src.cols}{4}` and :math:`\dfrac{src.rows}{4}<j<\dfrac{3 \cdot src.rows}{4}`
b. Turn the image upside down: :math:`h( i, j ) = (i, src.rows - j)`
c. Reflect the image from left to right: :math:`h(i,j) = ( src.cols - i, j )`
d. Combination of b and c: :math:`h(i,j) = ( src.cols - i, src.rows - j )`
This is expressed in the following snippet. Here, *map_x* represents the first coordinate of *h(i,j)* and *map_y* the second coordinate.
.. code-block:: cpp
for( int j = 0; j < src.rows; j++ )
{ for( int i = 0; i < src.cols; i++ )
{
switch( ind )
{
case 0:
if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
{
@ -292,7 +292,7 @@ Result
:align: center
#. Turning it upside down:
.. image:: images/Remap_Tutorial_Result_1.jpg
:alt: Result 0 for remapping
:width: 250pt
@ -12,8 +12,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :sobel:`Sobel <>` to calculate the derivatives from an image.
* Use the OpenCV function :scharr:`Scharr <>` to calculate a more accurate derivative for a kernel of size :math:`3 \times 3`
Theory
========
@ -29,8 +29,8 @@ Theory
.. image:: images/Sobel_Derivatives_Tutorial_Theory_0.jpg
:alt: How intensity changes in an edge
:align: center
You can easily notice that in an *edge*, the pixel intensity *changes* noticeably. A good way to express *changes* is by using *derivatives*. A high change in gradient indicates a major change in the image.
#. To be more graphical, let's assume we have a 1D-image. An edge is shown by the "jump" in intensity in the plot below:
@ -51,9 +51,9 @@ Theory
Sobel Operator
---------------
#. The Sobel Operator is a discrete differentiation operator. It computes an approximation of the gradient of an image intensity function.
#. The Sobel Operator combines Gaussian smoothing and differentiation.
Formulation
^^^^^^^^^^^^
@ -64,21 +64,21 @@ Assuming that the image to be operated is :math:`I`:
a. **Horizontal changes**: This is computed by convolving :math:`I` with a kernel :math:`G_{x}` with odd size. For example for a kernel size of 3, :math:`G_{x}` would be computed as:
.. math::
G_{x} = \begin{bmatrix}
-1 & 0 & +1 \\
-2 & 0 & +2 \\
-1 & 0 & +1
\end{bmatrix} * I
b. **Vertical changes**: This is computed by convolving :math:`I` with a kernel :math:`G_{y}` with odd size. For example for a kernel size of 3, :math:`G_{y}` would be computed as:
.. math::
G_{y} = \begin{bmatrix}
-1 & -2 & -1 \\
0 & 0 & 0 \\
+1 & +2 & +1
\end{bmatrix} * I
#. At each point of the image we calculate an approximation of the *gradient* in that point by combining both results above:
@ -90,7 +90,7 @@ Assuming that the image to be operated is :math:`I`:
Although sometimes the following simpler equation is used:
.. math::
G = |G_{x}| + |G_{y}|
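To connect the formula with code, here is a minimal sketch of the horizontal convolution using OpenCV's *filter2D* (illustrative only; the tutorial itself uses :sobel:`Sobel <>`, which bundles this with the smoothing):

.. code-block:: cpp

   // The 3x3 G_x kernel from the formula above; assumes a grayscale src_gray
   Mat kernel_gx = (Mat_<float>(3,3) << -1, 0, +1,
                                        -2, 0, +2,
                                        -1, 0, +1);
   Mat grad_x;
   filter2D( src_gray, grad_x, CV_16S, kernel_gx );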
@ -103,14 +103,14 @@ Assuming that the image to be operated is :math:`I`:
G_{x} = \begin{bmatrix}
-3 & 0 & +3 \\
-10 & 0 & +10 \\
-3 & 0 & +3
\end{bmatrix}
G_{y} = \begin{bmatrix}
-3 & -10 & -3 \\
0 & 0 & 0 \\
+3 & +10 & +3
\end{bmatrix}
You can check out more information about this function in the OpenCV reference (:scharr:`Scharr <>`). Also, in the sample code below you will notice that above the :sobel:`Sobel <>` calls there are commented-out calls to the :scharr:`Scharr <>` function. Uncommenting them (and commenting out the Sobel lines) should give you an idea of how this function works.
@ -118,12 +118,12 @@ Code
=====
#. **What does this program do?**
* Applies the *Sobel Operator* and generates as output an image with the detected *edges* bright on a darker background.
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -137,7 +137,7 @@ Code
{
Mat src, src_gray;
Mat grad;
char* window_name = "Sobel Demo - Simple Edge Detector";
int scale = 1;
int delta = 0;
@ -162,15 +162,15 @@ Code
/// Generate grad_x and grad_y
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
/// Gradient X
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_x, abs_grad_x );
/// Gradient Y
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
convertScaleAbs( grad_y, abs_grad_y );
/// Total Gradient (approximate)
@ -192,7 +192,7 @@ Explanation
.. code-block:: cpp
Mat src, src_gray;
Mat grad;
char* window_name = "Sobel Demo - Simple Edge Detector";
int scale = 1;
int delta = 0;
@ -203,12 +203,12 @@ Explanation
.. code-block:: cpp
src = imread( argv[1] );
if( !src.data )
{ return -1; }
#. First, we apply a :gaussian_blur:`GaussianBlur <>` to our image to reduce the noise ( kernel size = 3 )
.. code-block:: cpp
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
@ -220,27 +220,27 @@ Explanation
cvtColor( src, src_gray, CV_RGB2GRAY );
#. Second, we calculate the "*derivatives*" in *x* and *y* directions. For this, we use the function :sobel:`Sobel <>` as shown below:
.. code-block:: cpp
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
/// Gradient X
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
/// Gradient Y
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
The function takes the following arguments:
* *src_gray*: In our example, the input image. Here it is *CV_8U*
* *grad_x*/*grad_y*: The output image.
* *ddepth*: The depth of the output image. We set it to *CV_16S* to avoid overflow.
* *x_order*: The order of the derivative in **x** direction.
* *y_order*: The order of the derivative in **y** direction.
* *scale*, *delta* and *BORDER_DEFAULT*: We use default values.
Notice that to calculate the gradient in the *x* direction we use :math:`x_{order}= 1` and :math:`y_{order} = 0`. We proceed analogously for the *y* direction.
#. We convert our partial results back to *CV_8U*:
@ -248,7 +248,7 @@ Explanation
convertScaleAbs( grad_x, abs_grad_x );
convertScaleAbs( grad_y, abs_grad_y );
#. Finally, we approximate the *gradient* by adding both directional gradients (note that this is not an exact calculation, but it is good enough for our purposes).
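A minimal sketch of that combination step, consistent with the listing above (each direction gets half the weight):

.. code-block:: cpp

   /// Total Gradient (approximate): grad = 0.5*|grad_x| + 0.5*|grad_y|
   addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad );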
@ -268,7 +268,7 @@ Results
========
#. Here is the output of applying our basic detector to *lena.jpg*:
.. image:: images/Sobel_Derivatives_Tutorial_Result.jpg
:alt: Result of applying Sobel operator to lena.jpg
@ -19,49 +19,49 @@ Theory
What is an Affine Transformation?
----------------------------------
#. It is any transformation that can be expressed in the form of a *matrix multiplication* (linear transformation) followed by a *vector addition* (translation).
#. From the above, we can use an Affine Transformation to express:
a. Rotations (linear transformation)
b. Translations (vector addition)
c. Scale operations (linear transformation)
you can see that, in essence, an Affine Transformation represents a **relation** between two images.
#. The usual way to represent an Affine Transform is by using a :math:`2 \times 3` matrix.
.. math::
A = \begin{bmatrix}
a_{00} & a_{01} \\
a_{10} & a_{11}
\end{bmatrix}_{2 \times 2}
B = \begin{bmatrix}
b_{00} \\
b_{10}
\end{bmatrix}_{2 \times 1}
M = \begin{bmatrix}
A & B
\end{bmatrix}
=
\begin{bmatrix}
a_{00} & a_{01} & b_{00} \\
a_{10} & a_{11} & b_{10}
\end{bmatrix}_{2 \times 3}
Considering that we want to transform a 2D vector :math:`X = \begin{bmatrix}x \\ y\end{bmatrix}` by using :math:`A` and :math:`B`, we can do it equivalently with:
:math:`T = A \cdot \begin{bmatrix}x \\ y\end{bmatrix} + B` or :math:`T = M \cdot [x, y, 1]^{T}`
.. math::
T = \begin{bmatrix}
a_{00}x + a_{01}y + b_{00} \\
a_{10}x + a_{11}y + b_{10}
\end{bmatrix}
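For a concrete feel (an illustrative example, not from the original tutorial), take :math:`a_{00}=a_{11}=2`, :math:`a_{01}=a_{10}=0`, :math:`b_{00}=1`, :math:`b_{10}=0` and the point :math:`(x,y) = (3,4)`; then

.. math::

   T = \begin{bmatrix}
   2 \cdot 3 + 0 \cdot 4 + 1 \\
   0 \cdot 3 + 2 \cdot 4 + 0
   \end{bmatrix}
   =
   \begin{bmatrix}
   7 \\
   8
   \end{bmatrix}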
How do we get an Affine Transformation?
@ -80,20 +80,20 @@ How do we get an Affine Transformation?
:width: 350pt
:align: center
the points 1, 2 and 3 (forming a triangle in image 1) are mapped into image 2, still forming a triangle, but now noticeably changed. If we find the Affine Transformation with these 3 points (you can choose them as you like), then we can apply this relation to all the pixels in the image.
Code
====
#. **What does this program do?**
* Loads an image
* Applies an Affine Transform to the image. This Transform is obtained from the relation between three points. We use the function :warp_affine:`warpAffine <>` for that purpose.
* Applies a Rotation to the image after being transformed. This rotation is with respect to the image center
* Waits until the user exits the program
#. The tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp>`_
.. code-block:: cpp
@ -123,14 +123,14 @@ Code
/// Load the image
src = imread( argv[1], 1 );
/// Set the dst image the same type and size as src
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( src.cols - 1, 0 );
srcTri[2] = Point2f( 0, src.rows - 1 );
dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 );
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 );
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 );
@ -153,7 +153,7 @@ Code
/// Rotate the warped image
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
@ -193,7 +193,7 @@ Explanation
#. Initialize the destination image as having the same size and type as the source:
.. code-block:: cpp
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
#. **Affine Transform:** As we explained lines above, we need two sets of 3 points to derive the affine transform relation. Take a look:
@ -203,11 +203,11 @@ Explanation
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( src.cols - 1, 0 );
srcTri[2] = Point2f( 0, src.rows - 1 );
dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 );
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 );
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 );
You may want to draw the points to get a better idea of how they change. Their locations are approximately the same as the ones depicted in the example figure (in the Theory section). You may note that the size and orientation of the triangle defined by the 3 points change.
#. Armed with both sets of points, we calculate the Affine Transform by using the OpenCV function :get_affine_transform:`getAffineTransform <>`:
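A sketch of that step and the subsequent warp (variable names follow the listing above; *warp_mat* is the Mat holding the transform):

.. code-block:: cpp

   /// Get the 2x3 Affine Transform relating the two point triplets
   warp_mat = getAffineTransform( srcTri, dstTri );
   /// Apply it to the source image
   warpAffine( src, warp_dst, warp_mat, warp_dst.size() );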
@ -264,7 +264,7 @@ Explanation
#. Finally, we display our results in two windows plus the original image for good measure:
.. code-block:: cpp
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
@ -292,7 +292,7 @@ Result
:alt: Original image
:width: 250pt
:align: center
after applying the first Affine Transform we obtain:
.. image:: images/Warp_Affine_Tutorial_Result_Warp.jpg
@ -11,8 +11,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :morphology_ex:`morphologyEx <>` to apply Morphological Transformation such as:
+ Opening
+ Closing
+ Morphological Gradient
+ Top Hat
@ -24,12 +24,12 @@ Theory
.. note::
The explanation below belongs to the book **Learning OpenCV** by Bradski and Kaehler.
In the previous tutorial we covered two basic Morphology operations:
.. container:: enumeratevisibleitemswithsquare
* Erosion
* Dilation
Based on these two we can perform more sophisticated transformations on our images. Here we briefly discuss 5 operations offered by OpenCV:
@ -39,7 +39,7 @@ Opening
* It is obtained by the erosion of an image followed by a dilation.
.. math::
dst = open( src, element) = dilate( erode( src, element ) )
* Useful for removing small objects (it is assumed that the objects are bright on a dark foreground)
@ -48,7 +48,7 @@ Opening
.. image:: images/Morphology_2_Tutorial_Theory_Opening.png
:alt: Opening
:align: center
Closing
---------
@ -56,14 +56,14 @@ Closing
* It is obtained by the dilation of an image followed by an erosion.
.. math::
dst = close( src, element ) = erode( dilate( src, element ) )
* Useful to remove small holes (dark regions).
.. image:: images/Morphology_2_Tutorial_Theory_Closing.png
:alt: Closing example
:align: center
Morphological Gradient
@ -79,7 +79,7 @@ Morphological Gradient
.. image:: images/Morphology_2_Tutorial_Theory_Gradient.png
:alt: Gradient
:align: center
Top Hat
@ -88,12 +88,12 @@ Top Hat
* It is the difference between an input image and its opening.
.. math::
dst = tophat( src, element ) = src - open( src, element )
.. image:: images/Morphology_2_Tutorial_Theory_TopHat.png
:alt: Top Hat
:align: center
Black Hat
----------
@ -101,19 +101,19 @@ Black Hat
* It is the difference between the closing of an image and the image itself
.. math::
dst = blackhat( src, element ) = close( src, element ) - src
.. image:: images/Morphology_2_Tutorial_Theory_BlackHat.png
:alt: Black Hat
:align: center
Code
======
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -145,7 +145,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
if( !src.data )
{ return -1; }
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
@ -153,12 +153,12 @@ This tutorial code's is shown lines below. You can also download it from `here <
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
/// Create Trackbar to select kernel type
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
/// Create Trackbar to choose kernel size
createTrackbar( "Kernel size:\n 2n +1", window_name,
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
@ -169,7 +169,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
return 0;
}
/**
* @function Morphology_Operations
*/
void Morphology_Operations( int, void* )
@ -177,11 +177,11 @@ This tutorial code's is shown lines below. You can also download it from `here <
// Since MORPH_X : 2,3,4,5 and 6
int operation = morph_operator + 2;
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
/// Apply the specified morphology operation
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
}
@ -200,34 +200,34 @@ Explanation
.. code-block:: cpp
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat",
window_name, &morph_operator, max_operator,
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat",
window_name, &morph_operator, max_operator,
Morphology_Operations );
* The second trackbar **"Element"** returns **morph_elem**, which indicates what kind of structure our kernel is:
.. code-block:: cpp
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
* The final trackbar **"Kernel Size"** returns the size of the kernel to be used (**morph_size**)
.. code-block:: cpp
createTrackbar( "Kernel size:\n 2n +1", window_name,
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
* Every time we move any slider, the user's function **Morphology_Operations** will be called to perform a new morphology operation and update the output image based on the current trackbar values.
.. code-block:: cpp
/**
* @function Morphology_Operations
*/
void Morphology_Operations( int, void* )
@ -235,11 +235,11 @@ Explanation
// Since MORPH_X : 2,3,4,5 and 6
int operation = morph_operator + 2;
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
/// Apply the specified morphology operation
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
}
@ -259,11 +259,11 @@ Explanation
.. code-block:: cpp
int operation = morph_operator + 2;
* **element**: The kernel to be used. We use the function :get_structuring_element:`getStructuringElement <>` to define our own structure.
Results
========
@ -272,11 +272,11 @@ Results
.. image:: images/Morphology_2_Tutorial_Original_Image.jpg
:alt: Morphology 2: Original image
:align: center
* And here are two snapshots of the display window. The first picture shows the output after using the operator **Opening** with a cross kernel. The second picture (right side) shows the result of using a **Blackhat** operator with an ellipse kernel.
.. image:: images/Morphology_2_Tutorial_Cover.jpg
:alt: Morphology 2: Result sample
:align: center
@ -11,7 +11,7 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV functions :pyr_up:`pyrUp <>` and :pyr_down:`pyrDown <>` to downsample or upsample a given image.
Theory
=======
@ -21,9 +21,9 @@ Theory
.. container:: enumeratevisibleitemswithsquare
* Usually we need to convert an image to a size different than its original. For this, there are two possible options:
#. *Upsize* the image (zoom in) or
#. *Downsize* it (zoom out).
* Although there is a *geometric transformation* function in OpenCV that literally resizes an image (:resize:`resize <>`, which we will show in a future tutorial), in this section we first analyze the use of **Image Pyramids**, which are widely applied in a huge range of vision applications.
@ -39,20 +39,20 @@ Image Pyramid
* **Gaussian pyramid:** Used to downsample images
* **Laplacian pyramid:** Used to reconstruct an upsampled image from an image lower in the pyramid (with less resolution)
* In this tutorial we'll use the *Gaussian pyramid*.
Gaussian Pyramid
^^^^^^^^^^^^^^^^^
* Imagine the pyramid as a set of layers in which the higher the layer, the smaller the size.
.. image:: images/Pyramids_Tutorial_Pyramid_Theory.png
:alt: Pyramid figure
:align: center
* Every layer is numbered from bottom to top, so layer :math:`(i+1)` (denoted as :math:`G_{i+1}`) is smaller than layer :math:`i` (:math:`G_{i}`).
* To produce layer :math:`(i+1)` in the Gaussian pyramid, we do the following:
@ -60,9 +60,9 @@ Gaussian Pyramid
.. math::
\frac{1}{16} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}
* Remove every even-numbered row and column.
* You can easily notice that the resulting image will be exactly one-quarter the area of its predecessor. Iterating this process on the input image :math:`G_{0}` (original image) produces the entire pyramid.
@ -72,7 +72,7 @@ Gaussian Pyramid
* Perform a convolution with the same kernel shown above (multiplied by 4) to approximate the values of the "missing pixels"
* These two procedures (downsampling and upsampling as explained above) are implemented by the OpenCV functions :pyr_up:`pyrUp <>` and :pyr_down:`pyrDown <>`, as we will see in an example with the code below:
.. note::
When we reduce the size of an image, we are actually *losing* information.
@ -80,9 +80,9 @@ Gaussian Pyramid
Code
======
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -115,7 +115,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
{ printf(" No data! -- Exiting the program \n");
return -1; }
tmp = src;
dst = tmp;
/// Create window
@ -124,7 +124,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Loop
while( true )
{
int c;
c = waitKey(10);
@ -132,7 +132,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
{ break; }
if( (char)c == 'u' )
{ pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
printf( "** Zoom In: Image x 2 \n" );
}
else if( (char)c == 'd' )
{ pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
@ -141,7 +141,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
imshow( window_name, dst );
tmp = dst;
}
return 0;
}
@ -160,13 +160,13 @@ Explanation
{ printf(" No data! -- Exiting the program \n");
return -1; }
* Create a Mat object to store the result of the operations (*dst*) and one to save temporary results (*tmp*).
.. code-block:: cpp
Mat src, dst, tmp;
/* ... */
tmp = src;
dst = tmp;
@ -183,7 +183,7 @@ Explanation
.. code-block:: cpp
while( true )
{
int c;
c = waitKey(10);
@ -191,7 +191,7 @@ Explanation
{ break; }
if( (char)c == 'u' )
{ pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
printf( "** Zoom In: Image x 2 \n" );
}
else if( (char)c == 'd' )
{ pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
@ -200,12 +200,12 @@ Explanation
imshow( window_name, dst );
tmp = dst;
}
Our program exits if the user presses *ESC*. It also has two options:
* **Perform upsampling (after pressing 'u')**
.. code-block:: cpp
@ -217,7 +217,7 @@ Explanation
* *dst*: The destination image (to be shown on screen, supposedly the double of the input image)
* *Size( tmp.cols*2, tmp.rows*2 )* : The destination size. Since we are upsampling, :pyr_up:`pyrUp <>` expects a size double that of the input image (in this case *tmp*).
* **Perform downsampling (after pressing 'd')**
.. code-block:: cpp
@ -232,7 +232,7 @@ Explanation
* Notice that it is important that the input image dimensions be divisible by two (in both dimensions). Otherwise, an error will be shown.
* Finally, we update the input image **tmp** with the current image displayed, so the subsequent operations are performed on it.
.. code-block:: cpp
tmp = dst;
@ -245,19 +245,19 @@ Results
* After compiling the code above we can test it. The program loads an image **chicky_512.jpg** that comes in the *tutorial_code/image* folder. Notice that this image is :math:`512 \times 512`, hence a downsample won't generate any error (:math:`512 = 2^{9}`). The original image is shown below:
.. image:: images/Pyramids_Tutorial_Original_Image.jpg
:alt: Pyramids: Original image
:align: center
* First we apply two successive :pyr_down:`pyrDown <>` operations by pressing 'd'. Our output is:
.. image:: images/Pyramids_Tutorial_PyrDown_Result.jpg
:alt: Pyramids: PyrDown Result
:align: center
* Note that we lose some resolution because we reduce the size of the image. This is evident after we apply :pyr_up:`pyrUp <>` twice (by pressing 'u'). Our output is now:
.. image:: images/Pyramids_Tutorial_PyrUp_Result.jpg
:alt: Pyramids: PyrUp Result
:align: center
@ -11,9 +11,9 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :bounding_rect:`boundingRect <>`
* Use the OpenCV function :min_enclosing_circle:`minEnclosingCircle <>`
Theory
======
@ -21,9 +21,9 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -73,7 +73,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Approximate contours to polygons + get bounding rects and circles
@ -83,18 +83,18 @@ This tutorial code's is shown lines below. You can also download it from `here <
vector<float>radius( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minEnclosingCircle( contours_poly[i], center[i], radius[i] );
}
/// Draw polygonal contour + bounding rects + circles
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
}
@ -112,13 +112,13 @@ Result
#. Here it is:
========== ==========
|BRC_0|    |BRC_1|
========== ==========
.. |BRC_0| image:: images/Bounding_Rects_Circles_Source_Image.jpg
:align: middle
.. |BRC_1| image:: images/Bounding_Rects_Circles_Result.jpg
:align: middle
@ -11,9 +11,9 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :min_area_rect:`minAreaRect <>`
* Use the OpenCV function :fit_ellipse:`fitEllipse <>`
Theory
======
@ -21,9 +21,9 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -73,7 +73,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles and ellipses for each contour
@ -81,29 +81,29 @@ This tutorial code's is shown lines below. You can also download it from `here <
vector<RotatedRect> minEllipse( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ minRect[i] = minAreaRect( Mat(contours[i]) );
if( contours[i].size() > 5 )
{ minEllipse[i] = fitEllipse( Mat(contours[i]) ); }
}
/// Draw contours + rotated rects + ellipses
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
// contour
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
// ellipse
ellipse( drawing, minEllipse[i], color, 2, 8 );
// rotated rectangle
Point2f rect_points[4]; minRect[i].points( rect_points );
for( int j = 0; j < 4; j++ )
line( drawing, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}
Explanation
@ -114,13 +114,13 @@ Result
#. Here it is:
========== ==========
|BRE_0|    |BRE_1|
========== ==========
.. |BRE_0| image:: images/Bounding_Rotated_Ellipses_Source_Image.jpg
:align: middle
.. |BRE_1| image:: images/Bounding_Rotated_Ellipses_Result.jpg
:align: middle
@ -10,8 +10,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :find_contours:`findContours <>`
* Use the OpenCV function :draw_contours:`drawContours <>`
Theory
======
@ -19,9 +19,9 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -71,20 +71,20 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}
Explanation
@ -95,13 +95,13 @@ Result
#. Here it is:
============= =============
|contour_0|   |contour_1|
============= =============
.. |contour_0| image:: images/Find_Contours_Original_Image.jpg
:align: middle
.. |contour_1| image:: images/Find_Contours_Result.jpg
:align: middle
@ -10,7 +10,7 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :convex_hull:`convexHull <>`
Theory
@ -19,11 +19,11 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@ -33,7 +33,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
@ -73,21 +73,21 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ convexHull( Mat(contours[i]), hull[i], false ); }
/// Draw contours + hull results
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
}
/// Show in a window
@ -104,13 +104,13 @@ Result
#. Here it is:
========== ==========
|Hull_0|   |Hull_1|
========== ==========
.. |Hull_0| image:: images/Hull_Original_Image.jpg
:align: middle
.. |Hull_1| image:: images/Hull_Result.jpg
:align: middle
@ -11,9 +11,9 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :moments:`moments <>`
* Use the OpenCV function :contour_area:`contourArea <>`
* Use the OpenCV function :arc_length:`arcLength <>`
Theory
======
@ -21,9 +21,9 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -73,7 +73,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
@ -81,7 +81,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
@ -89,9 +89,9 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
@ -103,9 +103,9 @@ This tutorial code's is shown lines below. You can also download it from `here <
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
@ -118,9 +118,9 @@ Result
#. Here it is:
========== ========== ==========
|MU_0|     |MU_1|     |MU_2|
========== ========== ==========
.. |MU_0| image:: images/Moments_Source_Image.jpg
:width: 250pt
@ -128,9 +128,9 @@ Result
.. |MU_1| image:: images/Moments_Result1.jpg
:width: 250pt
:align: middle
.. |MU_2| image:: images/Moments_Result2.jpg
:width: 250pt
:align: middle
@ -10,8 +10,8 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Use the OpenCV function :point_polygon_test:`pointPolygonTest <>`
Theory
======
@ -19,9 +19,9 @@ Theory
Code
====
This tutorial code is shown below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp>`_
.. code-block:: cpp
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
@ -51,13 +51,13 @@ This tutorial code's is shown lines below. You can also download it from `here <
/// Draw it in src
for( int j = 0; j < 6; j++ )
{ line( src, vert[j], vert[(j+1)%6], Scalar( 255 ), 3, 8 ); }
/// Get the contours
vector<vector<Point> > contours; vector<Vec4i> hierarchy;
Mat src_copy = src.clone();
findContours( src_copy, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
/// Calculate the distances to the contour
Mat raw_dist( src.size(), CV_32FC1 );
@ -70,19 +70,19 @@ This tutorial code's is shown lines below. You can also download it from `here <
double minVal; double maxVal;
minMaxLoc( raw_dist, &minVal, &maxVal, 0, 0, Mat() );
minVal = abs(minVal); maxVal = abs(maxVal);
/// Depicting the distances graphically
Mat drawing = Mat::zeros( src.size(), CV_8UC3 );
for( int j = 0; j < src.rows; j++ )
{ for( int i = 0; i < src.cols; i++ )
{
if( raw_dist.at<float>(j,i) < 0 )
{ drawing.at<Vec3b>(j,i)[0] = 255 - (int) abs(raw_dist.at<float>(j,i))*255/minVal; }
else if( raw_dist.at<float>(j,i) > 0 )
{ drawing.at<Vec3b>(j,i)[2] = 255 - (int) raw_dist.at<float>(j,i)*255/maxVal; }
{ drawing.at<Vec3b>(j,i)[2] = 255 - (int) raw_dist.at<float>(j,i)*255/maxVal; }
else
{ drawing.at<Vec3b>(j,i)[0] = 255; drawing.at<Vec3b>(j,i)[1] = 255; drawing.at<Vec3b>(j,i)[2] = 255; }
{ drawing.at<Vec3b>(j,i)[0] = 255; drawing.at<Vec3b>(j,i)[1] = 255; drawing.at<Vec3b>(j,i)[2] = 255; }
}
}
@ -105,13 +105,13 @@ Result
#. Here it is:
========== ==========
|PPT_0|    |PPT_1|
========== ==========
.. |PPT_0| image:: images/Point_Polygon_Test_Source_Image.png
:align: middle
.. |PPT_1| image:: images/Point_Polygon_Test_Result.jpg
:align: middle
@ -26,18 +26,18 @@ What is Thresholding?
* Application example: Separate out regions of an image corresponding to objects which we want to analyze. This separation is based on the variation of intensity between the object pixels and the background pixels.
* To differentiate the pixels we are interested in from the rest (which will eventually be rejected), we perform a comparison of each pixel intensity value with respect to a *threshold* (determined according to the problem to solve).
* Once we have properly separated the important pixels, we can set them to a determined value to identify them (i.e. we can assign them a value of :math:`0` (black), :math:`255` (white) or any value that suits your needs).
.. image:: images/Threshold_Tutorial_Theory_Example.jpg
:alt: Threshold simple example
:align: center
Types of Thresholding
-----------------------
* OpenCV offers the function :threshold:`threshold <>` to perform thresholding operations.
* We can perform :math:`5` types of Thresholding operations with this function. We will explain them in the following subsections.
@ -45,7 +45,7 @@ Types of Thresholding
.. image:: images/Threshold_Tutorial_Theory_Base_Figure.png
:alt: Threshold Binary
:align: center
Threshold Binary
^^^^^^^^^^^^^^^^^
@ -53,86 +53,86 @@ Threshold Binary
* This thresholding operation can be expressed as:
.. math::
\texttt{dst} (x,y) = \fork{\texttt{maxVal}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
* So, if the intensity of the pixel :math:`src(x,y)` is higher than :math:`thresh`, then the new pixel intensity is set to :math:`MaxVal`. Otherwise, the pixels are set to :math:`0`.
.. image:: images/Threshold_Tutorial_Theory_Binary.png
:alt: Threshold Binary
:align: center
Threshold Binary, Inverted
^^^^^^^^^^^^^^^^^^^^^^^^^^^
* This thresholding operation can be expressed as:
.. math::
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxVal}}{otherwise}
* If the intensity of the pixel :math:`src(x,y)` is higher than :math:`thresh`, then the new pixel intensity is set to :math:`0`. Otherwise, it is set to :math:`MaxVal`.
.. image:: images/Threshold_Tutorial_Theory_Binary_Inverted.png
:alt: Threshold Binary Inverted
:align: center
Truncate
^^^^^^^^^
* This thresholding operation can be expressed as:
.. math::
\texttt{dst} (x,y) = \fork{\texttt{threshold}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{src}(x,y)}{otherwise}
* The maximum intensity value for the pixels is :math:`thresh`; if :math:`src(x,y)` is greater, its value is *truncated*. See the figure below:
.. image:: images/Threshold_Tutorial_Theory_Truncate.png
:alt: Threshold Truncate
:align: center
Threshold to Zero
^^^^^^^^^^^^^^^^^^
* This operation can be expressed as:
.. math::
\texttt{dst} (x,y) = \fork{\texttt{src}(x,y)}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
* If :math:`src(x,y)` is lower than :math:`thresh`, the new pixel value will be set to :math:`0`.
.. image:: images/Threshold_Tutorial_Theory_Zero.png
:alt: Threshold Zero
:align: center
Threshold to Zero, Inverted
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* This operation can be expressed as:
.. math::
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{src}(x,y)}{otherwise}
* If :math:`src(x,y)` is greater than :math:`thresh`, the new pixel value will be set to :math:`0`.
.. image:: images/Threshold_Tutorial_Theory_Zero_Inverted.png
:alt: Threshold Zero Inverted
:align: center
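Before the full program below, here is a minimal sketch (not part of the original sample) showing how each of the five operations above maps to a flag of the :threshold:`threshold <>` function; ``src`` is assumed to be a loaded grayscale image:

.. code-block:: cpp

   Mat dst;
   double thresh = 100, maxVal = 255;
   threshold( src, dst, thresh, maxVal, THRESH_BINARY );     // Threshold Binary
   threshold( src, dst, thresh, maxVal, THRESH_BINARY_INV ); // Threshold Binary, Inverted
   threshold( src, dst, thresh, maxVal, THRESH_TRUNC );      // Truncate
   threshold( src, dst, thresh, maxVal, THRESH_TOZERO );     // Threshold to Zero
   threshold( src, dst, thresh, maxVal, THRESH_TOZERO_INV ); // Threshold to Zero, Inverted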
Code
======
The tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Threshold.cpp>`_
.. code-block:: cpp
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -173,8 +173,8 @@ The tutorial code's is shown lines below. You can also download it from `here <h
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Create Trackbar to choose type of Threshold
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_value,
@ -244,8 +244,8 @@ Explanation
.. code-block:: cpp
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_value,
@ -293,18 +293,18 @@ Results
.. image:: images/Threshold_Tutorial_Original_Image.jpg
:alt: Threshold Original Image
:align: center
#. First, we try to threshold our image with a *binary threshold inverted*. We expect that the pixels brighter than :math:`thresh` will turn dark, which is what actually happens, as we can see in the snapshot below (notice from the original image that the doggie's tongue and eyes are particularly bright in comparison with the rest of the image; this is reflected in the output image).
.. image:: images/Threshold_Tutorial_Result_Binary_Inverted.jpg
:alt: Threshold Result Binary Inverted
:align: center
#. Now we try with the *threshold to zero*. With this, we expect that the darkest pixels (below the threshold) will become completely black, whereas the pixels with values greater than the threshold will keep their original value. This is verified by the following snapshot of the output image:
.. image:: images/Threshold_Tutorial_Result_Zero.jpg
:alt: Threshold Result Zero
:align: center
@ -0,0 +1,258 @@
.. _O4A_SDK:
OpenCV4Android SDK
******************
This tutorial was designed to help you with installation and configuration of OpenCV4Android SDK.
This guide was written with Windows 7 in mind, though it should work with any other OS supported by OpenCV4Android SDK.
This tutorial assumes you have the following installed and configured:
* JDK
* Android SDK and NDK
* Eclipse IDE
* ADT and CDT plugins for Eclipse
..
If you need help with any of the above, you may refer to our :ref:`android_dev_intro` guide.
If you encounter any error after thoroughly following these steps, feel free to contact us via `OpenCV4Android <https://groups.google.com/group/android-opencv/>`_ discussion group or OpenCV `Q&A forum <http://answers.opencv.org>`_ . We'll do our best to help you out.
General info
============
**TODO:** rewrite this section.
OpenCV4Android SDK uses Android OpenCV Manager for library initialization. OpenCV Manager provides the following benefits:
* Compact apk-size, since all applications use the same binaries from Manager and do not store native libs within themselves;
* Hardware specific optimizations are automatically enabled on all supported platforms;
* Regular updates and bug fixes;
* Trusted OpenCV library source. All packages with OpenCV are published on Google Play;
..
For additional information on OpenCV Manager see the:
* |OpenCV4Android_Tutorial|_
* |OpenCV4Android_Reference|_
..
This package is quite close to the current OpenCV4Android distribution. If you're a beginner with OpenCV, the tutorial linked above will help you get started.
* Library Project for Java development with Eclipse.
* C++ headers and libraries for native application development.
* Java samples, javadoc documentation.
* prebuilt binaries for ARM-v7a platform.
..
.. |OpenCV4Android_Tutorial| replace:: Tutorial
.. _OpenCV4Android_Tutorial: http://docs.opencv.org/doc/tutorials/introduction/android_binary_package/android_binary_package.html#android-binary-package
.. |OpenCV4Android_Reference| replace:: Reference Manual
.. _OpenCV4Android_Reference: http://docs.opencv.org/android/refman.html
Tegra Android Development Pack users
====================================
You may have used `Tegra Android Development Pack <http://developer.nvidia.com/tegra-android-development-pack>`_
(**TADP**) released by **NVIDIA** for Android development environment setup.
Besides the Android development tools, TADP 2.0 includes OpenCV4Android SDK 2.4.2, so it may already be installed on your system and you can skip to running the ``face-detection`` sample.
More details regarding TADP can be found in the :ref:`android_dev_intro` guide.
Manual OpenCV4Android SDK setup
===============================
Get the OpenCV4Android SDK
--------------------------
#. Go to the `OpenCV download page on SourceForge <http://sourceforge.net/projects/opencvlibrary/files/opencv-android/>`_ and download the latest available version. Currently it's |opencv_android_bin_pack_url|_
#. Create a new folder for Android development with OpenCV. For this tutorial I have unpacked OpenCV to the :file:`C:\\Work\\OpenCV4Android\\` directory.
.. note:: It is better to use a path without spaces in it. Otherwise you may have problems with :command:`ndk-build`.
#. Unpack the OpenCV package into the chosen directory.
You can unpack it using any popular archiver (e.g. with |seven_zip|_):
.. image:: images/android_package_7zip.png
:alt: Exploring OpenCV package with 7-Zip
:align: center
On Unix you can use the following command:
.. code-block:: bash
unzip ~/Downloads/OpenCV-2.4.2-android-sdk.zip
.. |opencv_android_bin_pack| replace:: OpenCV-2.4.2-android-sdk.zip
.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.2/OpenCV-2.4.2-android-sdk.zip/download
.. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
.. |seven_zip| replace:: 7-Zip
.. _seven_zip: http://www.7-zip.org/
Open OpenCV library and samples in Eclipse
------------------------------------------
#. Start *Eclipse* and choose your workspace location.
We recommend starting to work with OpenCV for Android from a new clean workspace. A new Eclipse workspace can, for example, be created in the folder where you have unpacked the OpenCV4Android SDK package:
.. image:: images/eclipse_1_choose_workspace.png
:alt: Choosing C:\Work\android-opencv\ as workspace location
:align: center
#. Import OpenCV library and samples into workspace.
OpenCV library is packed as a ready-for-use `Android Library Project
<http://developer.android.com/guide/developing/projects/index.html#LibraryProjects>`_. You can simply reference it in your projects.
Each sample included into the |opencv_android_bin_pack| is a regular Android project that already references OpenCV library.
Follow the steps below to import OpenCV and samples into the workspace:
* Right click on the :guilabel:`Package Explorer` window and choose :guilabel:`Import...` option from the context menu:
.. image:: images/eclipse_5_import_command.png
:alt: Select Import... from context menu
:align: center
* In the main panel select :menuselection:`General --> Existing Projects into Workspace` and press :guilabel:`Next` button:
.. image:: images/eclipse_6_import_existing_projects.png
:alt: General > Existing Projects into Workspace
:align: center
* In the :guilabel:`Select root directory` field locate your OpenCV package folder. Eclipse should automatically locate OpenCV library and samples:
.. image:: images/eclipse_7_select_projects.png
:alt: Locate OpenCV library and samples
:align: center
* Click :guilabel:`Finish` button to complete the import operation.
After you click the :guilabel:`Finish` button, Eclipse will load all selected projects into the workspace. Numerous errors will be indicated:
.. image:: images/eclipse_8_false_alarm.png
:alt: Confusing Eclipse screen with numerous errors
:align: center
However, **all these errors are only false-alarms**!
Just give Eclipse a minute to complete initialization.
In some cases these errors disappear after :menuselection:`Project --> Clean... --> Clean all --> OK`
or after pressing :kbd:`F5` (for Refresh action) when selecting error-label-marked projects in :guilabel:`Package Explorer`.
Sometimes more advanced manipulations are required:
* The provided projects are configured for an ``API 11`` target (``API 9`` for the library), which may be a missing platform in your Android SDK.
Right-click any project, select :guilabel:`Properties` and then :guilabel:`Android` on the left pane.
Select a target with `API Level` 11 or higher:
.. image:: images/eclipse_8a_target.png
:alt: Updating target
:align: center
Eclipse will rebuild your workspace and error icons will disappear one by one:
.. image:: images/eclipse_9_errors_dissapearing.png
:alt: After small help Eclipse removes error icons!
:align: center
Once Eclipse completes the build, you will have a clean workspace without any build errors:
.. image:: images/eclipse_10_crystal_clean.png
:alt: OpenCV package imported into Eclipse
:align: center
.. _Running_OpenCV_Samples:
Running OpenCV Samples
----------------------
At this point you should be able to build and run the samples. Keep in mind that the ``face-detection``, ``Tutorial 3`` and ``Tutorial 4`` samples include some native code and require the Android NDK and the CDT plugin for Eclipse to build working applications.
If you haven't installed these tools, see the corresponding section of :ref:`Android_Dev_Intro`.
Also, please consider that the ``Tutorial 0`` and ``Tutorial 1`` samples use the Java Camera API, which is definitely accessible on an emulator from the Android SDK.
Other samples use the OpenCV Native Camera, which may not work with an emulator.
.. note:: Recent *Android SDK tools, revision 19+* can run ARM v7a OS images, but such images are not available for all Android versions.
Running samples from Eclipse is very simple:
* Connect your device with :command:`adb` tool from Android SDK or create an emulator with camera support.
* See `Managing Virtual Devices
<http://developer.android.com/guide/developing/devices/index.html>`_ document for help with Android Emulator.
* See `Using Hardware Devices
<http://developer.android.com/guide/developing/device.html>`_ for help with real devices (not emulators).
* Select project you want to start in :guilabel:`Package Explorer` and just press :kbd:`Ctrl + F11` or select option :menuselection:`Run --> Run` from the main menu, or click :guilabel:`Run` button on the toolbar.
.. note:: Android Emulator can take several minutes to start. So, please, be patient.
* On the first run Eclipse will ask you about the running mode for your application:
.. image:: images/eclipse_11_run_as.png
:alt: Run sample as Android Application
:align: center
* Select the :guilabel:`Android Application` option and click :guilabel:`OK` button. Eclipse will install and run the sample.
Chances are that on the first launch you will not have the `OpenCV Manager <https://docs.google.com/a/itseez.com/presentation/d/1EO_1kijgBg_BsjNp2ymk-aarg-0K279_1VZRcPplSuk/present#slide=id.p>`_ package installed.
In this case you will see the following message:
.. image:: images/android_emulator_opencv_manager_fail.png
:alt: You will see this message if you have no OpenCV Manager installed
:align: center
To get rid of the message you will need to install `OpenCV Manager` and the appropriate `OpenCV binary pack`.
Simply tap :menuselection:`Yes` if you have *Google Play Market* installed on your device/emulator. It will redirect you to the corresponding page on *Google Play Market*.
If you have no access to the *Market*, which is often the case with emulators, you will need to install the packages from the OpenCV4Android SDK folder manually. Open the console/terminal and type in the following two commands:
.. code-block:: sh
:linenos:
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.2_Manager.apk
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.2_binary_pack_armv7a.apk
If you're running Windows, that will probably look like this:
.. image:: images/install_opencv_manager_with_adb.png
:alt: Run these commands in the console to install OpenCV Manager
:align: center
When done, you will be able to run OpenCV samples on your device/emulator seamlessly.
* Here is ``Tutorial 2 - Use OpenCV Camera`` sample, running on top of stock camera-preview of the emulator.
.. image:: images/emulator_canny.png
:height: 600px
:alt: Tutorial 1 Basic - 1. Add OpenCV - running Canny
:align: center
What's next
===========
Now that you have your instance of OpenCV4Android SDK set up and configured, you may want to proceed to using OpenCV in your own application. You can learn how to do that in a separate *Development with OpenCV* tutorial.
@ -4,7 +4,7 @@
Using OpenCV in C++ code with OpenCV4Android SDK
************************************************
The Android way is to write all your code in Java. But sometimes this is not enough, and you need to go down to the native level and write some parts of your application in C/C++.
This is especially important when you already have some computer vision code which is written in C++ and uses OpenCV, and you want to reuse it in your Android application.
@ -0,0 +1,314 @@
.. _Android_Dev_Intro:
Introduction into Android Development
*************************************
This guide was designed to help you learn Android development basics and quickly set up your working environment.
This guide was written with Windows 7 in mind, though it should work with any other OS supported by Android SDK.
If you encounter any error after thoroughly following these steps, feel free to contact us via `OpenCV4Android <https://groups.google.com/group/android-opencv/>`_ discussion group or OpenCV `Q&A forum <http://answers.opencv.org>`_ . We'll do our best to help you out.
Android application structure
=============================
Usually source code of an Android application has the following structure:
+ :file:`root folder of the project/`
- :file:`jni/`
- :file:`libs/`
- :file:`res/`
- :file:`src/`
- :file:`AndroidManifest.xml`
- :file:`project.properties`
- :file:`... other files ...`
where:
* the :file:`src` folder contains Java code of the application
* the :file:`res` folder contains resources of the application (images, XML files describing UI layout, etc.),
* the :file:`libs` folder will contain native libraries after a successful build,
* and the :file:`jni` folder contains C/C++ application source code and NDK's build scripts :file:`Android.mk` and :file:`Application.mk`
producing the native libraries.
* the :file:`AndroidManifest.xml` file presents essential information about the application to the Android system
(application name, name of the main application package, application components, required permissions, etc.).
It can be created using the Eclipse wizard or the :command:`android` tool from the Android SDK.
* :file:`project.properties` is a text file containing information about the target Android platform and other build details.
This file is generated by Eclipse, or can be created with the :command:`android` tool included in the Android SDK.
.. note:: Both files (:file:`AndroidManifest.xml` and :file:`project.properties`) are required to compile the C++ part of the application,
since the NDK build system relies on them. If any of these files does not exist, compile the Java part of the project before the C++ part.
:file:`Android.mk` and :file:`Application.mk` scripts
==================================================================
The script :file:`Android.mk` usually has the following structure:
.. code-block:: make
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := <module_name>
LOCAL_SRC_FILES := <list of .c and .cpp project files>
<some variable name> := <some variable value>
...
<some variable name> := <some variable value>
include $(BUILD_SHARED_LIBRARY)
This is the minimal file :file:`Android.mk`, which builds C++ source code of an Android application. Note that the first two lines and the last line are mandatory for any :file:`Android.mk`.
Usually the file :file:`Application.mk` is optional, but for a project using OpenCV, where STL and exceptions are used in C++, it should also be created. Example of the file :file:`Application.mk`:
.. code-block:: make
APP_STL := gnustl_static
APP_CPPFLAGS := -frtti -fexceptions
APP_ABI := armeabi-v7a
Quick environment setup for Android development
===============================================
If you are making a clean environment install, then you can try `Tegra Android Development Pack <http://developer.nvidia.com/tegra-android-development-pack>`_
(**TADP**) released by **NVIDIA**.
When unpacked, TADP will cover all of the environment setup automatically and you can skip the rest of the guide.
If you are a beginner in Android development, then we also recommend starting with TADP.
.. note:: *NVIDIA*\ 's Tegra Android Development Pack includes some special features for |Nvidia_Tegra_Platform|_ but its use is not limited to *Tegra* devices only.
* You need at least *1.6 Gb* free disk space for the install.
* TADP will download Android SDK platforms and Android NDK from Google's server, so Internet connection is required for the installation.
* TADP may ask you to flash your development kit at the end of installation process. Just skip this step if you have no |Tegra_Development_Kit|_\ .
* (``UNIX``) TADP will ask you for *root* in the middle of installation, so you need to be a member of *sudo* group.
..
.. |Nvidia_Tegra_Platform| replace:: *NVIDIA*\ 's Tegra platform
.. _Nvidia_Tegra_Platform: http://developer.nvidia.com/node/19071
.. |Tegra_Development_Kit| replace:: Tegra Development Kit
.. _Tegra_Development_Kit: http://developer.nvidia.com/tegra-ventana-development-kit
.. _Android_Environment_Setup_Lite:
Manual environment setup for Android Development
================================================
Development in Java
-------------------
You need the following software to be installed in order to develop for Android in Java:
#. **Sun JDK 6**
Visit `Java SE Downloads page <http://www.oracle.com/technetwork/java/javase/downloads/>`_ and download an installer for your OS.
Here is a detailed :abbr:`JDK (Java Development Kit)` `installation guide <http://source.android.com/source/initializing.html#installing-the-jdk>`_
for Ubuntu and Mac OS (only JDK sections are applicable for OpenCV)
.. note:: OpenJDK is not suitable for Android development, since Android SDK supports only Sun JDK.
If you use Ubuntu, after installation of the Sun JDK you should run the following command to set the Sun Java environment:
.. code-block:: bash
sudo update-java-alternatives --set java-6-sun
**TODO:** add a note on Sun/Oracle Java installation on Ubuntu 12.
#. **Android SDK**
Get the latest ``Android SDK`` from http://developer.android.com/sdk/index.html
Here is Google's `install guide <http://developer.android.com/sdk/installing.html>`_ for the SDK.
.. note:: If you choose the SDK packed into a Windows installer, then you should have a 32-bit JRE installed. It is not a prerequisite for Android development, but the installer is an x86 application and requires a 32-bit Java runtime.
.. note:: If you are running an x64 version of Ubuntu Linux, then you need the ia32 shared libraries (for use on amd64 and ia64 systems) to be installed. You can install them with the following command:
.. code-block:: bash
sudo apt-get install ia32-libs
For Red Hat based systems the following command might be helpful:
.. code-block:: bash
sudo yum install libXtst.i386
#. **Android SDK components**
You need the following SDK components to be installed:
* *Android SDK Tools, revision 14* or newer.
Older revisions should also work, but they are not recommended.
* *SDK Platform Android 3.0*, ``API 11`` and *Android 2.3.1*, ``API 9``.
The minimal platform supported by OpenCV Java API is **Android 2.2** (``API 8``). This is also the minimum API Level required for the provided samples to run.
See the ``<uses-sdk android:minSdkVersion="8"/>`` tag in their **AndroidManifest.xml** files.
But for successful compilation of some samples the **target** platform should be set to Android 3.0 (API 11) or higher. It will not prevent them from running on Android 2.2.
.. image:: images/android_sdk_and_avd_manager.png
:height: 500px
:alt: Android SDK Manager
:align: center
See `Adding SDK Components <http://developer.android.com/sdk/adding-components.html>`_ for help with installing/updating SDK components.
#. **Eclipse IDE**
Check the `Android SDK System Requirements <http://developer.android.com/sdk/requirements.html>`_ document for a list of Eclipse versions that are compatible with the Android SDK.
For OpenCV 2.4.x we recommend **Eclipse 3.7 (Indigo)** or later versions. They work well for OpenCV under both Windows and Linux.
If you have no Eclipse installed, you can get it from the `official site <http://www.eclipse.org/downloads/>`_.
#. **ADT plugin for Eclipse**
These instructions are copied from `Android Developers site <http://developer.android.com/sdk/eclipse-adt.html#downloading>`_, check it out in case of any ADT-related problem.
Assuming that you have Eclipse IDE installed, as described above, follow these steps to download and install the ADT plugin:
#. Start Eclipse, then select :menuselection:`Help --> Install New Software...`
#. Click :guilabel:`Add` (in the top-right corner).
#. In the :guilabel:`Add Repository` dialog that appears, enter "ADT Plugin" for the Name and the following URL for the Location:
https://dl-ssl.google.com/android/eclipse/
#. Click :guilabel:`OK`
.. note:: If you have trouble acquiring the plugin, try using "http" in the Location URL, instead of "https" (https is preferred for security reasons).
#. In the :guilabel:`Available Software` dialog, select the checkbox next to :guilabel:`Developer Tools` and click :guilabel:`Next`.
#. In the next window, you'll see a list of the tools to be downloaded. Click :guilabel:`Next`.
#. Read and accept the license agreements, then click :guilabel:`Finish`.
.. note:: If you get a security warning saying that the authenticity or validity of the software can't be established, click :guilabel:`OK`.
#. When the installation completes, restart Eclipse.
Native development in C++
-------------------------
You need the following software to be installed in order to develop for Android in C++:
#. **Android NDK**
To compile C++ code for Android platform you need ``Android Native Development Kit`` (*NDK*).
You can get the latest version of NDK from the `download page <http://developer.android.com/sdk/ndk/index.html>`_. To install Android NDK just extract the archive to some folder on your computer. Here are `installation instructions <http://developer.android.com/sdk/ndk/index.html#installing>`_.
.. note:: Before you start, you can read the official Android NDK documentation, which is in the Android NDK archive, in the folder :file:`docs/`.
The main article about using the Android NDK build system is in the :file:`ANDROID-MK.html` file.
You can find additional information in the :file:`APPLICATION-MK.html`, :file:`NDK-BUILD.html`, :file:`CPU-ARM-NEON.html`, :file:`CPLUSPLUS-SUPPORT.html` and :file:`PREBUILTS.html` files.
#. **CDT plugin for Eclipse**
There are several possible ways to integrate compilation of C++ code by the Android NDK into the Eclipse compilation process.
We recommend the approach based on Eclipse :abbr:`CDT(C/C++ Development Tooling)` Builder.
.. important:: Make sure your Eclipse IDE has the :abbr:`CDT(C/C++ Development Tooling)` plugin installed. Open menu :guilabel:`Help -> About Eclipse SDK` and push the :guilabel:`Installation Details` button.
.. image:: images/eclipse_inst_details.png
:alt: Configure builders
:align: center
To install the `CDT plugin <http://eclipse.org/cdt/>`_ use menu :guilabel:`Help -> Install New Software...`,
then click :guilabel:`Add...`, paste the CDT 8.0 repository URL http://download.eclipse.org/tools/cdt/releases/indigo as shown in the picture below, name it *CDT* and click :guilabel:`OK`.
.. image:: images/eclipse_inst_cdt.png
:alt: Configure builders
:align: center
``CDT Main Features`` should be enough:
.. image:: images/eclipse_inst_cdt_2.png
:alt: Configure builders
:align: center
That's it. Compilation of C++ code is now fully integrated into the Eclipse build process.
Debugging and Testing
=====================
In this section we will give you some easy-to-follow instructions on how to set up an emulator or hardware device for testing and debugging an Android project.
AVD
---
AVD (*Android Virtual Device*) is probably not the most convenient way to test an OpenCV-dependent application, but it is surely the most uncomplicated one to configure.
#. Assuming you already have *Android SDK* and *Eclipse IDE* installed, in Eclipse go :guilabel:`Window -> AVD Manager`.
**TBD:** how to start AVD Manager without Eclipse...
#. Press the :guilabel:`New` button in :guilabel:`AVD Manager` window.
#. The :guilabel:`Create new Android Virtual Device` window will let you select some properties for your new device, like the target API level, the size of the SD card and others.
.. image:: images/AVD_create.png
:alt: Configure builders
:align: center
#. When you click the :guilabel:`Create AVD` button, your new AVD will be available in :guilabel:`AVD Manager`.
#. Press :guilabel:`Start` to launch the device. Be aware that any AVD (aka Emulator) is usually much slower than a hardware Android device, so it may take up to several minutes to start.
#. Go :guilabel:`Run -> Run/Debug` in Eclipse IDE to run your application in regular or debugging mode. :guilabel:`Device Chooser` will let you choose among the running devices or to start a new one.
Hardware Device
---------------
If you have an Android device, you can use it to test and debug your applications. This way is more authentic, though a little bit harder to set up.
#. Attach the Android device to your PC with a USB cable.
#. Go to the :guilabel:`Start Menu` and **right-click** on :guilabel:`Computer`. Select :guilabel:`Manage` in the context menu. You may be asked for administrative permissions.
#. Select :guilabel:`Device Manager` in the left pane and find an unknown device in the list. You may try unplugging it and then plugging it back in to check whether it's your exact equipment that appears in the list.
#. Right-click on your device to get to :guilabel:`Properties` and select the :guilabel:`Details` tab. Select :guilabel:`Device ID` and save both numbers (*e.g. in a text file*).
.. image:: images/device_details.png
:alt: Details
:align: center
#. Now open file :file:`<Android SDK folder>/extras/google/usb_driver/android_winusb.inf`. You will see some default records there, perhaps looking similar to this:
.. code-block:: ini
;Google Nexus One
%SingleAdbInterface% = USB_Install, USB\VID_18D1&PID_0D02
%CompositeAdbInterface% = USB_Install, USB\VID_18D1&PID_0D02&MI_01
#. There should be a record like this for your device, and since it hasn't appeared automatically, you are free to add one manually. All you should do is add another record similar to the one above, but using the numbers you saved earlier. The second number goes to the first line and the first one goes to the second line. Notice that the record should be inserted under the *[Google.NTx86]* or *[Google.NTamd64]* tag, depending on what system you're currently running. For example, in my case it was an NVIDIA Tegra device and the record looked like this:
.. code-block:: ini
;NVIDIA Tegra
%SingleAdbInterface% = USB_Install, USB\VID_0955&PID_7100&MI_01
%CompositeAdbInterface% = USB_Install, USB\VID_0955&PID_7100&REV_9999&MI_01
#. Assuming you have your SDK installed to :file:`C:/android-sdk/`, you may try the following command in the Windows console to ascertain that your device was recognized and attached:
.. code-block:: bash
C:\android-sdk\platform-tools\adb devices
.. image:: images/cmd_adb_devices.png
:alt: Console
:align: center
#. Now, in Eclipse go :guilabel:`Run -> Run/Debug` to run your application in regular or debugging mode. :guilabel:`Device Chooser` will let you choose among the devices.
Consult the official `Android Developers site <http://developer.android.com/tools/device.html>`_ for more information on configuring hardware devices to work with other operating systems.
What's next
===========
Now that you have your development environment set up and configured, you may want to proceed to installing the OpenCV4Android SDK. You can learn how to do that in a separate :ref:`O4A_SDK` tutorial.
@ -0,0 +1,471 @@
.. _dev_with_OCV_on_Android:
Android development with OpenCV
*******************************
This tutorial was created to help you use the OpenCV library within your Android project.
This guide was written with Windows 7 in mind, though it should work with any other OS supported by OpenCV4Android SDK.
This tutorial assumes you have the following installed and configured:
* JDK
* Android SDK and NDK
* Eclipse IDE
* ADT and CDT plugins for Eclipse
..
If you need help with any of the above, you may refer to our :ref:`android_dev_intro` guide.
This tutorial also assumes you have the OpenCV4Android SDK already installed on your development machine, and OpenCV Manager installed on your testing device. If you need help with any of these, you may consult our :ref:`O4A_SDK` tutorial.
If you encounter any error after thoroughly following these steps, feel free to contact us via `OpenCV4Android <https://groups.google.com/group/android-opencv/>`_ discussion group or OpenCV `Q&A forum <http://answers.opencv.org>`_ . We'll do our best to help you out.
Using OpenCV library within your Android project
================================================
In this section we will explain how to make an existing project use OpenCV.
Starting with 2.4.2 release for Android, *OpenCV Manager* is used to provide apps with the best available version of OpenCV.
You can get more information here: :ref:`Android_OpenCV_Manager` and in these `slides <https://docs.google.com/a/itseez.com/presentation/d/1EO_1kijgBg_BsjNp2ymk-aarg-0K279_1VZRcPplSuk/present#slide=id.p>`_.
Java
----
Application development with async initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using async initialization is the **recommended** way for application development. It uses the OpenCV Manager to access OpenCV libraries externally installed in the target system.
#. Add the OpenCV library project to your workspace. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
press the :guilabel:`Browse` button and locate the OpenCV4Android SDK (:file:`OpenCV-2.4.2-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In the application project, add a reference to the OpenCV Java SDK: in :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.2``.
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
:align: center
To run an OpenCV Manager-based application for the first time, you need to install the `OpenCV Manager` and `OpenCV binary pack` packages for your platform.
You can do it using Google Play Market or manually with the ``adb`` tool:
.. code-block:: sh
:linenos:
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.2_Manager.apk
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.2_binary_pack_armv7a.apk
Below is a very basic code snippet implementing the async initialization. It shows the basic principles. See the "15-puzzle" OpenCV sample for details.
.. code-block:: java
:linenos:
public class MyActivity extends Activity implements HelperCallbackInterface
{
private BaseLoaderCallback mOpenCVCallBack = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
// Create and set View
mView = new puzzle15View(mAppContext);
setContentView(mView);
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState)
{
Log.i(TAG, "onCreate");
super.onCreate(savedInstanceState);
Log.i(TAG, "Trying to load OpenCV library");
if (!OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_2, this, mOpenCVCallBack))
{
Log.e(TAG, "Cannot connect to OpenCV Manager");
}
}
// ...
}
In this case the application works with OpenCV Manager in an asynchronous fashion. The ``onManagerConnected`` callback will be called in the UI thread when initialization finishes.
Please note that it is not allowed to use OpenCV calls or load OpenCV-dependent native libs before this callback is invoked.
Load your own native libraries that depend on OpenCV after the successful OpenCV initialization, as sketched below.
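A minimal sketch of that pattern (``my_jni_lib1`` is a hypothetical library name, reused from the static-initialization example below):

.. code-block:: java

   private BaseLoaderCallback mOpenCVCallBack = new BaseLoaderCallback(this) {
       @Override
       public void onManagerConnected(int status) {
           if (status == LoaderCallbackInterface.SUCCESS) {
               // OpenCV is initialized: only now is it safe to call the API
               // or to load your own OpenCV-dependent native libraries
               System.loadLibrary("my_jni_lib1"); // hypothetical library name
           } else {
               super.onManagerConnected(status);
           }
       }
   };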
Application development with static initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With this approach all OpenCV binaries are included in your application package. It is designed mostly for development purposes.
This approach is deprecated for production code; a release package should communicate with OpenCV Manager via the async initialization described above.
#. Add the OpenCV library project to your workspace the same way as for the async initialization above.
Use menu :guilabel:`File -> Import -> Existing project in your workspace`, press the :guilabel:`Browse` button and select the OpenCV SDK path (:file:`OpenCV-2.4.2-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In the application project, add a reference to the OpenCV4Android SDK: in :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.2``;
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
:align: center
#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV native libs from :file:`<OpenCV-2.4.2-android-sdk>/sdk/native/libs/<target_arch>` to the :file:`libs/<target_arch>` folder of your project directory.
In case of an application project **with a JNI part**, instead of copying the libraries manually you need to modify your ``Android.mk`` file:
add the following two code lines after ``"include $(CLEAR_VARS)"`` and before ``"include path_to_OpenCV-2.4.2-android-sdk/sdk/native/jni/OpenCV.mk"``
.. code-block:: make
:linenos:
OPENCV_CAMERA_MODULES:=on
OPENCV_INSTALL_MODULES:=on
The result should look like the following:
.. code-block:: make
:linenos:
include $(CLEAR_VARS)
# OpenCV
OPENCV_CAMERA_MODULES:=on
OPENCV_INSTALL_MODULES:=on
include ../../sdk/native/jni/OpenCV.mk
After that the OpenCV libraries will be copied to your application :file:`libs` folder during the JNI part build.
Eclipse will automatically include all the libraries from the :file:`libs` folder into the application package (APK).
#. The last step of enabling OpenCV in your application is to add Java initialization code before any call to the OpenCV API.
It can be done, for example, in the static section of the ``Activity`` class:
.. code-block:: java
:linenos:
static {
if (!OpenCVLoader.initDebug()) {
// Handle initialization error
}
}
If your application includes other OpenCV-dependent native libraries, you should load them **after** OpenCV initialization:
.. code-block:: java
:linenos:
static {
if (!OpenCVLoader.initDebug()) {
// Handle initialization error
} else {
System.loadLibrary("my_jni_lib1");
System.loadLibrary("my_jni_lib2");
}
}
Native/C++
----------
To build your own Android application that uses OpenCV from its native part, the following steps should be done:
#. You can use an environment variable to specify the location of the OpenCV package, or just hardcode an absolute or relative path in the :file:`jni/Android.mk` of your project.
#. The file :file:`jni/Android.mk` should be written for the current application using the common rules for this file.
For detailed information see the Android NDK documentation from the Android NDK archive, in the file
:file:`<path_where_NDK_is_placed>/docs/ANDROID-MK.html`
#. The line
.. code-block:: make
include C:\Work\OpenCV4Android\OpenCV-2.4.2-android-sdk\sdk\native\jni\OpenCV.mk
should be inserted into the :file:`jni/Android.mk` file **after** the line
.. code-block:: make
include $(CLEAR_VARS)
#. Several variables can be used to customize OpenCV stuff, but you **don't need** to use them when your application uses the `async initialization` via the `OpenCV Manager` API.
Note: these variables should be set **before** the ``"include .../OpenCV.mk"`` line:
.. code-block:: make
OPENCV_INSTALL_MODULES:=on
Copies necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them into the APK.
.. code-block:: make
OPENCV_CAMERA_MODULES:=off
Skips copying the native OpenCV camera-related libs to the project ``libs`` folder.
.. code-block:: make
OPENCV_LIB_TYPE:=STATIC
Performs a static link with OpenCV. By default a dynamic link is used and the project JNI lib depends on ``libopencv_java.so``.
#. The file :file:`Application.mk` should exist and should contain the following lines:
.. code-block:: make
APP_STL := gnustl_static
APP_CPPFLAGS := -frtti -fexceptions
Also, a line like this one:
.. code-block:: make
APP_ABI := armeabi-v7a
should specify the application target platforms.
In some cases a linkage error (like ``"In function 'cv::toUtf16(std::basic_string<...>... undefined reference to 'mbstowcs'"``) happens
when building an application JNI library that depends on OpenCV.
The following line in the :file:`Application.mk` usually fixes it:
.. code-block:: make
APP_PLATFORM := android-9
#. Either use :ref:`manual <NDK_build_cli>` ``ndk-build`` invocation or :ref:`setup Eclipse CDT Builder <Android_NDK_integration_with_Eclipse>` to build the native JNI lib before the Java part [re]build and APK creation. A minimal sketch of such a JNI source file follows.
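To make the native part concrete, here is a minimal sketch of a JNI source file using OpenCV. The Java class and method names are hypothetical, and the convention of passing a ``Mat`` by its native address (``Mat.getNativeObjAddr()`` on the Java side) is an assumption for illustration:

.. code-block:: cpp

   #include <jni.h>
   #include <opencv2/core/core.hpp>
   #include <opencv2/imgproc/imgproc.hpp>

   // Hypothetical native method of a Java class com.example.MyActivity;
   // the Mat is passed from Java by its native address.
   extern "C" JNIEXPORT void JNICALL
   Java_com_example_MyActivity_nativeBlur( JNIEnv* env, jobject, jlong matAddr )
   {
       cv::Mat& img = *(cv::Mat*)matAddr;
       cv::GaussianBlur( img, img, cv::Size(5, 5), 0 ); // simple in-place processing
   }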
**TBD:** move this info from tutorial v1 to part 1 of tutorial v2.
Hello OpenCV Sample
===================
Here are the basic steps to guide you through the process of creating a simple OpenCV-centric application.
It will be capable of accessing camera output, processing it and displaying the result.
#. Open Eclipse IDE, create a new clean workspace, create a new Android project (*File -> New -> Android Project*).
#. Set name, target, package and minSDKVersion accordingly.
#. Create a new class (*File -> New -> Class*). Name it for example: *HelloOpenCVView*.
.. image:: images/dev_OCV_new_class.png
:alt: Add a new class.
:align: center
* It should extend *SurfaceView* class.
* It also should implement *SurfaceHolder.Callback*, *Runnable*.
#. Edit *HelloOpenCVView* class.
* Add an *import* line for *android.content.Context*.
* Modify autogenerated stubs: *HelloOpenCVView*, *surfaceCreated*, *surfaceDestroyed* and *surfaceChanged*.
.. code-block:: java
package com.hello.opencv.test;
import android.content.Context;
public class HelloOpenCVView extends SurfaceView implements Callback, Runnable {
public HelloOpenCVView(Context context) {
super(context);
getHolder().addCallback(this);
}
public void surfaceCreated(SurfaceHolder holder) {
(new Thread(this)).start();
}
public void surfaceDestroyed(SurfaceHolder holder) {
cameraRelease();
}
public void surfaceChanged(SurfaceHolder holder, int format, int width,
int height) {
cameraSetup(width, height);
}
* Add the *cameraOpen*, *cameraRelease* and *cameraSetup* methods as shown below.
* Also, don't forget to add the public *run()* method as follows:
.. code-block:: java
public void run() {
// TODO: loop { getFrame(), processFrame(), drawFrame() }
}
public boolean cameraOpen() {
return false; //TODO: open camera
}
private void cameraRelease() {
// TODO release camera
}
private void cameraSetup(int width, int height) {
// TODO setup camera
}
..
#. Create a new *Activity* (*New -> Other -> Android -> Android Activity*) and name it, for example: *HelloOpenCVActivity*. For this activity define the *onCreate*, *onResume* and *onPause* methods.
.. code-block:: java
public void onCreate (Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mView = new HelloOpenCVView(this);
setContentView (mView);
}
protected void onPause() {
super.onPause();
mView.cameraRelease();
}
protected void onResume() {
super.onResume();
if( !mView.cameraOpen() ) {
// MessageBox and exit app
AlertDialog ad = new AlertDialog.Builder(this).create();
ad.setCancelable(false); // This blocks the "BACK" button
ad.setMessage("Fatal error: can't open camera!");
ad.setButton("OK", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
finish();
}
});
ad.show();
}
}
#. Add the following permissions to the AndroidManifest.xml file:
.. code-block:: xml
</application>
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" />
<uses-feature android:name="android.hardware.camera.autofocus" />
#. Reference OpenCV library within your project properties.
.. image:: images/dev_OCV_reference.png
:alt: Reference OpenCV library.
:align: center
#. We now need some code to handle the camera. Update the *HelloOpenCVView* class as follows:
.. code-block:: java
private VideoCapture mCamera;
public boolean cameraOpen() {
synchronized (this) {
cameraRelease();
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
if (!mCamera.isOpened()) {
mCamera.release();
mCamera = null;
Log.e("HelloOpenCVView", "Failed to open native camera");
return false;
}
}
return true;
}
public void cameraRelease() {
synchronized(this) {
if (mCamera != null) {
mCamera.release();
mCamera = null;
}
}
}
private void cameraSetup(int width, int height) {
synchronized (this) {
if (mCamera != null && mCamera.isOpened()) {
List<Size> sizes = mCamera.getSupportedPreviewSizes();
int mFrameWidth = width;
int mFrameHeight = height;
{ // selecting optimal camera preview size
double minDiff = Double.MAX_VALUE;
for (Size size : sizes) {
if (Math.abs(size.height - height) < minDiff) {
mFrameWidth = (int) size.width;
mFrameHeight = (int) size.height;
minDiff = Math.abs(size.height - height);
}
}
}
mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, mFrameWidth);
mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, mFrameHeight);
}
}
}
#. The last step would be to update the *run()* method in the *HelloOpenCVView* class as follows:
.. code-block:: java
public void run() {
while (true) {
Bitmap bmp = null;
synchronized (this) {
if (mCamera == null)
break;
if (!mCamera.grab())
break;
bmp = processFrame(mCamera);
}
if (bmp != null) {
Canvas canvas = getHolder().lockCanvas();
if (canvas != null) {
canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2,
(canvas.getHeight() - bmp.getHeight()) / 2, null);
getHolder().unlockCanvasAndPost(canvas);
}
bmp.recycle();
}
}
}
protected Bitmap processFrame(VideoCapture capture) {
Mat mRgba = new Mat();
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
//process mRgba
Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
try {
Utils.matToBitmap(mRgba, bmp);
} catch(Exception e) {
Log.e("processFrame", "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmp.recycle();
bmp = null;
}
return bmp;
}
@ -9,7 +9,7 @@ Goal
In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
* Load an image (using :imread:`imread <>`)
* Create a named OpenCV window (using :named_window:`namedWindow <>`)
* Display an image in an OpenCV window (using :imshow:`imshow <>`); a condensed sketch combining the three calls follows
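The following is only a condensed sketch of how the three functions fit together; the downloadable sample below is the authoritative version:

.. code-block:: cpp

   #include <opencv2/core/core.hpp>
   #include <opencv2/highgui/highgui.hpp>
   using namespace cv;

   int main( int argc, char** argv )
   {
       if( argc < 2 ) return -1;      // expect an image path as the first argument
       Mat image = imread( argv[1] ); // load the image
       if( image.empty() ) return -1; // bail out if loading failed
       namedWindow( "Display window", CV_WINDOW_AUTOSIZE ); // create a window
       imshow( "Display window", image );                   // show the image in it
       waitKey(0);                                          // wait for a keystroke
       return 0;
   }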
@ -17,7 +17,7 @@ In this tutorial you will learn how to:
Source Code
===========
Download the source code from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp>`_.
.. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
:language: cpp
@ -29,7 +29,7 @@ Explanation
In OpenCV 2 we have multiple modules. Each one takes care of a different area or approach towards image processing. You could already observe this in the structure of the user guide of these tutorials itself. Before you use any of them you first need to include the header files where the content of each individual module is declared.
You'll almost always end up using the:
.. container:: enumeratevisibleitemswithsquare
@ -75,23 +75,23 @@ Now we call the :imread:`imread <>` function which loads the image name specifie
:tab-width: 4
:lines: 17
.. note::
OpenCV offers support for the image formats Windows bitmap (bmp), portable image formats (pbm, pgm, ppm) and Sun raster (sr, ras). With the help of plugins (you need to specify to use them when you build the library yourself; nevertheless, they are present by default in the packages we ship) you may also load image formats like JPEG (jpeg, jpg, jpe), JPEG 2000 (jp2 - codenamed in the CMake as Jasper), TIFF files (tiff, tif) and portable network graphics (png). Furthermore, OpenEXR is also a possibility.
After checking that the image data was loaded correctly, we want to display our image, so we create an OpenCV window using the :named_window:`namedWindow <>` function. These are automatically managed by OpenCV once you create them. For this you need to specify its name and how it should handle the change of the image it contains from a size point of view. It may be:
.. container:: enumeratevisibleitemswithsquare
+ *CV_WINDOW_AUTOSIZE* is the only supported one if you do not use the Qt backend. In this case the window size will take up the size of the image it shows. No resize permitted!
+ *CV_WINDOW_NORMAL* on Qt you may use this to allow window resize. The image will resize itself according to the current window size. By using the | operator you also need to specify if you would like the image to keep its aspect ratio (*CV_WINDOW_KEEPRATIO*) or not (*CV_WINDOW_FREERATIO*).
.. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
:language: cpp
:lines: 25
:tab-width: 4
Finally, to update the content of the OpenCV window with a new image use the :imshow:`imshow <>` function. Specify the OpenCV window name to update and the image to use during this operation:
.. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
:language: cpp
@ -110,7 +110,7 @@ Result
.. container:: enumeratevisibleitemswithsquare
* Compile your code and then run the executable giving an image path as argument. If you're on Windows the executable will of course contain an *exe* extension too. Make sure the image file is near your program file.
.. code-block:: bash
@ -120,7 +120,7 @@ Result
.. image:: images/Display_Image_Tutorial_Result.jpg
:alt: Display Image Tutorial - Final Result
:align: center
.. raw:: html
@ -29,7 +29,7 @@ Here you can read tutorials about how to set up your computer to work with the O
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|Usage_1| **Title:** :ref:`Linux_GCC_Usage`
@ -47,7 +47,7 @@ Here you can read tutorials about how to set up your computer to work with the O
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|Usage_2| **Title:** :ref:`Linux_Eclipse_Usage`
@ -67,7 +67,7 @@ Here you can read tutorials about how to set up your computer to work with the O
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|WinInstal| **Title:** :ref:`Windows_Installation`
@ -85,7 +85,7 @@ Here you can read tutorials about how to set up your computer to work with the O
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|WinVSHowT| **Title:** :ref:`Windows_Visual_Studio_How_To`
@ -93,7 +93,7 @@ Here you can read tutorials about how to set up your computer to work with the O
*Author:* |Author_BernatG|
You will learn what steps you need to perform in order to use the OpenCV library inside a new Microsoft Visual Studio project.
=========== ======================================================
@ -105,8 +105,8 @@ Here you can read tutorials about how to set up your computer to work with the O
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
================ =================================================
|AndroidBinPack| **Title:** :ref:`Android_Binary_Package`
*Compatibility:* > OpenCV 2.3.1
@ -115,17 +115,13 @@ Here you can read tutorials about how to set up your computer to work with the O
You will learn how to setup OpenCV for Android platform!
================ =================================================
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
================ =================================================
|AndroidBinPack| **Title:** :ref:`Android_Binary_Package_with_NDK`
*Compatibility:* > OpenCV 2.3.1
@ -133,33 +129,83 @@ Here you can read tutorials about how to set up your computer to work with the O
You will learn how to work with C++ OpenCV code for Android platform
================ =================================================
.. |AndroidBinPack| image:: images/android_logo.png
:height: 90pt
:width: 90pt
* **Android** tutorials v2 [in progress]
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
================ =================================================
|AndroidLogo| **Title:** :ref:`Android_Dev_Intro`
*Compatibility:* > OpenCV 2.4.2
*Author:* |Author_VsevolodG|
Not a tutorial, but a guide introducing Android development basics and environment setup
================ =================================================
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
================ =================================================
|AndroidLogo| **Title:** :ref:`O4A_SDK`
*Compatibility:* > OpenCV 2.4.2
*Author:* |Author_VsevolodG|
OpenCV4Android SDK: general info, installation, running samples
================ =================================================
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
================ =================================================
|AndroidLogo| **Title:** :ref:`dev_with_OCV_on_Android`
*Compatibility:* > OpenCV 2.4.2
*Author:* |Author_VsevolodG|
Development with OpenCV4Android SDK
================ =================================================
.. |AndroidLogo| image:: images/android_logo.png
:height: 90pt
:width: 90pt
* **iOS**
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|Install_2| **Title:** :ref:`iOS-Installation`
*Compatibility:* > OpenCV 2.3.1
*Author:* |Author_ArtemM|
We will learn how to set up OpenCV for use in iOS!
=========== ======================================================
.. |Install_2| image:: images/ios4_logo.jpg
:width: 90pt
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
============= ======================================================
|Beginners_1| **Title:** :ref:`Display_Image`
@ -172,36 +218,39 @@ Here you can read tutorials about how to set up your computer to work with the O
============= ======================================================
.. |Beginners_1| image:: images/Display_Image_Tutorial_Result.jpg
:height: 90pt
:width: 90pt
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=============== ======================================================
|Beginners_2| **Title:** :ref:`Load_Save_Image`
*Compatibility:* > OpenCV 2.0
*Author:* |Author_AnaH|
We will learn how to save an Image in OpenCV...plus a small conversion to grayscale
=============== ======================================================
.. |Beginners_2| image:: images/Load_Save_Image_Result_1.jpg
:height: 90pt
:width: 90pt
* **Want to contribute, and see your own work between the OpenCV tutorials?**
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=============== ======================================================
|HowToWriteT| **Title:** :ref:`howToWriteTutorial`
*Compatibility:* > OpenCV 1.0
*Author:* |Author_BernatG|
If you already have a good grasp on using OpenCV and have made some projects that would be perfect for presenting an OpenCV feature not yet part of these tutorials, here is what you need to know.
@ -217,7 +266,7 @@ Here you can read tutorials about how to set up your computer to work with the O
.. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, there is no need to show it.
.. toctree::
:hidden:
../linux_install/linux_install
../linux_gcc_cmake/linux_gcc_cmake
@ -226,6 +275,9 @@ Here you can read tutorials about how to set up your computer to work with the O
../windows_visual_studio_Opencv/windows_visual_studio_Opencv
../android_binary_package/android_binary_package
../android_binary_package/android_binary_package_using_with_NDK
../android_binary_package/android_dev_intro
../android_binary_package/O4A_SDK
../android_binary_package/dev_with_OCV_on_Android
../ios_install/ios_install
../display_image/display_image
../load_save_image/load_save_image

View File

@ -14,7 +14,7 @@ In this tutorial you will learn how to:
* :cascade_classifier_load:`load <>` to load a .xml classifier file. It can be either a Haar or an LBP classifier
* :cascade_classifier_detect_multiscale:`detectMultiScale <>` to perform the detection.
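For a quick preview, here is a minimal sketch of these two calls (the file names are just examples; the full program follows below):

.. code-block:: cpp

   #include "opencv2/objdetect/objdetect.hpp"
   #include "opencv2/imgproc/imgproc.hpp"
   #include "opencv2/highgui/highgui.hpp"
   #include <stdio.h>

   using namespace cv;

   int main()
   {
     CascadeClassifier face_cascade;
     //-- load() accepts both Haar and LBP cascade files
     if( !face_cascade.load( "haarcascade_frontalface_alt.xml" ) )
       { printf("--(!)Error loading\n"); return -1; }

     Mat gray = imread( "lena.jpg", 0 );
     equalizeHist( gray, gray );

     //-- detectMultiScale() returns the detected objects as rectangles
     std::vector<Rect> faces;
     face_cascade.detectMultiScale( gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
     printf( "Found %d face(s)\n", (int)faces.size() );
     return 0;
   }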
Theory
======
@ -22,9 +22,9 @@ Theory
Code
====
This tutorial's code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp>`_ . The second version (using LBP for face detection) can be `found here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp>`_
.. code-block:: cpp
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -56,7 +56,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
@ -64,15 +64,15 @@ This tutorial code's is shown lines below. You can also download it from `here <
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
@ -103,11 +103,11 @@ This tutorial code's is shown lines below. You can also download it from `here <
for( int j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
}
//-- Show what you got
imshow( window_name, frame );
}
@ -124,11 +124,11 @@ Result
:align: center
:height: 300pt
Remember to copy the files *haarcascade_frontalface_alt.xml* and *haarcascade_eye_tree_eyeglasses.xml* into your current directory. They are located in *opencv/data/haarcascades*
#. This is the result of using the file *lbpcascade_frontalface.xml* (LBP trained) for the face detection. For the eyes we keep using the file used in the tutorial.
.. image:: images/Cascade_Classifier_Tutorial_Result_LBP.jpg
:align: center
:height: 300pt

View File

@ -2,7 +2,7 @@
OpenCV Tutorials
################
The following links describe a set of basic OpenCV tutorials. All the source code mentioned here is provided as part of the OpenCV regular releases, so check before you start copying & pasting the code. The list of tutorials below is automatically generated from reST files located in our GIT repository.
As always, we would be happy to hear your comments and receive your contributions on any tutorial.
@ -10,12 +10,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|Introduct| You will learn how to set up OpenCV on your computer!
=========== =======================================================
.. |Introduct| image:: images/introduction.jpg
:height: 80pt
:width: 80pt
@ -25,12 +25,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|Core| Here you will learn about the basic building blocks of the library. A must read and know for understanding how to manipulate the images on a pixel level.
=========== =======================================================
.. |Core| image:: images/core.jpg
:height: 80pt
:width: 80pt
@ -40,12 +40,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|ImgProc| In this section you will learn about the image processing (manipulation) functions inside OpenCV.
=========== =======================================================
.. |ImgProc| image:: images/imgproc.jpg
:height: 80pt
:width: 80pt
@ -55,12 +55,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|HighGui| This section contains valuable tutorials about how to read/save your image/video files and how to use the built-in graphical user interface of the library.
=========== =======================================================
.. |HighGui| image:: images/highgui.jpg
:height: 80pt
:width: 80pt
@ -70,12 +70,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|Calib3D| Although most of our images are in a 2D format, they do come from a 3D world. Here you will learn how to extract information about the 3D world from 2D images.
=========== =======================================================
.. |Calib3D| image:: images/calib3d.jpg
:height: 80pt
:width: 80pt
@ -85,27 +85,27 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|Featur2D| Learn how to use the feature point detectors, descriptors and matching framework found inside OpenCV.
=========== =======================================================
.. |Featur2D| image:: images/feature2D.jpg
:height: 80pt
:width: 80pt
:alt: feature2D Icon
* :ref:`Table-Of-Content-Video`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|Video| Look here to find algorithms to use on your video stream, like: motion extraction, feature tracking and foreground extraction.
=========== =======================================================
.. |Video| image:: images/video.jpg
:height: 80pt
:width: 80pt
@ -115,27 +115,27 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|ObjDetect| Ever wondered how your digital camera detects people and faces? Look here to find out!
=========== =======================================================
.. |ObjDetect| image:: images/objdetect.jpg
:height: 80pt
:width: 80pt
:alt: objdetect Icon
* :ref:`Table-Of-Content-Ml`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|ml| Use the powerful machine learning classes for statistical classification, regression and clustering of data.
=========== =======================================================
.. |ml| image:: images/ml.jpg
:height: 80pt
:width: 80pt
@ -145,12 +145,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|GPU| Squeeze every bit of computational power out of your system by using the power of your video card to run the OpenCV algorithms.
=========== =======================================================
.. |GPU| image:: images/gpu.jpg
:height: 80pt
:width: 80pt
@ -160,12 +160,12 @@ As always, we would be happy to hear your comments and receive your contribution
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== =======================================================
|General| These tutorials are the bottom of the iceberg, as they link together several of the modules presented above in order to solve complex problems.
.. |General| image:: images/general.jpg
:height: 80pt
:width: 80pt

View File

@ -15,7 +15,7 @@ In order to use depth sensor with OpenCV you should do the following preliminary
Install the OpenNI library (from here http://www.openni.org/downloadfiles) and the PrimeSensor Module for OpenNI (from here https://github.com/avin2/SensorKinect). The installation should be done to the default folders listed in the instructions of these products, e.g.:
.. code-block:: text
OpenNI:
Linux & MacOSX:
Libs into: /usr/lib
@ -30,7 +30,7 @@ In order to use depth sensor with OpenCV you should do the following preliminary
Bins into: c:/Program Files/Prime Sense/Sensor/Bin
If one or both products were installed to other folders, the user should change the corresponding CMake variables ``OPENNI_LIB_DIR``, ``OPENNI_INCLUDE_DIR`` and/or ``OPENNI_PRIME_SENSOR_MODULE_BIN_DIR``.
#.
Configure OpenCV with OpenNI support by setting the ``WITH_OPENNI`` flag in CMake. If OpenNI is found in the install folders, OpenCV will be built with the OpenNI library (see the ``OpenNI`` status in the CMake log) even if the PrimeSensor Modules can not be found (see the ``OpenNI PrimeSensor Modules`` status in the CMake log). Without the PrimeSensor module OpenCV will still compile with the OpenNI library, but the ``VideoCapture`` object will not grab data from the Kinect sensor.
@ -56,9 +56,9 @@ In order to get depth map from depth sensor use ``VideoCapture::operator >>``, e
VideoCapture capture( CV_CAP_OPENNI );
for(;;)
{
Mat depthMap;
capture >> depthMap;
if( waitKey( 30 ) >= 0 )
break;
}
@ -70,19 +70,19 @@ For getting several data maps use ``VideoCapture::grab`` and ``VideoCapture::ret
{
Mat depthMap;
Mat bgrImage;
capture.grab();
capture.retrieve( depthMap, OPENNI_DEPTH_MAP );
capture.retrieve( bgrImage, OPENNI_BGR_IMAGE );
if( waitKey( 30 ) >= 0 )
break;
}
For setting and getting some properties of the sensor's data generators, use the ``VideoCapture::set`` and ``VideoCapture::get`` methods respectively, e.g. ::
VideoCapture capture( CV_CAP_OPENNI );
capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
cout << "FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
@ -100,34 +100,34 @@ Some depth sensors (for example XtionPRO) do not have image generator. In order
Flags specifying the needed generator type must be used in combination with a particular generator property. The following properties of cameras available through OpenNI interfaces are supported:
*
For image generator:
- ``CV_CAP_PROP_OPENNI_OUTPUT_MODE`` -- Three output modes are supported: ``CV_CAP_OPENNI_VGA_30HZ`` used by default (image generator returns images in VGA resolution with 30 FPS), ``CV_CAP_OPENNI_SXGA_15HZ`` (image generator returns images in SXGA resolution with 15 FPS) and ``CV_CAP_OPENNI_SXGA_30HZ`` (image generator returns images in SXGA resolution with 30 FPS, the mode is supported by XtionPRO Live); depth generator's maps are always in VGA resolution.
*
For depth generator:
- ``CV_CAP_PROP_OPENNI_REGISTRATION`` -- Flag that registers the remapping of the depth map to the image map by changing the depth generator's view point (if the flag is ``"on"``) or sets this view point to its normal one (if the flag is ``"off"``). The registration process's resulting images are pixel-aligned, which means that every pixel in the image is aligned to a pixel in the depth image.
The following properties are available for getting only:
- ``CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH`` -- A maximum supported depth of Kinect in mm.
- ``CV_CAP_PROP_OPENNI_BASELINE`` -- Baseline value in mm.
- ``CV_CAP_PROP_OPENNI_FOCAL_LENGTH`` -- A focal length in pixels.
- ``CV_CAP_PROP_FRAME_WIDTH`` -- Frame width in pixels.
- ``CV_CAP_PROP_FRAME_HEIGHT`` -- Frame height in pixels.
- ``CV_CAP_PROP_FPS`` -- Frame rate in FPS.
*
Some typical "generator type + property" flag combinations are defined as single flags:
- ``CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE``
- ``CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE``
- ``CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH``
- ``CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION``
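For example, a short sketch (assuming a depth sensor is attached) that uses these combined flags to turn on depth registration and query the depth generator ::

    VideoCapture capture( CV_CAP_OPENNI );
    // remap the depth map to the image map
    capture.set( CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, 1 );
    // read depth generator properties through the combined flags
    double baseline    = capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE );
    double focalLength = capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );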
For more information please refer to the usage example openni_capture.cpp_ in the ``opencv/samples/cpp`` folder.
.. _openni_capture.cpp: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/openni_capture.cpp

View File

@ -5,12 +5,12 @@ The built framework is universal, it can be used to build app and run it on eith
Usage:
./build_framework.py <outputdir>
By cmake conventions (and especially if you work with OpenCV repository),
the output dir should not be a subdirectory of OpenCV source tree.
The script will create <outputdir>, if it's missing, and a few of its subdirectories:
<outputdir>
build/
iPhoneOS/
@ -29,7 +29,7 @@ import glob, re, os, os.path, shutil, string, sys
def build_opencv(srcroot, buildroot, target):
"builds OpenCV for device or simulator"
builddir = os.path.join(buildroot, target)
if not os.path.isdir(builddir):
os.makedirs(builddir)
@ -46,23 +46,23 @@ def build_opencv(srcroot, buildroot, target):
os.system("cmake %s ." % (cmakeargs,))
else:
os.system("cmake %s %s" % (cmakeargs, srcroot))
for wlib in [builddir + "/modules/world/UninstalledProducts/libopencv_world.a",
builddir + "/lib/Release/libopencv_world.a"]:
if os.path.isfile(wlib):
os.remove(wlib)
os.system("xcodebuild -parallelizeTargets -jobs 8 -sdk %s -configuration Release -target ALL_BUILD" % target.lower())
os.system("xcodebuild -sdk %s -configuration Release -target install install" % target.lower())
os.chdir(currdir)
def put_framework_together(srcroot, dstroot):
"constructs the framework directory after all the targets are built"
# find the list of targets (basically, ["iPhoneOS", "iPhoneSimulator"])
targetlist = glob.glob(os.path.join(dstroot, "build", "*"))
targetlist = [os.path.basename(t) for t in targetlist]
# set the current dir to the dst root
currdir = os.getcwd()
framework_dir = dstroot + "/opencv2.framework"
@ -70,7 +70,7 @@ def put_framework_together(srcroot, dstroot):
shutil.rmtree(framework_dir)
os.makedirs(framework_dir)
os.chdir(framework_dir)
# determine OpenCV version (without subminor part)
tdir0 = "../build/" + targetlist[0]
cfg = open(tdir0 + "/cvconfig.h", "rt")
@ -79,18 +79,18 @@ def put_framework_together(srcroot, dstroot):
opencv_version = l[l.find("\"")+1:l.rfind(".")]
break
cfg.close()
# form the directory tree
dstdir = "Versions/A"
os.makedirs(dstdir + "/Resources")
# copy headers
shutil.copytree(tdir0 + "/install/include/opencv2", dstdir + "/Headers")
# make universal static lib
wlist = " ".join(["../build/" + t + "/lib/Release/libopencv_world.a" for t in targetlist])
os.system("lipo -create " + wlist + " -o " + dstdir + "/opencv2")
# form Info.plist
srcfile = open(srcroot + "/ios/Info.plist.in", "rt")
dstfile = open(dstdir + "/Resources/Info.plist", "wt")
@ -98,29 +98,29 @@ def put_framework_together(srcroot, dstroot):
dstfile.write(l.replace("${VERSION}", opencv_version))
srcfile.close()
dstfile.close()
# copy cascades
# TODO ...
# make symbolic links
os.symlink(dstdir + "/Headers", "Headers")
os.symlink(dstdir + "/Resources", "Resources")
os.symlink(dstdir + "/opencv2", "opencv2")
os.symlink("A", "Versions/Current")
def build_framework(srcroot, dstroot):
"main function to do all the work"
for target in ["iPhoneOS", "iPhoneSimulator"]:
build_opencv(srcroot, os.path.join(dstroot, "build"), target)
put_framework_together(srcroot, dstroot)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage:\n\t./build_framework.py <outputdir>\n\n"
sys.exit(0)
build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")), os.path.abspath(sys.argv[1]))

View File

@ -67,11 +67,12 @@ StereoVar::~StereoVar()
static Mat diffX(Mat &src)
{
int cols = src.cols - 1;
Mat dst(src.size(), src.type());
for(int y = 0; y < src.rows; y++){
const float* pSrc = src.ptr<float>(y);
float* pDst = dst.ptr<float>(y);
int x = 0;
#if CV_SSE2
for (x = 0; x <= cols - 8; x += 8) {
__m128 a0 = _mm_loadu_ps(pSrc + x);

View File

@ -2446,6 +2446,6 @@ The above methods are usually enough for users. If you want to make your own alg
* Make a class and specify ``Algorithm`` as its base class.
* The algorithm parameters should be the class members. See ``Algorithm::get()`` for the list of possible types of the parameters.
* Add public virtual method ``AlgorithmInfo* info() const;`` to your class.
* Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/projects/opencv/repository/revisions/master/entry/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
* Add some public function (e.g. ``initModule_<mymodule>()``) that calls info() of your algorithm and put it into the same source file as ``info()`` implementation. This is to force C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details.
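A minimal sketch of such a class (the names ``MyAlgo``, ``gamma`` and ``initModule_mymodule`` are only illustrative) ::

    class MyAlgo : public Algorithm
    {
    public:
        MyAlgo() : gamma(1.0) {}
        AlgorithmInfo* info() const;   // implemented next to the AlgorithmInfo instance
        double gamma;                  // a parameter, accessible via Algorithm::get()/set()
    };

    // Put this into the same source file as the info() implementation, so that the
    // linker includes that object file into the target application:
    bool initModule_mymodule()
    {
        Ptr<Algorithm> p = new MyAlgo;
        return p->info() != 0;
    }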

View File

@ -1024,7 +1024,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
__m128 s0 = _mm_or_ps(t0, t1);
__m128 det =_mm_set1_ps((float)d);
s0 = _mm_mul_ps(s0, det);
static const uchar CV_DECL_ALIGNED(16) inv[16] = {0,0,0,0,0,0,0,0x80,0,0,0,0x80,0,0,0,0};
__m128 pattern = _mm_load_ps((const float*)inv);
s0 = _mm_xor_ps(s0, pattern);//==-1*s0
s0 = _mm_shuffle_ps(s0, s0, _MM_SHUFFLE(0,2,1,3));
@ -1064,7 +1064,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
__m128d det = _mm_load1_pd((const double*)&d);
sm = _mm_mul_pd(sm, det);
static const uchar CV_DECL_ALIGNED(16) inv[8] = {0,0,0,0,0,0,0,0x80};
__m128d pattern = _mm_load1_pd((double*)inv);
ss = _mm_mul_pd(ss, det);
ss = _mm_xor_pd(ss, pattern);//==-1*ss
@ -1097,24 +1097,66 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
double d = det3(Sf);
if( d != 0. )
{
float CV_DECL_ALIGNED(16) t[12];
result = true;
d = 1./d;
#if CV_SSE2
if(USE_SSE2)
{
__m128 det =_mm_set1_ps((float)d);
__m128 s0 = _mm_loadu_ps((const float*)srcdata);//s0 = Sf(0,0) Sf(0,1) Sf(0,2) ***
__m128 s1 = _mm_loadu_ps((const float*)(srcdata+srcstep));//s1 = Sf(1,0) Sf(1,1) Sf(1,2) ***
__m128 s2 = _mm_set_ps(0.f, Sf(2,2), Sf(2,1), Sf(2,0)); //s2 = Sf(2,0) Sf(2,1) Sf(2,2) ***
__m128 r0 = _mm_shuffle_ps(s1,s1,_MM_SHUFFLE(3,0,2,1)); //r0 = Sf(1,1) Sf(1,2) Sf(1,0) ***
__m128 r1 = _mm_shuffle_ps(s2,s2,_MM_SHUFFLE(3,1,0,2)); //r1 = Sf(2,2) Sf(2,0) Sf(2,1) ***
__m128 r2 = _mm_shuffle_ps(s2,s2,_MM_SHUFFLE(3,0,2,1)); //r2 = Sf(2,1) Sf(2,2) Sf(2,0) ***
__m128 t0 = _mm_mul_ps(s0, r0);//t0 = Sf(0,0)*Sf(1,1) Sf(0,1)*Sf(1,2) Sf(0,2)*Sf(1,0) ***
__m128 t1 = _mm_mul_ps(s0, r1);//t1 = Sf(0,0)*Sf(2,2) Sf(0,1)*Sf(2,0) Sf(0,2)*Sf(2,1) ***
__m128 t2 = _mm_mul_ps(s1, r2);//t2 = Sf(1,0)*Sf(2,1) Sf(1,1)*Sf(2,2) Sf(1,2)*Sf(2,0) ***
__m128 r3 = _mm_shuffle_ps(s0,s0,_MM_SHUFFLE(3,0,2,1));//r3 = Sf(0,1) Sf(0,2) Sf(0,0) ***
__m128 r4 = _mm_shuffle_ps(s0,s0,_MM_SHUFFLE(3,1,0,2));//r4 = Sf(0,2) Sf(0,0) Sf(0,1) ***
__m128 t00 = _mm_mul_ps(s1, r3);//t00 = Sf(1,0)*Sf(0,1) Sf(1,1)*Sf(0,2) Sf(1,2)*Sf(0,0) ***
__m128 t11 = _mm_mul_ps(s2, r4);//t11 = Sf(2,0)*Sf(0,2) Sf(2,1)*Sf(0,0) Sf(2,2)*Sf(0,1) ***
__m128 t22 = _mm_mul_ps(s2, r0);//t22 = Sf(2,0)*Sf(1,1) Sf(2,1)*Sf(1,2) Sf(2,2)*Sf(1,0) ***
t0 = _mm_mul_ps(_mm_sub_ps(t0,t00), det);//Sf(0,0)*Sf(1,1) Sf(0,1)*Sf(1,2) Sf(0,2)*Sf(1,0) ***
//-Sf(1,0)*Sf(0,1) -Sf(1,1)*Sf(0,2) -Sf(1,2)*Sf(0,0)
t1 = _mm_mul_ps(_mm_sub_ps(t1,t11), det);//Sf(0,0)*Sf(2,2) Sf(0,1)*Sf(2,0) Sf(0,2)*Sf(2,1) ***
//-Sf(2,0)*Sf(0,2) -Sf(2,1)*Sf(0,0) -Sf(2,2)*Sf(0,1)
t2 = _mm_mul_ps(_mm_sub_ps(t2,t22), det);//Sf(1,0)*Sf(2,1) Sf(1,1)*Sf(2,2) Sf(1,2)*Sf(2,0) ***
//-Sf(2,0)*Sf(1,1) -Sf(2,1)*Sf(1,2) -Sf(2,2)*Sf(1,0)
_mm_store_ps(t, t0);
_mm_store_ps(t+4, t1);
_mm_store_ps(t+8, t2);
Df(0,0) = t[9]; Df(0,1) = t[6]; Df(0,2) = t[1];
Df(1,0) = t[10]; Df(1,1) = t[4]; Df(1,2) = t[2];
Df(2,0) = t[8]; Df(2,1) = t[5]; Df(2,2) = t[0];
}
else
#endif
{
t[0] = (float)(((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
t[1] = (float)(((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
t[2] = (float)(((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);
t[3] = (float)(((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
t[4] = (float)(((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
t[5] = (float)(((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);
t[6] = (float)(((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
t[7] = (float)(((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
t[8] = (float)(((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);
Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
}
}
}
else

View File

@ -148,7 +148,7 @@ Wrapping class for feature detection using the
class FastFeatureDetector : public FeatureDetector
{
public:
FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:

View File

@ -7,7 +7,7 @@ FAST
--------
Detects corners using the FAST algorithm
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 )
:param image: Image where keypoints (corners) are detected.
@ -17,6 +17,8 @@ Detects corners using the FAST algorithm
:param nonmaxSuppression: If it is true, non-maximum suppression is applied to detected corners (keypoints).
:param type: one of the three neighborhoods as defined in the paper: ``FastFeatureDetector::TYPE_9_16``, ``FastFeatureDetector::TYPE_7_12``, ``FastFeatureDetector::TYPE_5_8``
Detects corners using the FAST algorithm by [Rosten06]_.
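A short usage sketch (the image path is only an example) ::

    Mat img = imread( "building.jpg", 0 );   // FAST expects a grayscale image
    vector<KeyPoint> keypoints;
    FAST( img, keypoints, 30, true, FastFeatureDetector::TYPE_9_16 );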
.. [Rosten06] E. Rosten. Machine Learning for High-speed Corner Detection, 2006.

View File

@ -9,16 +9,16 @@ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ -350,7 +350,7 @@ int cornerScore<8>(const uchar* ptr, const int pixel[], int threshold)
}
int b0 = -a0;
for( k = 0; k < 8; k += 2 )
{
int b = std::max((int)d[k+1], (int)d[k+2]);
b = std::max(b, (int)d[k+3]);
@ -375,7 +375,10 @@ template<int patternSize>
void FAST_t(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
{
Mat img = _img.getMat();
const int K = patternSize/2, N = patternSize + K + 1;
#if CV_SSE2
const int quarterPatternSize = patternSize/4;
#endif
int i, j, k, pixel[25];
makeOffsets(pixel, (int)img.step, patternSize);
for(k = patternSize; k < 25; k++)
@ -585,7 +588,7 @@ FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppressio
FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(_type)
{}
void FastFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
{
Mat grayImage = image;

View File

@ -42,7 +42,7 @@ You can always determine at runtime whether the OpenCV GPU-built binaries (or PT
Utilizing Multiple GPUs
-----------------------
In the current version, each of the OpenCV GPU algorithms can use only a single GPU. So, to utilize multiple GPUs, you have to manually distribute the work between GPUs.
Switching the active device can be done using the :ocv:func:`gpu::setDevice()` function. For more details please read the CUDA C Programming Guide.
While developing algorithms for multiple GPUs, note the data passing overhead. For primitive functions and small images, it can be significant, which may eliminate all the advantages of having multiple GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo Block Matching algorithm has been successfully parallelized using the following algorithm:
@ -59,5 +59,5 @@ While developing algorithms for multiple GPUs, note a data passing overhead. For
With this algorithm, a dual GPU gave a 180% performance increase compared to the single Fermi GPU. For a source code example, see
http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/.
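As an illustration, a minimal sketch of such manual work distribution (one host thread per device; the splitting and merging of the data is left out) ::

    void processOnDevice( int device, const cv::Mat& hostPart, cv::Mat& resultPart )
    {
        cv::gpu::setDevice( device );          // bind this host thread to one GPU
        cv::gpu::GpuMat d_src( hostPart );     // upload this GPU's share of the data
        cv::gpu::GpuMat d_dst;
        cv::gpu::boxFilter( d_src, d_dst, -1, cv::Size(5, 5) );  // any GPU routine
        d_dst.download( resultPart );          // bring the partial result back
    }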

View File

@ -324,9 +324,9 @@ Class used for background/foreground segmentation. ::
std::vector< std::vector<cv::Point> > foreground_regions;
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [FGD2003]_.
The results are available through the class fields:
.. ocv:member:: cv::gpu::GpuMat background
@ -489,9 +489,9 @@ Gaussian Mixture-based Background/Foreground Segmentation Algorithm. ::
...
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements the algorithm described in [MOG2004]_.
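A minimal usage sketch (assuming the ``gpu::MOG2_GPU`` flavor of the class and a frame already available on the host) ::

    gpu::MOG2_GPU mog2;
    gpu::GpuMat d_frame( frame ), d_fgmask;
    mog2( d_frame, d_fgmask );   // update the model and obtain the foreground mask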
Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
.. ocv:member:: float backgroundRatio

View File

@ -917,6 +917,12 @@ CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTra
GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! computes the mask for generalized flood fill components labeling.
CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
//! performs connected components labeling.
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, Stream& stream = Stream::Null());
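//! A usage sketch (d_image is a hypothetical GpuMat already uploaded to the device;
//! the lo/hi connectivity thresholds are arbitrary):
//!   GpuMat mask, components;
//!   connectivityMask(d_image, mask, cv::Scalar::all(0), cv::Scalar::all(2));
//!   labelComponents(mask, components);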
////////////////////////////////// Histograms //////////////////////////////////
//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.

View File

@ -1148,6 +1148,9 @@ GPU_PERF_TEST(CvtColor, cv::gpu::DeviceInfo, cv::Size, MatDepth, CvtColorInfo)
cv::gpu::GpuMat src(src_host);
cv::gpu::GpuMat dst;
if (info.code >= cv::COLOR_BayerBG2BGR && info.code <= cv::COLOR_BayerGR2BGR)
info.dcn = 4;
cv::gpu::cvtColor(src, dst, info.code, info.dcn);
TEST_CYCLE()
@ -1172,7 +1175,20 @@ INSTANTIATE_TEST_CASE_P(ImgProc, CvtColor, testing::Combine(
CvtColorInfo(3, 3, cv::COLOR_BGR2HSV),
CvtColorInfo(3, 3, cv::COLOR_HSV2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2HLS),
CvtColorInfo(3, 3, cv::COLOR_HLS2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2Lab),
CvtColorInfo(3, 3, cv::COLOR_RGB2Lab),
CvtColorInfo(3, 3, cv::COLOR_BGR2Luv),
CvtColorInfo(3, 3, cv::COLOR_RGB2Luv),
CvtColorInfo(3, 3, cv::COLOR_Lab2BGR),
CvtColorInfo(3, 3, cv::COLOR_Lab2RGB),
CvtColorInfo(3, 3, cv::COLOR_Luv2BGR),
CvtColorInfo(3, 3, cv::COLOR_Luv2RGB),
CvtColorInfo(1, 3, cv::COLOR_BayerBG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGB2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerRG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGR2BGR),
CvtColorInfo(4, 4, cv::COLOR_RGBA2mRGBA))));
//////////////////////////////////////////////////////////////////////
// SwapChannels

View File

@ -65,19 +65,19 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"BGR2HSV",
"RGB2HSV",
0,
0,
"",
"",
0,
0,
"BGR2Lab",
"RGB2Lab",
0,
0,
0,
0,
"BayerBG2BGR",
"BayerGB2BGR",
"BayerRG2BGR",
"BayerGR2BGR",
0,
0,
"BGR2Luv",
"RGB2Luv",
"BGR2HLS",
"RGB2HLS",
@ -85,18 +85,18 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"HSV2BGR",
"HSV2RGB",
0,
0,
0,
0,
"Lab2BGR",
"Lab2RGB",
"Luv2BGR",
"Luv2RGB",
"HLS2BGR",
"HLS2RGB",
0,
0,
0,
0,
"BayerBG2BGR_VNG",
"BayerGB2BGR_VNG",
"BayerRG2BGR_VNG",
"BayerGR2BGR_VNG",
"BGR2HSV_FULL",
"RGB2HSV_FULL",
@ -108,30 +108,78 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"HLS2BGR_FULL",
"HLS2RGB_FULL",
0,
0,
0,
0,
"LBGR2Lab",
"LRGB2Lab",
"LBGR2Luv",
"LRGB2Luv",
0,
0,
0,
0,
"Lab2LBGR",
"Lab2LRGB",
"Luv2LBGR",
"Luv2LRGB",
"BGR2YUV",
"RGB2YUV",
"YUV2BGR",
"YUV2RGB",
0,
0,
0,
0,
"BayerBG2GRAY",
"BayerGB2GRAY",
"BayerRG2GRAY",
"BayerGR2GRAY",
0,
0,
0,
0
//YUV 4:2:0 formats family
"YUV2RGB_NV12",
"YUV2BGR_NV12",
"YUV2RGB_NV21",
"YUV2BGR_NV21",
"YUV2RGBA_NV12",
"YUV2BGRA_NV12",
"YUV2RGBA_NV21",
"YUV2BGRA_NV21",
"YUV2RGB_YV12",
"YUV2BGR_YV12",
"YUV2RGB_IYUV",
"YUV2BGR_IYUV",
"YUV2RGBA_YV12",
"YUV2BGRA_YV12",
"YUV2RGBA_IYUV",
"YUV2BGRA_IYUV",
"YUV2GRAY_420",
//YUV 4:2:2 formats family
"YUV2RGB_UYVY",
"YUV2BGR_UYVY",
"YUV2RGB_VYUY",
"YUV2BGR_VYUY",
"YUV2RGBA_UYVY",
"YUV2BGRA_UYVY",
"YUV2RGBA_VYUY",
"YUV2BGRA_VYUY",
"YUV2RGB_YUY2",
"YUV2BGR_YUY2",
"YUV2RGB_YVYU",
"YUV2BGR_YVYU",
"YUV2RGBA_YUY2",
"YUV2BGRA_YUY2",
"YUV2RGBA_YVYU",
"YUV2BGRA_YVYU",
"YUV2GRAY_UYVY",
"YUV2GRAY_YUY2",
// alpha premultiplication
"RGBA2mRGBA",
"mRGBA2RGBA",
"COLORCVT_MAX"
};
*os << str[info.code];

View File

@ -712,6 +712,19 @@ INSTANTIATE_TEST_CASE_P(ImgProc, CvtColor, testing::Combine(
CvtColorInfo(3, 3, cv::COLOR_BGR2HSV),
CvtColorInfo(3, 3, cv::COLOR_HSV2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2HLS),
CvtColorInfo(3, 3, cv::COLOR_HLS2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2Lab),
CvtColorInfo(3, 3, cv::COLOR_RGB2Lab),
CvtColorInfo(3, 3, cv::COLOR_BGR2Luv),
CvtColorInfo(3, 3, cv::COLOR_RGB2Luv),
CvtColorInfo(3, 3, cv::COLOR_Lab2BGR),
CvtColorInfo(3, 3, cv::COLOR_Lab2RGB),
CvtColorInfo(3, 3, cv::COLOR_Luv2BGR),
CvtColorInfo(3, 3, cv::COLOR_Luv2RGB),
CvtColorInfo(1, 3, cv::COLOR_BayerBG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGB2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerRG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGR2BGR),
CvtColorInfo(4, 4, cv::COLOR_RGBA2mRGBA))));
#endif

View File

@ -65,19 +65,19 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"BGR2HSV",
"RGB2HSV",
0,
0,
"",
"",
0,
0,
"BGR2Lab",
"RGB2Lab",
0,
0,
0,
0,
"BayerBG2BGR",
"BayerGB2BGR",
"BayerRG2BGR",
"BayerGR2BGR",
0,
0,
"BGR2Luv",
"RGB2Luv",
"BGR2HLS",
"RGB2HLS",
@ -85,18 +85,18 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"HSV2BGR",
"HSV2RGB",
0,
0,
0,
0,
"Lab2BGR",
"Lab2RGB",
"Luv2BGR",
"Luv2RGB",
"HLS2BGR",
"HLS2RGB",
0,
0,
0,
0,
"BayerBG2BGR_VNG",
"BayerGB2BGR_VNG",
"BayerRG2BGR_VNG",
"BayerGR2BGR_VNG",
"BGR2HSV_FULL",
"RGB2HSV_FULL",
@ -108,30 +108,78 @@ void PrintTo(const CvtColorInfo& info, ostream* os)
"HLS2BGR_FULL",
"HLS2RGB_FULL",
0,
0,
0,
0,
"LBGR2Lab",
"LRGB2Lab",
"LBGR2Luv",
"LRGB2Luv",
0,
0,
0,
0,
"Lab2LBGR",
"Lab2LRGB",
"Luv2LBGR",
"Luv2LRGB",
"BGR2YUV",
"RGB2YUV",
"YUV2BGR",
"YUV2RGB",
0,
0,
0,
0,
"BayerBG2GRAY",
"BayerGB2GRAY",
"BayerRG2GRAY",
"BayerGR2GRAY",
0,
0,
0,
0
//YUV 4:2:0 formats family
"YUV2RGB_NV12",
"YUV2BGR_NV12",
"YUV2RGB_NV21",
"YUV2BGR_NV21",
"YUV2RGBA_NV12",
"YUV2BGRA_NV12",
"YUV2RGBA_NV21",
"YUV2BGRA_NV21",
"YUV2RGB_YV12",
"YUV2BGR_YV12",
"YUV2RGB_IYUV",
"YUV2BGR_IYUV",
"YUV2RGBA_YV12",
"YUV2BGRA_YV12",
"YUV2RGBA_IYUV",
"YUV2BGRA_IYUV",
"YUV2GRAY_420",
//YUV 4:2:2 formats family
"YUV2RGB_UYVY",
"YUV2BGR_UYVY",
"YUV2RGB_VYUY",
"YUV2BGR_VYUY",
"YUV2RGBA_UYVY",
"YUV2BGRA_UYVY",
"YUV2RGBA_VYUY",
"YUV2BGRA_VYUY",
"YUV2RGB_YUY2",
"YUV2BGR_YUY2",
"YUV2RGB_YVYU",
"YUV2BGR_YVYU",
"YUV2RGBA_YUY2",
"YUV2BGRA_YUY2",
"YUV2RGBA_YVYU",
"YUV2BGRA_YVYU",
"YUV2GRAY_UYVY",
"YUV2GRAY_YUY2",
// alpha premultiplication
"RGBA2mRGBA",
"mRGBA2RGBA",
"COLORCVT_MAX"
};
*os << str[info.code];

View File

@ -54,6 +54,17 @@ void cv::gpu::gammaCorrection(const GpuMat&, GpuMat&, bool, Stream&) { throw_nog
#else /* !defined (HAVE_CUDA) */
#include <cvt_colot_internal.h>
namespace cv { namespace gpu {
namespace device
{
template <int cn>
void Bayer2BGR_8u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template <int cn>
void Bayer2BGR_16u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
}
}}
using namespace ::cv::gpu::device;
namespace
@ -1144,13 +1155,13 @@ namespace
funcs[dcn == 4][src.channels() == 4][src.depth()](src, dst, StreamAccessor::getStream(stream));
}
void bgr_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& st)
{
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)dcn;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.depth() == CV_8U);
@ -1160,13 +1171,17 @@ namespace
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn));
cudaStream_t stream = StreamAccessor::getStream(st);
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
nppSafeCall( nppiBGRToLab_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
}
@ -1176,13 +1191,13 @@ namespace
bgr_to_lab(dst, dst, -1, stream);
}
void lab_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& st)
{
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)dcn;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.depth() == CV_8U);
@ -1192,13 +1207,17 @@ namespace
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn));
cudaStream_t stream = StreamAccessor::getStream(st);
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
nppSafeCall( nppiLabToBGR_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
}
@ -1208,13 +1227,13 @@ namespace
bgr_to_rgb(dst, dst, -1, stream);
}
void rgb_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& st)
{
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)dcn;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.depth() == CV_8U);
@ -1224,7 +1243,8 @@ namespace
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn));
cudaStream_t stream = StreamAccessor::getStream(st);
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = src.cols;
@ -1234,6 +1254,9 @@ namespace
nppSafeCall( nppiRGBToLUV_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
else
nppSafeCall( nppiRGBToLUV_8u_AC4R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
}
@ -1243,13 +1266,13 @@ namespace
rgb_to_luv(dst, dst, -1, stream);
}
void luv_to_rgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& st)
{
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)dcn;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.depth() == CV_8U);
@ -1259,7 +1282,8 @@ namespace
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn));
cudaStream_t stream = StreamAccessor::getStream(st);
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = src.cols;
@ -1269,6 +1293,9 @@ namespace
nppSafeCall( nppiLUVToRGB_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
else
nppSafeCall( nppiLUVToRGB_8u_AC4R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
}
@ -1278,19 +1305,20 @@ namespace
bgr_to_rgb(dst, dst, -1, stream);
}
void rgba_to_mbgra(const GpuMat& src, GpuMat& dst, int, Stream& st)
{
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4);
dst.create(src.size(), src.type());
cudaStream_t stream = StreamAccessor::getStream(st);
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = src.cols;
@ -1300,8 +1328,52 @@ namespace
nppSafeCall( nppiAlphaPremul_8u_AC4R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
else
nppSafeCall( nppiAlphaPremul_16u_AC4R(src.ptr<Npp16u>(), static_cast<int>(src.step), dst.ptr<Npp16u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
}
void bayer_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, bool blue_last, bool start_with_green, Stream& stream)
{
typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
static const func_t funcs[3][4] =
{
{0,0,Bayer2BGR_8u_gpu<3>, Bayer2BGR_8u_gpu<4>},
{0,0,0,0},
{0,0,Bayer2BGR_16u_gpu<3>, Bayer2BGR_16u_gpu<4>}
};
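// funcs is indexed as [src depth][dcn - 1]: rows are CV_8U, CV_8S (unused) and CV_16U
// sources; only 3- and 4-channel destinations are implemented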
if (dcn <= 0) dcn = 3;
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1);
CV_Assert(src.rows > 2 && src.cols > 2);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn));
funcs[src.depth()][dcn - 1](src, dst, blue_last, start_with_green, StreamAccessor::getStream(stream));
}
void bayerBG_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
bayer_to_bgr(src, dst, dcn, false, false, stream);
}
void bayerGB_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
bayer_to_bgr(src, dst, dcn, false, true, stream);
}
void bayerRG_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
bayer_to_bgr(src, dst, dcn, true, false, stream);
}
void bayerGR_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
bayer_to_bgr(src, dst, dcn, true, true, stream);
}
}
void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream& stream)
@ -1366,10 +1438,10 @@ void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream
bgr_to_lab, // CV_BGR2Lab =44
rgb_to_lab, // CV_RGB2Lab =45
bayerBG_to_bgr, // CV_BayerBG2BGR =46
bayerGB_to_bgr, // CV_BayerGB2BGR =47
bayerRG_to_bgr, // CV_BayerRG2BGR =48
bayerGR_to_bgr, // CV_BayerGR2BGR =49
bgr_to_luv, // CV_BGR2Luv =50
rgb_to_luv, // CV_RGB2Luv =51
@ -1424,57 +1496,57 @@ void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream
0, // CV_BayerGR2GRAY = 89
//YUV 4:2:0 formats family
0, // CV_YUV2RGB_NV12 = 90,
0, // CV_YUV2BGR_NV12 = 91,
0, // CV_YUV2RGB_NV21 = 92,
0, // CV_YUV2BGR_NV21 = 93,
0, // CV_YUV2RGBA_NV12 = 94,
0, // CV_YUV2BGRA_NV12 = 95,
0, // CV_YUV2RGBA_NV21 = 96,
0, // CV_YUV2BGRA_NV21 = 97,
0, // CV_YUV2RGB_YV12 = 98,
0, // CV_YUV2BGR_YV12 = 99,
0, // CV_YUV2RGB_IYUV = 100,
0, // CV_YUV2BGR_IYUV = 101,
0, // CV_YUV2RGBA_YV12 = 102,
0, // CV_YUV2BGRA_YV12 = 103,
0, // CV_YUV2RGBA_IYUV = 104,
0, // CV_YUV2BGRA_IYUV = 105,
0, // CV_YUV2GRAY_420 = 106,
//YUV 4:2:2 formats family
0, // CV_YUV2RGB_UYVY = 107,
0, // CV_YUV2BGR_UYVY = 108,
0, // //CV_YUV2RGB_VYUY = 109,
0, // //CV_YUV2BGR_VYUY = 110,
0, // CV_YUV2RGBA_UYVY = 111,
0, // CV_YUV2BGRA_UYVY = 112,
0, // //CV_YUV2RGBA_VYUY = 113,
0, // //CV_YUV2BGRA_VYUY = 114,
0, // CV_YUV2RGB_YUY2 = 115,
0, // CV_YUV2BGR_YUY2 = 116,
0, // CV_YUV2RGB_YVYU = 117,
0, // CV_YUV2BGR_YVYU = 118,
0, // CV_YUV2RGBA_YUY2 = 119,
0, // CV_YUV2BGRA_YUY2 = 120,
0, // CV_YUV2RGBA_YVYU = 121,
0, // CV_YUV2BGRA_YVYU = 122,
0, // CV_YUV2GRAY_UYVY = 123,
0, // CV_YUV2GRAY_YUY2 = 124,
// alpha premultiplication
rgba_to_mbgra, // CV_RGBA2mRGBA = 125,
0, // CV_mRGBA2RGBA = 126,
0, // CV_COLORCVT_MAX = 127
};
CV_Assert(code < 128);

View File

@ -0,0 +1,522 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <iostream>
#include <stdio.h>
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
enum
{
WARP_SIZE = 32,
WARP_LOG = 5,
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
STA_SIZE_MARGE_Y = 4,
STA_SIZE_MARGE_X = 32,
TPB_X = 1,
TPB_Y = 4,
TILE_COLS = CTA_SIZE_X * TPB_X,
TILE_ROWS = CTA_SIZE_Y * TPB_Y
};
template<typename T> struct IntervalsTraits
{
typedef T elem_type;
};
template<> struct IntervalsTraits<unsigned char>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<uchar3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<uchar4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<unsigned short>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<ushort3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<ushort4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<float>
{
typedef float dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<int>
{
typedef int dist_type;
enum {ch = 1};
};
typedef unsigned char component;
enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 };
template<typename T, int CH> struct InInterval {};
template<typename T> struct InInterval<T, 1>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo(-_lo.x), hi(_hi.x) {};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo <= d && d <= hi;
}
};
template<typename T> struct InInterval<T, 3>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
: lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z;
}
};
template<typename T> struct InInterval<T, 4>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
: lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z, -_lo.w)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z, _hi.w)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z &&
lo.w <= d.w && d.w <= hi.w;
}
};
template<typename T, typename F>
__global__ void computeConnectivity(const DevMem2D_<T> image, DevMem2D components, F connected)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= image.cols || y >= image.rows) return;
T intensity = image(y, x);
component c = 0;
if ( x > 0 && connected(intensity, image(y, x - 1)))
c |= LEFT;
if ( y > 0 && connected(intensity, image(y - 1, x)))
c |= UP;
if ( x + 1 < image.cols && connected(intensity, image(y, x + 1)))
c |= RIGHT;
if ( y + 1 < image.rows && connected(intensity, image(y + 1, x)))
c |= DOWN;
components(y, x) = c;
}
template< typename T>
void computeEdges(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y));
typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t;
Int_t inInt(lo, hi);
computeConnectivity<T, Int_t><<<grid, block, 0, stream>>>(static_cast<const DevMem2D_<T> >(image), edges, inInt);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void computeEdges<uchar> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<uchar3> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<uchar4> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort3>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort4>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<int> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<float> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
__global__ void labelTiles(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * TILE_COLS;
int y = threadIdx.y + blockIdx.y * TILE_ROWS;
if (x >= edges.cols || y >= edges.rows) return;
// TPB_X is currently 1, so each thread covers a single column within the tile
int bounds = ((y + TPB_Y) < edges.rows);
__shared__ int labelsTile[TILE_ROWS][TILE_COLS];
__shared__ int edgesTile[TILE_ROWS][TILE_COLS];
int new_labels[TPB_Y][TPB_X];
int old_labels[TPB_Y][TPB_X];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j);
if (!xloc) c &= ~LEFT;
if (!yloc) c &= ~UP;
if (xloc == TILE_COLS -1) c &= ~RIGHT;
if (yloc == TILE_ROWS -1) c &= ~DOWN;
new_labels[i][j] = yloc * TILE_COLS + xloc;
edgesTile[yloc][xloc] = c;
}
for (;;)
{
//1. backup
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
old_labels[i][j] = new_labels[i][j];
labelsTile[yloc][xloc] = new_labels[i][j];
}
__syncthreads();
//2. compare local arrays
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edgesTile[yloc][xloc];
int label = new_labels[i][j];
if (c & UP)
label = ::min(label, labelsTile[yloc - 1][xloc]);
if (c & DOWN)
label = ::min(label, labelsTile[yloc + 1][xloc]);
if (c & LEFT)
label = ::min(label, labelsTile[yloc][xloc - 1]);
if (c & RIGHT)
label = ::min(label, labelsTile[yloc][xloc + 1]);
new_labels[i][j] = label;
}
__syncthreads();
//3. determine: Is any value changed?
int changed = 0;
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
if (new_labels[i][j] < old_labels[i][j])
{
changed = 1;
atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]);
}
}
changed = __syncthreads_or(changed);
if (!changed)
break;
//4. Compact paths
const int *labels = &labelsTile[0][0];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
while( labels[label] < label ) label = labels[label];
new_labels[i][j] = label;
}
__syncthreads();
}
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
int yloc = label / TILE_COLS;
int xloc = label - yloc * TILE_COLS;
xloc += blockIdx.x * TILE_COLS;
yloc += blockIdx.y * TILE_ROWS;
label = yloc * edges.cols + xloc;
// TODO: the x coordinate should be bounds-checked the same way
if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label;
}
}
__device__ __forceinline__ int root(const DevMem2Di& comps, int label)
{
while(1)
{
int y = label / comps.cols;
int x = label - y * comps.cols;
int parent = comps(y, x);
if (label == parent) break;
label = parent;
}
return label;
}
__device__ __forceinline__ void isConnected(DevMem2Di& comps, int l1, int l2, bool& changed)
{
int r1 = root(comps, l1);
int r2 = root(comps, l2);
if (r1 == r2) return;
int mi = ::min(r1, r2);
int ma = ::max(r1, r2);
int y = ma / comps.cols;
int x = ma - y * comps.cols;
atomicMin(&comps.ptr(y)[x], mi);
changed = true;
}
__global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX,
const DevMem2D edges, DevMem2Di comps, const int yIncomplete, int xIncomplete)
{
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int stride = blockDim.y * blockDim.x;
int ybegin = blockIdx.y * (tilesNumY * tileSizeY);
int yend = ybegin + tilesNumY * tileSizeY;
if (blockIdx.y == gridDim.y - 1)
{
yend -= yIncomplete * tileSizeY;
yend -= tileSizeY;
tileSizeY = (edges.rows % tileSizeY);
yend += tileSizeY;
}
int xbegin = blockIdx.x * tilesNumX * tileSizeX;
int xend = xbegin + tilesNumX * tileSizeX;
if (blockIdx.x == gridDim.x - 1)
{
if (xIncomplete) yend = ybegin;
xend -= xIncomplete * tileSizeX;
xend -= tileSizeX;
tileSizeX = (edges.cols % tileSizeX);
xend += tileSizeX;
}
if (blockIdx.y == (gridDim.y - 1) && yIncomplete)
{
xend = xbegin;
}
int tasksV = (tilesNumX - 1) * (yend - ybegin);
int tasksH = (tilesNumY - 1) * (xend - xbegin);
int total = tasksH + tasksV;
bool changed;
do
{
changed = false;
for (int taskIdx = tid; taskIdx < total; taskIdx += stride)
{
if (taskIdx < tasksH)
{
int indexH = taskIdx;
int row = indexH / (xend - xbegin);
int col = indexH - row * (xend - xbegin);
int y = ybegin + (row + 1) * tileSizeY;
int x = xbegin + col;
component e = edges(y, x);
if (e & UP)
{
int lc = comps(y,x);
int lu = comps(y - 1, x);
isConnected(comps, lc, lu, changed);
}
}
else
{
int indexV = taskIdx - tasksH;
int col = indexV / (yend - ybegin);
int row = indexV - col * (yend - ybegin);
int x = xbegin + (col + 1) * tileSizeX;
int y = ybegin + row;
component e = edges(y, x);
if (e & LEFT)
{
int lc = comps(y, x);
int ll = comps(y, x - 1);
isConnected(comps, lc, ll, changed);
}
}
}
} while (__syncthreads_or(changed));
}
__global__ void flatten(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < comps.cols && y < comps.rows)
comps(y, x) = root(comps, comps(y, x));
}
void labelComponents(const DevMem2D& edges, DevMem2Di comps, cudaStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
labelTiles<<<grid, block, 0, stream>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS;
while (grid.x > 1 || grid.y > 1)
{
dim3 mergeGrid(ceilf(grid.x / 2.0), ceilf(grid.y / 2.0));
dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y);
std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl;
crossMerge<<<mergeGrid, mergeBlock, 0, stream>>>(2, 2, tileSizeY, tileSizeX, edges, comps, ceilf(grid.y / 2.0) - grid.y / 2, ceilf(grid.x / 2.0) - grid.x / 2);
tileSizeX <<= 1;
tileSizeY <<= 1;
grid = mergeGrid;
cudaSafeCall( cudaGetLastError() );
}
grid.x = divUp(edges.cols, block.x);
grid.y = divUp(edges.rows, block.y);
flatten<<<grid, block, 0, stream>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} } }

View File

@ -0,0 +1,327 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/limits.hpp>
namespace cv { namespace gpu {
namespace device
{
template <typename D>
__global__ void Bayer2BGR_8u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 2) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
uchar4 patch[3][3];
patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x];
patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res1 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res2 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res3 = VecTraits<D>::all(numeric_limits<uchar>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2;
const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1;
const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1;
const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2;
const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
res2.x = t5;
res2.y = patch[1][1].z;
res2.z = t4;
res3.x = patch[1][1].w;
res3.y = t7;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
res2.x = t4;
res2.y = patch[1][1].z;
res2.z = t5;
res3.x = t6;
res3.y = t7;
res3.z = patch[1][1].w;
}
}
else
{
const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1;
const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2;
const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2;
const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1;
const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
res2.x = patch[1][1].z;
res2.y = t5;
res2.z = t4;
res3.x = t7;
res3.y = patch[1][1].w;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
res2.x = t4;
res2.y = t5;
res2.z = patch[1][1].z;
res3.x = t6;
res3.y = patch[1][1].w;
res3.z = t7;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
if (d_x + 2 < dst.cols)
dst(d_y, d_x + 2) = res2;
if (d_x + 3 < dst.cols)
dst(d_y, d_x + 3) = res3;
}
template <typename D>
__global__ void Bayer2BGR_16u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 1) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
ushort2 patch[3][3];
patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x];
patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<ushort>::max());
D res1 = VecTraits<D>::all(numeric_limits<ushort>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
}
}
else
{
const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
}
template <int cn>
void Bayer2BGR_8u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 4 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_8u<dst_t><<<grid, block, 0, stream>>>(src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int cn>
void Bayer2BGR_16u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<ushort, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 2 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_16u<dst_t><<<grid, block, 0, stream>>>(src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void Bayer2BGR_8u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_8u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
}
}}

View File

@ -47,8 +47,76 @@
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::connectivityMask(const GpuMat&, GpuMat&, const cv::Scalar&, const cv::Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::labelComponents(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
void labelComponents(const DevMem2D& edges, DevMem2Di comps, cudaStream_t stream);
template<typename T>
void computeEdges(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
}
}}}
float4 scalarToCudaType(const cv::Scalar& in)
{
float4 res;
res.x = in[0]; res.y = in[1]; res.z = in[2]; res.w = in[3];
return res;
}
void cv::gpu::connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& s)
{
CV_Assert(!image.empty());
int ch = image.channels();
CV_Assert(ch <= 4);
int depth = image.depth();
typedef void (*func_t)(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
static const func_t supportLookup[8][4] =
{ // 1, 2, 3, 4
{ device::ccl::computeEdges<uchar>, 0, device::ccl::computeEdges<uchar3>, device::ccl::computeEdges<uchar4> },// CV_8U
{ 0, 0, 0, 0 },// CV_8S
{ device::ccl::computeEdges<ushort>, 0, device::ccl::computeEdges<ushort3>, device::ccl::computeEdges<ushort4> },// CV_16U
{ 0, 0, 0, 0 },// CV_16S
{ device::ccl::computeEdges<int>, 0, 0, 0 },// CV_32S
{ device::ccl::computeEdges<float>, 0, 0, 0 },// CV_32F
{ 0, 0, 0, 0 },// CV_64F
{ 0, 0, 0, 0 } // CV_USRTYPE1
};
func_t f = supportLookup[depth][ch - 1];
CV_Assert(f);
if (image.size() != mask.size() || mask.type() != CV_8UC1)
mask.create(image.size(), CV_8UC1);
cudaStream_t stream = StreamAccessor::getStream(s);
float4 culo = scalarToCudaType(lo), cuhi = scalarToCudaType(hi);
f(image, mask, culo, cuhi, stream);
}
void cv::gpu::labelComponents(const GpuMat& mask, GpuMat& components, Stream& s)
{
CV_Assert(!mask.empty() && mask.type() == CV_8U);
if (mask.size() != components.size() || components.type() != CV_32SC1)
components.create(mask.size(), CV_32SC1);
cudaStream_t stream = StreamAccessor::getStream(s);
device::ccl::labelComponents(mask, components, stream);
}
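For orientation, a hedged usage sketch of how these two new entry points chain together (the helper name and `delta` threshold are illustrative, not part of the patch):

// Label the connected regions of an 8-bit image whose neighboring
// pixels differ by at most `delta`.
void labelRegions(const cv::gpu::GpuMat& gray8u, cv::gpu::GpuMat& components, int delta)
{
    CV_Assert(gray8u.type() == CV_8UC1);
    cv::gpu::GpuMat mask;
    // mark which of the 4 neighbors of each pixel fall inside [-delta, delta]
    cv::gpu::connectivityMask(gray8u, mask, cv::Scalar::all(0), cv::Scalar::all(delta));
    // then propagate labels across the mask
    cv::gpu::labelComponents(mask, components);
}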
namespace
{
typedef NppStatus (*init_func_t)(NppiSize oSize, NppiGraphcutState** ppState, Npp8u* pDeviceMem);

View File

@ -41,6 +41,8 @@
#include "precomp.hpp"
#ifdef HAVE_CUDA
namespace {
///////////////////////////////////////////////////////////////////////////////////////////////////////
@ -1628,7 +1630,7 @@ TEST_P(CvtColor, BGR2Lab)
}
catch (const cv::Exception& e)
{
#if (CUDA_VERSION < 5000)
#if defined (CUDA_VERSION) && (CUDA_VERSION < 5000)
ASSERT_EQ(CV_StsBadFlag, e.code);
#else
FAIL();
@ -1655,7 +1657,7 @@ TEST_P(CvtColor, RGB2Lab)
}
catch (const cv::Exception& e)
{
#if (CUDA_VERSION < 5000)
#if defined (CUDA_VERSION) && (CUDA_VERSION < 5000)
ASSERT_EQ(CV_StsBadFlag, e.code);
#else
FAIL();
@ -1682,7 +1684,7 @@ TEST_P(CvtColor, BGR2Luv)
}
catch (const cv::Exception& e)
{
#if (CUDA_VERSION < 5000)
#if defined (CUDA_VERSION) && (CUDA_VERSION < 5000)
ASSERT_EQ(CV_StsBadFlag, e.code);
#else
FAIL();
@ -1709,7 +1711,7 @@ TEST_P(CvtColor, RGB2Luv)
}
catch (const cv::Exception& e)
{
#if (CUDA_VERSION < 5000)
#if defined (CUDA_VERSION) && (CUDA_VERSION < 5000)
ASSERT_EQ(CV_StsBadFlag, e.code);
#else
FAIL();
@ -1736,7 +1738,7 @@ TEST_P(CvtColor, RGBA2mRGBA)
}
catch (const cv::Exception& e)
{
#if (CUDA_VERSION < 5000)
#if defined (CUDA_VERSION) && (CUDA_VERSION < 5000)
ASSERT_EQ(CV_StsBadFlag, e.code);
#else
FAIL();
@ -1744,6 +1746,159 @@ TEST_P(CvtColor, RGBA2mRGBA)
}
}
TEST_P(CvtColor, BayerBG2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerBG2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);
cv::Mat dst4(dst);
cv::Mat dst3;
cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerGB2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerGB2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);
cv::Mat dst4(dst);
cv::Mat dst3;
cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerRG2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerRG2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);
cv::Mat dst4(dst);
cv::Mat dst3;
cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerGR2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
TEST_P(CvtColor, BayerGR2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
cv::Mat src = randomMat(size, depth);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);
cv::Mat dst4(dst);
cv::Mat dst3;
cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CvtColor, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
@ -1791,3 +1946,5 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, SwapChannels, testing::Combine(
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

View File

@ -0,0 +1,205 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include "precomp.hpp"
#include <string>
#include <iostream>
#ifdef HAVE_CUDA
namespace {
struct GreedyLabeling
{
struct dot
{
int x;
int y;
static dot make(int i, int j)
{
dot d; d.x = i; d.y = j;
return d;
}
};
struct InInterval
{
InInterval(const int& _lo, const int& _hi) : lo(-_lo), hi(_hi) {};
const int lo, hi;
bool operator() (const unsigned char a, const unsigned char b) const
{
int d = a - b;
return lo <= d && d <= hi;
}
};
GreedyLabeling(cv::Mat img)
: image(img), _labels(image.cols, image.rows, CV_32SC1, cv::Scalar::all(-1)) {}
void operator() (cv::Mat labels) const
{
InInterval inInt(0, 2);
dot* stack = new dot[image.cols * image.rows];
int cc = -1;
int* dist_labels = (int*)labels.data;
int pitch = labels.step1();
unsigned char* source = (unsigned char*)image.data;
int width = image.cols;
int height = image.rows;
for (int j = 0; j < image.rows; ++j)
for(int i = 0; i < image.cols; ++i)
{
if (dist_labels[j * pitch + i] != -1) continue;
dot* top = stack;
dot p = dot::make(i, j);
cc++;
dist_labels[j * pitch + i] = cc;
*top++ = p; // seed the stack; the original version popped past the stack base on exit
while (top > stack)
{
p = *--top;
int* dl = &dist_labels[p.y * pitch + p.x];
unsigned char* sp = &source[p.y * image.step1() + p.x];
//right
if( p.x < (width - 1) && dl[ +1] == -1 && inInt(sp[0], sp[+1]))
{
dl[+1] = cc;
*top++ = dot::make(p.x + 1, p.y);
}
//left
if( p.x > 0 && dl[-1] == -1 && inInt(sp[0], sp[-1]))
{
dl[-1] = cc;
*top++ = dot::make(p.x - 1, p.y);
}
//bottom
if( p.y < (height - 1) && dl[+pitch] == -1 && inInt(sp[0], sp[+pitch]))
{
dl[+pitch] = cc;
*top++ = dot::make(p.x, p.y + 1);
}
//top
if( p.y > 0 && dl[-pitch] == -1 && inInt(sp[0], sp[-pitch]))
{
dl[-pitch] = cc;
*top++ = dot::make(p.x, p.y - 1);
}
}
}
delete[] stack;
}
cv::Mat image;
cv::Mat _labels;
};
}
struct Labeling : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
cv::Mat load_image()
{
return cv::imread(std::string( cvtest::TS::ptr()->get_data_path() ) + "labeling/label.png");
}
};
TEST_P(Labeling, ConnectedComponents)
{
cv::Mat image;
cvtColor(load_image(), image, CV_BGR2GRAY);
ASSERT_TRUE(image.type() == CV_8UC1);
GreedyLabeling host(image);
host(host._labels);
cv::gpu::GpuMat mask;
mask.create(image.rows, image.cols, CV_8UC1);
cv::gpu::GpuMat components;
components.create(image.rows, image.cols, CV_32SC1);
cv::gpu::connectivityMask(cv::gpu::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2));
ASSERT_NO_THROW(cv::gpu::labelComponents(mask, components));
// for (int j = 0; j + 32 < components.rows; j += 32)
// for (int i = 0; i + 32 < components.cols; i += 32)
// {
// std::cout << "Tile: " << i << " " << j << std::endl;
// std::cout << cv::Mat(host._labels, cv::Rect(i,j,32,32)) << std::endl;
// std::cout << cv::Mat(cv::Mat(components), cv::Rect(i,j,32,32)) << std::endl;
// }
// for debug
// cv::imshow("test", image);
// cv::waitKey(0);
// cv::imshow("test", host._labels * 50);
// cv::waitKey(0);
// // cv::imshow("test", cv::Mat(mask) * 10);
// // cv::waitKey(0);
// cv::imshow("test", cv::Mat(components) * 2);
// cv::waitKey(0);
}
INSTANTIATE_TEST_CASE_P(ConnectedComponents, Labeling, ALL_DEVICES);
#endif

View File

@ -294,7 +294,7 @@ The methods/functions grab the next frame from video file or camera and return t
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get frame from each camera. This way the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames from different cameras will be closer in time.
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/kinect_maps.cpp
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/kinect_maps.cpp
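A minimal sketch of the grab-then-retrieve pattern described above (camera indices 0 and 1 are assumed; error handling is trimmed):

#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::VideoCapture cam0(0), cam1(1); // two hypothetical cameras
    cv::Mat frame0, frame1;
    for(;;)
    {
        // grab both sensors first, so the frames are as close in time as possible
        if (!cam0.grab() || !cam1.grab())
            break;
        // decode afterwards; this is the slow part the text refers to
        cam0.retrieve(frame0);
        cam1.retrieve(frame1);
        // ... process frame0 and frame1 ...
    }
    return 0;
}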
VideoCapture::retrieve

View File

@ -203,7 +203,7 @@ Sets mouse handler for the specified window
:param winname: Window name
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param userdata: The optional parameter passed to the callback.
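For illustration, a minimal callback along the lines of the referenced sample (the window name and printed message are arbitrary):

#include "opencv2/highgui/highgui.hpp"
#include <iostream>

static void onMouse(int event, int x, int y, int /*flags*/, void* /*userdata*/)
{
    if (event == CV_EVENT_LBUTTONDOWN)
        std::cout << "clicked at (" << x << ", " << y << ")" << std::endl;
}

int main()
{
    cv::namedWindow("demo");
    cv::setMouseCallback("demo", onMouse, 0);
    cv::waitKey(0); // pump window events until a key is pressed
    return 0;
}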

View File

@ -202,7 +202,7 @@ Approximates a polygonal curve(s) with the specified precision.
The functions ``approxPolyDP`` approximate a curve or a polygon with another curve/polygon with fewer vertices so that the distance between them is less than or equal to the specified precision. It uses the Douglas-Peucker algorithm
http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
See http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/contours.cpp for the function usage model.
See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours.cpp for the function usage model.
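A short sketch of the typical call (the 1% epsilon is an arbitrary, commonly used starting point, not a recommendation from this document):

#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

// `contour` is assumed to come from cv::findContours on a binary image
std::vector<cv::Point> simplify(const std::vector<cv::Point>& contour)
{
    std::vector<cv::Point> approx;
    double eps = 0.01 * cv::arcLength(contour, true); // precision ~1% of the perimeter
    cv::approxPolyDP(contour, approx, eps, true /*closed*/);
    return approx;
}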
ApproxChains

View File

@ -1298,17 +1298,17 @@ public:
maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight)
{
}
virtual void operator() (const Range& range) const
{
int i, j, cn = dest->channels(), k;
Size size = dest->size();
for( i = range.start; i < range.end; i++ )
{
const uchar* sptr = temp->ptr(i+radius) + radius*cn;
uchar* dptr = dest->ptr(i);
if( cn == 1 )
{
for( j = 0; j < size.width; j++ )
@ -1351,10 +1351,10 @@ public:
}
}
}
private:
const Mat *temp;
Mat *dest;
const Mat *temp;
int radius, maxk, *space_ofs;
float *space_weight, *color_weight;
};
@ -1367,40 +1367,40 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
int cn = src.channels();
int i, j, maxk, radius;
Size size = src.size();
CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
src.type() == dst.type() && src.size() == dst.size() &&
src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
vector<float> _color_weight(cn*256);
vector<float> _space_weight(d*d);
vector<int> _space_ofs(d*d);
float* color_weight = &_color_weight[0];
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// initialize color-related bilateral filter coefficients
for( i = 0; i < 256*cn; i++ )
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
@ -1411,7 +1411,7 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
parallel_for_(Range(0, size.height), body);
}

View File

@ -21,7 +21,7 @@ The word "cascade" in the classifier name means that the resultant classifier co
The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within the region of interest and the scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over rectangular regions are calculated rapidly using integral images (see below and the :ocv:func:`integral` description).
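As a sketch of the integral-image trick just mentioned (an illustrative helper, not the classifier's internal code), the sum over any rectangle reduces to four table lookups:

#include "opencv2/imgproc/imgproc.hpp"

// Sum of `img` over `r` via an integral image: one O(width*height) pass,
// then O(1) per rectangle query (in practice the integral is computed once,
// not per query as in this toy helper).
double rectSum(const cv::Mat& img, const cv::Rect& r)
{
    cv::Mat ii;
    cv::integral(img, ii, CV_64F); // ii has size (rows+1) x (cols+1)
    return ii.at<double>(r.y, r.x)
         + ii.at<double>(r.y + r.height, r.x + r.width)
         - ii.at<double>(r.y, r.x + r.width)
         - ii.at<double>(r.y + r.height, r.x);
}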
To see the object detector at work, have a look at the facedetect demo:
http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/facedetect.cpp
http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/facedetect.cpp
The following reference is for the detection part only. There is a separate application called ``opencv_traincascade`` that can train a cascade of boosted classifiers from a set of samples.

View File

@ -444,7 +444,7 @@ inline int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvalu
CascadeClassifier::Data::Stage* cascadeStages = &cascade.data.stages[0];
#ifdef HAVE_TEGRA_OPTIMIZATION
float tmp; // float accumulator -- float operations are quicker
float tmp = 0; // float accumulator -- float operations are quicker
#endif
for( int si = 0; si < nstages; si++ )
{

View File

@ -29,6 +29,14 @@ if (HAVE_OPENCL)
if(OPENCL_INCLUDE_DIR)
ocv_include_directories(${OPENCL_INCLUDE_DIR})
endif()
if (HAVE_CLAMDFFT)
set(ocl_link_libs ${ocl_link_libs} ${CLAMDFFT_LIBRARIES})
ocv_include_directories(${CLAMDFFT_INCLUDE_DIR})
endif()
if (HAVE_CLAMDBLAS)
set(ocl_link_libs ${ocl_link_libs} ${CLAMDBLAS_LIBRARIES})
ocv_include_directories(${CLAMDBLAS_INCLUDE_DIR})
endif()
endif()
ocv_set_module_sources(

View File

@ -1,19 +1,20 @@
************************************
***************************************
ocl. OpenCL-accelerated Computer Vision
************************************
***************************************
.. toctree::
:maxdepth: 1
introduction
initalization_and_information
data_structures
operations_on_matrices
per_element_operations
image_processing
matrix_reductions
object_detection
feature_detection_and_description
image_filtering
camera_calibration_and_3d_reconstruction
video
structures_and_functions
.. initalization_and_information
.. data_structures
.. operations_on_matrices
.. per_element_operations
.. image_processing
.. matrix_reductions
.. object_detection
.. feature_detection_and_description
.. image_filtering
.. camera_calibration_and_3d_reconstruction
.. video

View File

@ -858,7 +858,72 @@ namespace cv
void benchmark_copy_vectorize(const oclMat &src, oclMat &dst);
void benchmark_copy_offset_stride(const oclMat &src, oclMat &dst);
void benchmark_ILP();
//! computes vertical sum, supports only CV_32FC1 images
CV_EXPORTS void columnSum(const oclMat& src, oclMat& sum);
//! performs linear blending of two images
//! to avoid accuracy errors the sum of weights shouldn't be very close to zero
// supports only CV_8UC1 source type
CV_EXPORTS void blendLinear(const oclMat& img1, const oclMat& img2, const oclMat& weights1, const oclMat& weights2, oclMat& result);
/////////////////////////////// Pyramid /////////////////////////////////////
CV_EXPORTS void pyrDown(const oclMat& src, oclMat& dst);
//! upsamples the source image and then smoothes it
CV_EXPORTS void pyrUp(const cv::ocl::oclMat& src,cv::ocl::oclMat& dst);
///////////////////////////////////////// match_template /////////////////////////////////////////////////////////////
struct CV_EXPORTS MatchTemplateBuf
{
Size user_block_size;
oclMat imagef, templf;
std::vector<oclMat> images;
std::vector<oclMat> image_sums;
std::vector<oclMat> image_sqsums;
};
//! computes the proximity map for the raster template and the image where the template is searched for
// Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
// Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
CV_EXPORTS void matchTemplate(const oclMat& image, const oclMat& templ, oclMat& result, int method);
//! computes the proximity map for the raster template and the image where the template is searched for
// Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
// Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
CV_EXPORTS void matchTemplate(const oclMat& image, const oclMat& templ, oclMat& result, int method, MatchTemplateBuf& buf);
#ifdef HAVE_CLAMDFFT
///////////////////////////////////////// clAmdFft related /////////////////////////////////////////
// these two functions must be called before/after running any FFT library functions.
CV_EXPORTS void fft_setup(); // this will be implicitly invoked
CV_EXPORTS void fft_teardown(); // you need to tear down the FFT library manually
/////////////////////////////////////// DFT /////////////////////////////////////////////////////
//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of a floating point matrix.
//! Param dft_size is the size of the DFT transform.
//!
//! For the complex-to-real transform it is assumed that the source matrix is packed in CLFFT's format.
// supports src types CV_32FC1, CV_32FC2
// supports flags: DFT_INVERSE, DFT_REAL_OUTPUT, DFT_COMPLEX_OUTPUT, DFT_ROWS
// dft_size is the size of the original input, which is used for the complex-to-real transformation.
// each dft_size dimension must be a product of powers of 2, 3 and 5
// real-to-complex dft requires clAmdFft v1.8 or later
// real-to-complex dft output does not match the cpu version exactly
// real-to-complex and complex-to-real transforms do not support DFT_ROWS
CV_EXPORTS void dft(const oclMat& src, oclMat& dst, Size dft_size = Size(0, 0), int flags = 0);
#endif // HAVE_CLAMDFFT
#ifdef HAVE_CLAMDBLAS
//! implements the generalized matrix product algorithm GEMM from BLAS
// The functionality requires the clAmdBlas library
// only supports type CV_32FC1
// flag GEMM_3_T is not supported
CV_EXPORTS void gemm(const oclMat& src1, const oclMat& src2, double alpha,
const oclMat& src3, double beta, oclMat& dst, int flags = 0);
#endif
}
}
#include "opencv2/ocl/matrix_operations.hpp"

98
modules/ocl/src/blend.cpp Normal file
View File

@ -0,0 +1,98 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Nathan, liujun@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <iomanip>
using namespace cv;
using namespace cv::ocl;
using namespace std;
#if !defined (HAVE_OPENCL)
void cv::ocl::blendLinear(const oclMat& img1, const oclMat& img2, const oclMat& weights1, const oclMat& weights2,
oclMat& result){throw_nogpu();}
#else
namespace cv
{
namespace ocl
{
////////////////////////////////////OpenCL kernel strings//////////////////////////
extern const char *blend_linear;
}
}
void cv::ocl::blendLinear(const oclMat& img1, const oclMat& img2, const oclMat& weights1, const oclMat& weights2,
oclMat& result)
{
cv::ocl::Context *ctx = img1.clCxt;
CV_Assert(ctx == img2.clCxt && ctx == weights1.clCxt && ctx == weights2.clCxt);
int channels = img1.channels();
int depth = img1.depth();
int rows = img1.rows;
int cols = img1.cols;
int istep = img1.step;
int wstep = weights1.step;
size_t globalSize[] = {cols * channels, rows, 1};
size_t localSize[] = {16, 16, 1};
vector< pair<size_t, const void *> > args;
if(globalSize[0]!=0)
{
args.push_back( make_pair( sizeof(cl_mem), (void *)&result.data ));
args.push_back( make_pair( sizeof(cl_mem), (void *)&img1.data ));
args.push_back( make_pair( sizeof(cl_mem), (void *)&img2.data ));
args.push_back( make_pair( sizeof(cl_mem), (void *)&weights1.data ));
args.push_back( make_pair( sizeof(cl_mem), (void *)&weights2.data ));
args.push_back( make_pair( sizeof(cl_int), (void *)&rows ));
args.push_back( make_pair( sizeof(cl_int), (void *)&cols ));
args.push_back( make_pair( sizeof(cl_int), (void *)&istep ));
args.push_back( make_pair( sizeof(cl_int), (void *)&wstep ));
std::string kernelName = "BlendLinear";
openCLExecuteKernel(ctx, &blend_linear, kernelName, globalSize, localSize, args, channels, depth);
}
}
#endif
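For context, a hedged sketch of calling the wrapper above with uniform 50/50 weights (assumes CV_8UC1 inputs per the header comment; the helper name is made up):

void blendHalf(const cv::Mat& a, const cv::Mat& b, cv::Mat& out)
{
    cv::Mat half(a.size(), CV_32FC1, cv::Scalar(0.5f)); // per-pixel weights
    cv::ocl::oclMat ga(a), gb(b), w1(half), w2(half), result;
    cv::ocl::blendLinear(ga, gb, w1, w2, result);
    result.download(out);
}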

View File

@ -0,0 +1,91 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Chunpeng Zhang, chunpeng@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iomanip>
#include "precomp.hpp"
using namespace cv;
using namespace cv::ocl;
using namespace std;
#if !defined(HAVE_OPENCL)
void cv::ocl::columnSum(const oclMat& src,oclMat& dst){ throw_nogpu(); }
#else /*!HAVE_OPENCL */
namespace cv
{
namespace ocl
{
extern const char* imgproc_columnsum;
}
}
void cv::ocl::columnSum(const oclMat& src,oclMat& dst)
{
CV_Assert(src.type() == CV_32FC1 && dst.type() == CV_32FC1 && src.size() == dst.size());
Context *clCxt = src.clCxt;
const std::string kernelName = "columnSum";
std::vector< pair<size_t, const void *> > args;
args.push_back( make_pair( sizeof(cl_mem), (void *)&src.data));
args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data));
args.push_back( make_pair( sizeof(cl_int), (void *)&src.cols));
args.push_back( make_pair( sizeof(cl_int), (void *)&src.rows));
args.push_back( make_pair( sizeof(cl_int), (void *)&src.step));
args.push_back( make_pair( sizeof(cl_int), (void *)&dst.step));
size_t globalThreads[3] = {dst.cols, dst.rows, 1};
size_t localThreads[3] = {16, 16, 1};
openCLExecuteKernel(clCxt, &imgproc_columnsum, kernelName, globalThreads, localThreads, args, src.channels(), src.depth());
}
#endif
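A hedged usage sketch for the wrapper above (the helper name is illustrative; note that columnSum asserts dst is pre-allocated as CV_32FC1 of the same size):

// prefix-sum each column of a CV_32FC1 host matrix on the device
void columnPrefixSum(const cv::Mat& src32f, cv::Mat& dst32f)
{
    cv::ocl::oclMat gsrc(src32f), gdst(src32f.size(), CV_32FC1);
    cv::ocl::columnSum(gsrc, gdst); // gdst(y, x) = sum of gsrc(0..y, x)
    gdst.download(dst32f);
}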

302
modules/ocl/src/fft.cpp Normal file
View File

@ -0,0 +1,302 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iomanip>
#include "precomp.hpp"
#ifdef HAVE_CLAMDFFT
using namespace cv;
using namespace cv::ocl;
using namespace std;
#if !defined (HAVE_OPENCL)
void cv::ocl::dft(const oclMat&, oclMat&, Size, int) { throw_nogpu(); }
#else
#include <clAmdFft.h>
namespace cv{ namespace ocl {
enum FftType
{
C2R = 1, // complex to real
R2C = 2, // real to complex (CLFFT HERMITIAN_INTERLEAVED)
C2C = 3  // complex to complex
};
struct FftPlan
{
friend void fft_setup();
friend void fft_teardown();
~FftPlan();
protected:
FftPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type);
const Size dft_size;
const int src_step, dst_step;
const int flags;
const FftType type;
clAmdFftPlanHandle plHandle;
static vector<FftPlan*> planStore;
static bool started;
static clAmdFftSetupData * setupData;
public:
// Returns a baked plan:
// if a matching plan already exists, return it;
// if not, bake a new one, put it into planStore and return it.
static clAmdFftPlanHandle getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type);
};
}}
bool cv::ocl::FftPlan::started = false;
vector<cv::ocl::FftPlan*> cv::ocl::FftPlan::planStore = vector<cv::ocl::FftPlan*>();
clAmdFftSetupData * cv::ocl::FftPlan::setupData = 0;
void cv::ocl::fft_setup()
{
if(FftPlan::started)
{
return;
}
FftPlan::setupData = new clAmdFftSetupData;
openCLSafeCall(clAmdFftInitSetupData( FftPlan::setupData ));
FftPlan::started = true;
}
void cv::ocl::fft_teardown()
{
if(!FftPlan::started)
{
return;
}
delete FftPlan::setupData;
for(size_t i = 0; i < FftPlan::planStore.size(); i ++)
{
delete FftPlan::planStore[i];
}
FftPlan::planStore.clear();
openCLSafeCall( clAmdFftTeardown( ) );
FftPlan::started = false;
}
// bake a new plan
cv::ocl::FftPlan::FftPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type)
: dft_size(_dft_size), src_step(_src_step), dst_step(_dst_step), flags(_flags), type(_type), plHandle(0)
{
if(!FftPlan::started)
{
// implicitly do fft setup
fft_setup();
}
bool is_1d_input = (_dft_size.height == 1);
int is_row_dft = flags & DFT_ROWS;
int is_scaled_dft = flags & DFT_SCALE;
int is_inverse = flags & DFT_INVERSE;
clAmdFftResultLocation place;
clAmdFftLayout inLayout;
clAmdFftLayout outLayout;
clAmdFftDim dim = is_1d_input||is_row_dft ? CLFFT_1D : CLFFT_2D;
size_t batchSize = is_row_dft?dft_size.height : 1;
size_t clLengthsIn[ 3 ] = {1, 1, 1};
size_t clStridesIn[ 3 ] = {1, 1, 1};
size_t clLengthsOut[ 3 ] = {1, 1, 1};
size_t clStridesOut[ 3 ] = {1, 1, 1};
clLengthsIn[0] = dft_size.width;
clLengthsIn[1] = is_row_dft ? 1 : dft_size.height;
clStridesIn[0] = 1;
clStridesOut[0] = 1;
switch(_type)
{
case C2C:
inLayout = CLFFT_COMPLEX_INTERLEAVED;
outLayout = CLFFT_COMPLEX_INTERLEAVED;
clStridesIn[1] = src_step / sizeof(std::complex<float>);
clStridesOut[1] = clStridesIn[1];
break;
case R2C:
CV_Assert(!is_row_dft); // this is not supported yet
inLayout = CLFFT_REAL;
outLayout = CLFFT_HERMITIAN_INTERLEAVED;
clStridesIn[1] = src_step / sizeof(float);
clStridesOut[1] = dst_step / sizeof(std::complex<float>);
break;
case C2R:
CV_Assert(!is_row_dft); // this is not supported yet
inLayout = CLFFT_HERMITIAN_INTERLEAVED;
outLayout = CLFFT_REAL;
clStridesIn[1] = src_step / sizeof(std::complex<float>);
clStridesOut[1] = dst_step / sizeof(float);
break;
default:
//std::runtime_error("does not support this convertion!");
cout << "Does not support this convertion!" << endl;
throw exception();
break;
}
clStridesIn[2] = is_row_dft ? clStridesIn[1] : dft_size.width * clStridesIn[1];
clStridesOut[2] = is_row_dft ? clStridesOut[1] : dft_size.width * clStridesOut[1];
openCLSafeCall( clAmdFftCreateDefaultPlan( &plHandle, Context::getContext()->impl->clContext, dim, clLengthsIn ) );
openCLSafeCall( clAmdFftSetResultLocation( plHandle, CLFFT_OUTOFPLACE ) );
openCLSafeCall( clAmdFftSetLayout( plHandle, inLayout, outLayout ) );
openCLSafeCall( clAmdFftSetPlanBatchSize( plHandle, batchSize ) );
openCLSafeCall( clAmdFftSetPlanInStride ( plHandle, dim, clStridesIn ) );
openCLSafeCall( clAmdFftSetPlanOutStride ( plHandle, dim, clStridesOut ) );
openCLSafeCall( clAmdFftSetPlanDistance ( plHandle, clStridesIn[ dim ], clStridesOut[ dim ]) );
openCLSafeCall( clAmdFftBakePlan( plHandle, 1, &(Context::getContext()->impl->clCmdQueue), NULL, NULL ) );
}
cv::ocl::FftPlan::~FftPlan()
{
for(size_t i = 0; i < planStore.size(); i ++)
{
if(planStore[i]->plHandle == plHandle)
{
planStore.erase(planStore.begin() + i);
break;
}
}
openCLSafeCall( clAmdFftDestroyPlan( &plHandle ) );
}
clAmdFftPlanHandle cv::ocl::FftPlan::getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type)
{
// search the store for an already baked plan with the same parameters
for(size_t i = 0; i < planStore.size(); i ++)
{
FftPlan * plan = planStore[i];
if(
plan->dft_size.width == _dft_size.width &&
plan->dft_size.height == _dft_size.height &&
plan->flags == _flags &&
plan->src_step == _src_step &&
plan->dst_step == _dst_step &&
plan->type == _type
)
{
return plan->plHandle;
}
}
// no baked plan is found
FftPlan *newPlan = new FftPlan(_dft_size, _src_step, _dst_step, _flags, _type);
planStore.push_back(newPlan);
return newPlan->plHandle;
}
void cv::ocl::dft(const oclMat& src, oclMat& dst, Size dft_size, int flags)
{
if(dft_size == Size(0,0))
{
dft_size = src.size();
}
// check that the given dft size is an optimal dft size
CV_Assert(dft_size.area() == getOptimalDFTSize(dft_size.area()));
// same assertions as in the cuda module
CV_Assert(src.type() == CV_32F || src.type() == CV_32FC2);
// we don't support DFT_SCALE flag
CV_Assert(!(DFT_SCALE & flags));
bool is_1d_input = (src.rows == 1);
int is_row_dft = flags & DFT_ROWS;
int is_scaled_dft = flags & DFT_SCALE;
int is_inverse = flags & DFT_INVERSE;
bool is_complex_input = src.channels() == 2;
bool is_complex_output = !(flags & DFT_REAL_OUTPUT);
// We don't support real-to-real transform
CV_Assert(is_complex_input || is_complex_output);
FftType type = (FftType)(is_complex_input << 0 | is_complex_output << 1);
switch(type)
{
case C2C:
dst.create(src.rows, src.cols, CV_32FC2);
break;
case R2C:
CV_Assert(!is_row_dft); // this is not supported yet
dst.create(src.rows, src.cols/2 + 1, CV_32FC2);
break;
case C2R:
CV_Assert(dft_size.width / 2 + 1 == src.cols && dft_size.height == src.rows);
CV_Assert(!is_row_dft); // this is not supported yet
dst.create(src.rows, dft_size.width, CV_32FC1);
break;
default:
CV_Error(CV_StsBadArg, "clAmdFft does not support this conversion");
break;
}
clAmdFftPlanHandle plHandle = FftPlan::getPlan(dft_size, src.step, dst.step, flags, type);
// query the size of the intermediate buffer required by the baked plan
size_t buffersize = 0;
openCLSafeCall( clAmdFftGetTmpBufSize(plHandle, &buffersize ) );
// allocate the intermediate buffer, if one is required
cl_mem clMedBuffer = NULL;
if (buffersize)
{
cl_int medstatus;
clMedBuffer = clCreateBuffer ( src.clCxt->impl->clContext, CL_MEM_READ_WRITE, buffersize, 0, &medstatus);
openCLSafeCall( medstatus );
}
openCLSafeCall( clAmdFftEnqueueTransform( plHandle,
is_inverse ? CLFFT_BACKWARD : CLFFT_FORWARD,
1,
&src.clCxt->impl->clCmdQueue,
0, NULL, NULL,
(cl_mem*)&src.data, (cl_mem*)&dst.data, clMedBuffer ) );
openCLSafeCall( clFinish(src.clCxt->impl->clCmdQueue) );
if(clMedBuffer)
{
openCLFree(clMedBuffer);
}
}
#endif
#endif //HAVE_CLAMDFFT
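For reference, a minimal host-side sketch of calling the dft() entry point above — assuming the ocl device and context have been initialized elsewhere in the module, and that the caller has padded its input to an optimal size as the assertions require (dft_example is an illustrative name, not part of the module):

#include "opencv2/ocl/ocl.hpp"

void dft_example(const cv::Mat &signal)          // CV_32FC2 host matrix
{
    cv::ocl::oclMat src(signal), dst;            // upload to the device
    // Size(0, 0) makes dft() use src.size(); flags = 0 is a forward C2C
    cv::ocl::dft(src, dst, cv::Size(0, 0), 0);
    cv::Mat spectrum(dst);                       // download the result
}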

161
modules/ocl/src/gemm.cpp Normal file
View File

@ -0,0 +1,161 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iomanip>
#include "precomp.hpp"
#ifdef HAVE_CLAMDBLAS
#include "clAmdBlas.h"
#if !defined (HAVE_OPENCL)
void cv::ocl::gemm(const oclMat& src1, const oclMat& src2, double alpha,
const oclMat& src3, double beta, oclMat& dst, int flags) { throw_nogpu(); }
#else
using namespace cv;
void cv::ocl::gemm(const oclMat& src1, const oclMat& src2, double alpha,
const oclMat& src3, double beta, oclMat& dst, int flags)
{
CV_Assert(src1.cols == src2.rows &&
(src3.empty() || (src1.rows == src3.rows && src2.cols == src3.cols)));
CV_Assert(!(cv::GEMM_3_T & flags)); // cv::GEMM_3_T is not supported
if(!src3.empty())
{
src3.copyTo(dst);
}
else
{
dst.create(src1.rows, src2.cols, src1.type());
dst.setTo(Scalar::all(0));
}
openCLSafeCall( clAmdBlasSetup() );
const clAmdBlasTranspose transA = (cv::GEMM_1_T & flags) ? clAmdBlasTrans : clAmdBlasNoTrans;
const clAmdBlasTranspose transB = (cv::GEMM_2_T & flags) ? clAmdBlasTrans : clAmdBlasNoTrans;
const clAmdBlasOrder order = clAmdBlasRowMajor;
const int M = src1.rows;
const int N = src2.cols;
const int K = src1.cols;
int lda = src1.step;
int ldb = src2.step;
int ldc = dst.step;
int offa = src1.offset;
int offb = src2.offset;
int offc = dst.offset;
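// note: oclMat step/offset are in bytes; each case below rescales them to
// element units, since clAmdBlas expects leading dimensions and offsets
// measured in elements of the matrix type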
switch(src1.type())
{
case CV_32FC1:
lda /= sizeof(float);
ldb /= sizeof(float);
ldc /= sizeof(float);
offa /= sizeof(float);
offb /= sizeof(float);
offc /= sizeof(float);
openCLSafeCall
(
clAmdBlasSgemmEx(order, transA, transB, M, N, K,
alpha, (const cl_mem)src1.data, offa, lda, (const cl_mem)src2.data, offb, ldb,
beta, (cl_mem)dst.data, offc, ldc, 1, &src1.clCxt->impl->clCmdQueue, 0, NULL, NULL)
);
break;
case CV_64FC1:
lda /= sizeof(double);
ldb /= sizeof(double);
ldc /= sizeof(double);
offa /= sizeof(double);
offb /= sizeof(double);
offc /= sizeof(double);
openCLSafeCall
(
clAmdBlasDgemmEx(order, transA, transB, M, N, K,
alpha, (const cl_mem)src1.data, offa, lda, (const cl_mem)src2.data, offb, ldb,
beta, (cl_mem)dst.data, offc, ldc, 1, &src1.clCxt->impl->clCmdQueue, 0, NULL, NULL)
);
break;
case CV_32FC2:
{
lda /= sizeof(std::complex<float>);
ldb /= sizeof(std::complex<float>);
ldc /= sizeof(std::complex<float>);
offa /= sizeof(std::complex<float>);
offb /= sizeof(std::complex<float>);
offc /= sizeof(std::complex<float>);
cl_float2 alpha_2 = {{(float)alpha, 0}};
cl_float2 beta_2 = {{(float)beta, 0}};
openCLSafeCall
(
clAmdBlasCgemmEx(order, transA, transB, M, N, K,
alpha_2, (const cl_mem)src1.data, offa, lda, (const cl_mem)src2.data, offb, ldb,
beta_2, (cl_mem)dst.data, offc, ldc, 1, &src1.clCxt->impl->clCmdQueue, 0, NULL, NULL)
);
}
break;
case CV_64FC2:
{
lda /= sizeof(std::complex<double>);
ldb /= sizeof(std::complex<double>);
ldc /= sizeof(std::complex<double>);
offa /= sizeof(std::complex<double>);
offb /= sizeof(std::complex<double>);
offc /= sizeof(std::complex<double>);
cl_double2 alpha_2 = {{alpha, 0}};
cl_double2 beta_2 = {{beta, 0}};
openCLSafeCall
(
clAmdBlasZgemmEx(order, transA, transB, M, N, K,
alpha_2, (const cl_mem)src1.data, offa, lda, (const cl_mem)src2.data, offb, ldb,
beta_2, (cl_mem)dst.data, offc, ldc, 1, &src1.clCxt->impl->clCmdQueue, 0, NULL, NULL)
);
}
break;
default:
CV_Error(CV_StsUnsupportedFormat, "gemm: unsupported matrix type");
}
clAmdBlasTeardown();
}
#endif
#endif
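Likewise, a minimal sketch of computing dst = alpha*src1*src2 + beta*src3 through the gemm() entry point above (assumes an initialized ocl context; the matrix type must be one of the four cases in the switch; gemm_example is an illustrative name):

#include "opencv2/ocl/ocl.hpp"

void gemm_example()
{
    cv::Mat a = cv::Mat::ones(64, 32, CV_32FC1);
    cv::Mat b = cv::Mat::ones(32, 48, CV_32FC1);
    cv::Mat c = cv::Mat::zeros(64, 48, CV_32FC1);
    cv::ocl::oclMat da(a), db(b), dc(c), dst;
    cv::ocl::gemm(da, db, 1.0, dc, 0.5, dst, 0); // flags = 0: no transposes
    cv::Mat result(dst);                         // download
}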

View File

@ -0,0 +1,196 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, MulticoreWare Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Liu Liujun, liujun@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
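// Linear blend kernels: dst = (img1*w1 + img2*w2) / (w1 + w2 + eps), computed
// per pixel. The _C1/_C3/_C4 suffix is the channel count; _D0 kernels operate
// on uchar data and _D5 kernels on float data.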
__kernel void BlendLinear_C1_D0(
__global uchar *dst,
__global uchar *img1,
__global uchar *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
if (idx < cols && idy < rows)
{
int pos = idy * istep + idx;
int wpos = idy * (wstep /sizeof(float)) + idx;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C3_D0(
__global uchar *dst,
__global uchar *img1,
__global uchar *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 3;
int y = idy;
if (x < cols && y < rows)
{
int pos = idy * istep + idx;
int wpos = idy * (wstep /sizeof(float)) + x;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C4_D0(
__global uchar *dst,
__global uchar *img1,
__global uchar *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 4;
int y = idy;
if (x < cols && y < rows)
{
int pos = idy * istep + idx;
int wpos = idy * (wstep /sizeof(float)) + x;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C1_D5(
__global float *dst,
__global float *img1,
__global float *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
if (idx < cols && idy < rows)
{
int pos = idy * (istep / sizeof(float)) + idx;
int wpos = idy * (wstep /sizeof(float)) + idx;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C3_D5(
__global float *dst,
__global float *img1,
__global float *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 3;
int y = idy;
if (x < cols && y < rows)
{
int pos = idy * (istep / sizeof(float)) + idx;
int wpos = idy * (wstep /sizeof(float)) + x;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C4_D5(
__global float *dst,
__global float *img1,
__global float *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 4;
int y = idy;
if (x < cols && y < rows)
{
int pos = idy * (istep / sizeof(float)) + idx;
int wpos = idy * (wstep /sizeof(float)) + x;
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
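All six kernels compute the same per-pixel weighted average; a host-side reference of the single-channel float case, useful for validating a device result, might look like the following sketch (blendLinearRef is an illustrative name, not part of the module):

#include <opencv2/core/core.hpp>

cv::Mat blendLinearRef(const cv::Mat &img1, const cv::Mat &img2,
                       const cv::Mat &w1, const cv::Mat &w2)
{
    CV_Assert(img1.size() == img2.size() && img1.type() == CV_32FC1);
    cv::Mat dst(img1.size(), img1.type());
    for (int y = 0; y < img1.rows; ++y)
        for (int x = 0; x < img1.cols; ++x)
        {
            float a = w1.at<float>(y, x), b = w2.at<float>(y, x);
            // same 1e-5f epsilon as the kernels, guarding against w1 + w2 == 0
            dst.at<float>(y, x) = (img1.at<float>(y, x) * a +
                                   img2.at<float>(y, x) * b) / (a + b + 1e-5f);
        }
    return dst;
}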

View File

@ -0,0 +1,80 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Chunpeng Zhang chunpeng@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_amd_printf : enable
#if defined (__ATI__)
#pragma OPENCL EXTENSION cl_amd_fp64:enable
#elif defined (__NVIDIA__)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
////////////////////////////////////////////////////////////////////
///////////////////////// columnSum ////////////////////////////////
////////////////////////////////////////////////////////////////////
/// CV_32FC1
__kernel void columnSum_C1_D5(__global float* src,__global float* dst,int srcCols,int srcRows,int srcStep,int dstStep)
{
const int x = get_global_id(0);
srcStep >>= 2;
dstStep >>= 2;
if (x < srcCols)
{
int srcIdx = x ;
int dstIdx = x ;
float sum = 0;
for (int y = 0; y < srcRows; ++y)
{
sum += src[srcIdx];
dst[dstIdx] = sum;
srcIdx += srcStep;
dstIdx += dstStep;
}
}
}
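The kernel assigns one work-item per column and writes a running inclusive sum down that column; a host-side equivalent is simply (columnSumRef is an illustrative name):

#include <opencv2/core/core.hpp>

void columnSumRef(const cv::Mat &src, cv::Mat &dst)   // CV_32FC1
{
    dst.create(src.size(), CV_32FC1);
    for (int x = 0; x < src.cols; ++x)
    {
        float sum = 0.f;
        for (int y = 0; y < src.rows; ++y)
        {
            sum += src.at<float>(y, x);
            dst.at<float>(y, x) = sum;   // inclusive prefix sum per column
        }
    }
}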

View File

@ -0,0 +1,824 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_amd_printf : enable
#if defined (__ATI__)
#pragma OPENCL EXTENSION cl_amd_fp64:enable
#elif defined (__NVIDIA__)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
#if !defined(USE_SQR_INTEGRAL) && (defined (__ATI__) || defined (__NVIDIA__))
#define TYPE_IMAGE_SQSUM double
#else
#define TYPE_IMAGE_SQSUM ulong
#endif
//////////////////////////////////////////////////
// utilities
#define SQSUMS_PTR(ox, oy) mad24(gidy + oy, img_sqsums_step, gidx + img_sqsums_offset + ox)
#define SUMS_PTR(ox, oy) mad24(gidy + oy, img_sums_step, gidx + img_sums_offset + ox)
// normAcc* are accurate normalization routines which make GPU matchTemplate
// consistent with CPU one
float normAcc(float num, float denum)
{
if(fabs(num) < denum)
{
return num / denum;
}
if(fabs(num) < denum * 1.125f)
{
return num > 0 ? 1 : -1;
}
return 0;
}
float normAcc_SQDIFF(float num, float denum)
{
if(fabs(num) < denum)
{
return num / denum;
}
if(fabs(num) < denum * 1.125f)
{
return num > 0 ? 1 : -1;
}
return 1;
}
//////////////////////////////////////////////////////////////////////
// normalize
__kernel
void normalizeKernel_C1_D0
(
__global const TYPE_IMAGE_SQSUM * img_sqsums,
__global float * res,
ulong tpl_sqsum,
int res_rows,
int res_cols,
int tpl_rows,
int tpl_cols,
int img_sqsums_offset,
int img_sqsums_step,
int res_offset,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
img_sqsums_step /= sizeof(*img_sqsums);
img_sqsums_offset /= sizeof(*img_sqsums);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float image_sqsum_ = (float)(
(img_sqsums[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums[SQSUMS_PTR(0, tpl_rows)] - img_sqsums[SQSUMS_PTR(0, 0)]));
res[res_idx] = normAcc(res[res_idx], sqrt(image_sqsum_ * tpl_sqsum));
}
}
__kernel
void matchTemplate_Prepared_SQDIFF_C1_D0
(
__global const TYPE_IMAGE_SQSUM * img_sqsums,
__global float * res,
ulong tpl_sqsum,
int res_rows,
int res_cols,
int tpl_rows,
int tpl_cols,
int img_sqsums_offset,
int img_sqsums_step,
int res_offset,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
img_sqsums_step /= sizeof(*img_sqsums);
img_sqsums_offset /= sizeof(*img_sqsums);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float image_sqsum_ = (float)(
(img_sqsums[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums[SQSUMS_PTR(0, tpl_rows)] - img_sqsums[SQSUMS_PTR(0, 0)]));
res[res_idx] = image_sqsum_ - 2.f * res[res_idx] + tpl_sqsum;
}
}
__kernel
void matchTemplate_Prepared_SQDIFF_NORMED_C1_D0
(
__global const TYPE_IMAGE_SQSUM * img_sqsums,
__global float * res,
ulong tpl_sqsum,
int res_rows,
int res_cols,
int tpl_rows,
int tpl_cols,
int img_sqsums_offset,
int img_sqsums_step,
int res_offset,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
img_sqsums_step /= sizeof(*img_sqsums);
img_sqsums_offset /= sizeof(*img_sqsums);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float image_sqsum_ = (float)(
(img_sqsums[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums[SQSUMS_PTR(0, tpl_rows)] - img_sqsums[SQSUMS_PTR(0, 0)]));
res[res_idx] = normAcc_SQDIFF(image_sqsum_ - 2.f * res[res_idx] + tpl_sqsum,
sqrt(image_sqsum_ * tpl_sqsum));
}
}
//////////////////////////////////////////////////
// SQDIFF
__kernel
void matchTemplate_Naive_SQDIFF_C1_D0
(
__global const uchar * img,
__global const uchar * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
int delta;
int sum = 0;
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const uchar * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const uchar * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
delta = img_ptr[j] - tpl_ptr[j];
sum = mad24(delta, delta, sum);
}
}
res[res_idx] = sum;
}
}
__kernel
void matchTemplate_Naive_SQDIFF_C1_D5
(
__global const float * img,
__global const float * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
float delta;
float sum = 0;
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const float * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const float * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
delta = img_ptr[j] - tpl_ptr[j];
sum = mad(delta, delta, sum);
}
}
res[res_idx] = sum;
}
}
__kernel
void matchTemplate_Naive_SQDIFF_C4_D0
(
__global const uchar4 * img,
__global const uchar4 * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
int4 delta;
int4 sum = (int4)(0, 0, 0, 0);
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const uchar4 * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const uchar4 * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
//delta = convert_int4(img_ptr[j] - tpl_ptr[j]); // this alternative is incorrect
delta.x = img_ptr[j].x - tpl_ptr[j].x;
delta.y = img_ptr[j].y - tpl_ptr[j].y;
delta.z = img_ptr[j].z - tpl_ptr[j].z;
delta.w = img_ptr[j].w - tpl_ptr[j].w;
sum = mad24(delta, delta, sum);
}
}
res[res_idx] = sum.x + sum.y + sum.z + sum.w;
}
}
__kernel
void matchTemplate_Naive_SQDIFF_C4_D5
(
__global const float4 * img,
__global const float4 * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
float4 delta;
float4 sum = (float4)(0, 0, 0, 0);
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const float4 * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const float4 * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
//delta = convert_int4(img_ptr[j] - tpl_ptr[j]); // this alternative is incorrect
delta.x = img_ptr[j].x - tpl_ptr[j].x;
delta.y = img_ptr[j].y - tpl_ptr[j].y;
delta.z = img_ptr[j].z - tpl_ptr[j].z;
delta.w = img_ptr[j].w - tpl_ptr[j].w;
sum = mad(delta, delta, sum);
}
}
res[res_idx] = sum.x + sum.y + sum.z + sum.w;
}
}
//////////////////////////////////////////////////
// CCORR
__kernel
void matchTemplate_Naive_CCORR_C1_D0
(
__global const uchar * img,
__global const uchar * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
int sum = 0;
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const uchar * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const uchar * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
sum = mad24(img_ptr[j], tpl_ptr[j], sum);
}
}
res[res_idx] = sum;
}
}
__kernel
void matchTemplate_Naive_CCORR_C1_D5
(
__global const float * img,
__global const float * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
float sum = 0;
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const float * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const float * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
sum = mad(img_ptr[j], tpl_ptr[j], sum);
}
}
res[res_idx] = sum;
}
}
__kernel
void matchTemplate_Naive_CCORR_C4_D0
(
__global const uchar4 * img,
__global const uchar4 * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
int4 sum = (int4)(0, 0, 0, 0);
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const uchar4 * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const uchar4 * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
sum = mad24(convert_int4(img_ptr[j]), convert_int4(tpl_ptr[j]), sum);
}
}
res[res_idx] = sum.x + sum.y + sum.z + sum.w;
}
}
__kernel
void matchTemplate_Naive_CCORR_C4_D5
(
__global const float4 * img,
__global const float4 * tpl,
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int img_offset,
int tpl_offset,
int res_offset,
int img_step,
int tpl_step,
int res_step
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
int i,j;
float4 sum = (float4)(0, 0, 0, 0);
img_step /= sizeof(*img);
img_offset /= sizeof(*img);
tpl_step /= sizeof(*tpl);
tpl_offset /= sizeof(*tpl);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
for(i = 0; i < tpl_rows; i ++)
{
// get specific rows of img data
__global const float4 * img_ptr = img + mad24(gidy + i, img_step, gidx + img_offset);
__global const float4 * tpl_ptr = tpl + mad24(i, tpl_step, tpl_offset);
for(j = 0; j < tpl_cols; j ++)
{
sum = mad(convert_float4(img_ptr[j]), convert_float4(tpl_ptr[j]), sum);
}
}
res[res_idx] = sum.x + sum.y + sum.z + sum.w;
}
}
//////////////////////////////////////////////////
// CCOFF
__kernel
void matchTemplate_Prepared_CCOFF_C1_D0
(
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int res_offset,
int res_step,
__global const uint * img_sums,
int img_sums_offset,
int img_sums_step,
float tpl_sum
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
img_sums_offset /= sizeof(*img_sums);
img_sums_step /= sizeof(*img_sums);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float sum = (float)(
(img_sums[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums[SUMS_PTR(tpl_cols, 0)])
- (img_sums[SUMS_PTR(0, tpl_rows)] - img_sums[SUMS_PTR(0, 0)]));
res[res_idx] -= sum * tpl_sum;
}
}
__kernel
void matchTemplate_Prepared_CCOFF_C4_D0
(
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int res_offset,
int res_step,
__global const uint * img_sums_c0,
__global const uint * img_sums_c1,
__global const uint * img_sums_c2,
__global const uint * img_sums_c3,
int img_sums_offset,
int img_sums_step,
float tpl_sum_c0,
float tpl_sum_c1,
float tpl_sum_c2,
float tpl_sum_c3
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
img_sums_offset /= sizeof(*img_sums_c0);
img_sums_step /= sizeof(*img_sums_c0);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float ccorr = res[res_idx];
ccorr -= tpl_sum_c0*(float)(
(img_sums_c0[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c0[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c0[SUMS_PTR(0, tpl_rows)] - img_sums_c0[SUMS_PTR(0, 0)]));
ccorr -= tpl_sum_c1*(float)(
(img_sums_c1[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c1[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c1[SUMS_PTR(0, tpl_rows)] - img_sums_c1[SUMS_PTR(0, 0)]));
ccorr -= tpl_sum_c2*(float)(
(img_sums_c2[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c2[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c2[SUMS_PTR(0, tpl_rows)] - img_sums_c2[SUMS_PTR(0, 0)]));
ccorr -= tpl_sum_c3*(float)(
(img_sums_c3[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c3[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c3[SUMS_PTR(0, tpl_rows)] - img_sums_c3[SUMS_PTR(0, 0)]));
res[res_idx] = ccorr;
}
}
__kernel
void matchTemplate_Prepared_CCOFF_NORMED_C1_D0
(
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int res_offset,
int res_step,
float weight,
__global const uint * img_sums,
int img_sums_offset,
int img_sums_step,
__global const TYPE_IMAGE_SQSUM * img_sqsums,
int img_sqsums_offset,
int img_sqsums_step,
float tpl_sum,
float tpl_sqsum
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
img_sqsums_step /= sizeof(*img_sqsums);
img_sqsums_offset /= sizeof(*img_sqsums);
img_sums_offset /= sizeof(*img_sums);
img_sums_step /= sizeof(*img_sums);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float image_sum_ = (float)(
(img_sums[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums[SUMS_PTR(tpl_cols, 0)])
- (img_sums[SUMS_PTR(0, tpl_rows)] - img_sums[SUMS_PTR(0, 0)]));
float image_sqsum_ = (float)(
(img_sqsums[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums[SQSUMS_PTR(0, tpl_rows)] - img_sqsums[SQSUMS_PTR(0, 0)]));
res[res_idx] = normAcc(res[res_idx] - image_sum_ * tpl_sum,
sqrt(tpl_sqsum * (image_sqsum_ - weight * image_sum_ * image_sum_)));
}
}
__kernel
void matchTemplate_Prepared_CCOFF_NORMED_C4_D0
(
__global float * res,
int img_rows,
int img_cols,
int tpl_rows,
int tpl_cols,
int res_rows,
int res_cols,
int res_offset,
int res_step,
float weight,
__global const uint * img_sums_c0,
__global const uint * img_sums_c1,
__global const uint * img_sums_c2,
__global const uint * img_sums_c3,
int img_sums_offset,
int img_sums_step,
__global const TYPE_IMAGE_SQSUM * img_sqsums_c0,
__global const TYPE_IMAGE_SQSUM * img_sqsums_c1,
__global const TYPE_IMAGE_SQSUM * img_sqsums_c2,
__global const TYPE_IMAGE_SQSUM * img_sqsums_c3,
int img_sqsums_offset,
int img_sqsums_step,
float tpl_sum_c0,
float tpl_sum_c1,
float tpl_sum_c2,
float tpl_sum_c3,
float tpl_sqsum
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
img_sqsums_step /= sizeof(*img_sqsums_c0);
img_sqsums_offset /= sizeof(*img_sqsums_c0);
img_sums_offset /= sizeof(*img_sums_c0);
img_sums_step /= sizeof(*img_sums_c0);
res_step /= sizeof(*res);
res_offset /= sizeof(*res);
int res_idx = mad24(gidy, res_step, res_offset + gidx);
if(gidx < res_cols && gidy < res_rows)
{
float image_sum_c0 = (float)(
(img_sums_c0[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c0[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c0[SUMS_PTR(0, tpl_rows)] - img_sums_c0[SUMS_PTR(0, 0)]));
float image_sum_c1 = (float)(
(img_sums_c1[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c1[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c1[SUMS_PTR(0, tpl_rows)] - img_sums_c1[SUMS_PTR(0, 0)]));
float image_sum_c2 = (float)(
(img_sums_c2[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c2[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c2[SUMS_PTR(0, tpl_rows)] - img_sums_c2[SUMS_PTR(0, 0)]));
float image_sum_c3 = (float)(
(img_sums_c3[SUMS_PTR(tpl_cols, tpl_rows)] - img_sums_c3[SUMS_PTR(tpl_cols, 0)])
- (img_sums_c3[SUMS_PTR(0, tpl_rows)] - img_sums_c3[SUMS_PTR(0, 0)]));
float image_sqsum_c0 = (float)(
(img_sqsums_c0[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums_c0[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums_c0[SQSUMS_PTR(0, tpl_rows)] - img_sqsums_c0[SQSUMS_PTR(0, 0)]));
float image_sqsum_c1 = (float)(
(img_sqsums_c1[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums_c1[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums_c1[SQSUMS_PTR(0, tpl_rows)] - img_sqsums_c1[SQSUMS_PTR(0, 0)]));
float image_sqsum_c2 = (float)(
(img_sqsums_c2[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums_c2[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums_c2[SQSUMS_PTR(0, tpl_rows)] - img_sqsums_c2[SQSUMS_PTR(0, 0)]));
float image_sqsum_c3 = (float)(
(img_sqsums_c3[SQSUMS_PTR(tpl_cols, tpl_rows)] - img_sqsums_c3[SQSUMS_PTR(tpl_cols, 0)]) -
(img_sqsums_c3[SQSUMS_PTR(0, tpl_rows)] - img_sqsums_c3[SQSUMS_PTR(0, 0)]));
float num = res[res_idx] -
image_sum_c0 * tpl_sum_c0 -
image_sum_c1 * tpl_sum_c1 -
image_sum_c2 * tpl_sum_c2 -
image_sum_c3 * tpl_sum_c3;
float denum = sqrt( tpl_sqsum * (
image_sqsum_c0 - weight * image_sum_c0 * image_sum_c0 +
image_sqsum_c1 - weight * image_sum_c1 * image_sum_c1 +
image_sqsum_c2 - weight * image_sum_c2 * image_sum_c2 +
image_sqsum_c3 - weight * image_sum_c3 * image_sum_c3)
);
res[res_idx] = normAcc(num, denum);
}
}
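Throughout this file the SUMS_PTR/SQSUMS_PTR lookups realize the standard integral-image trick: the sum over a tpl_cols x tpl_rows window falls out of four corner reads. A host-side sketch of that lookup, assuming a (rows+1) x (cols+1) CV_32S integral image as produced by cv::integral (windowSum is an illustrative name):

#include <opencv2/core/core.hpp>

// sum of the w x h window anchored at (x, y), read from integral image 'sums'
static inline float windowSum(const cv::Mat &sums, int x, int y, int w, int h)
{
    return (float)((sums.at<int>(y + h, x + w) - sums.at<int>(y, x + w))
                 - (sums.at<int>(y + h, x)     - sums.at<int>(y, x)));
}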

View File

@ -0,0 +1,500 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Dachuan Zhao, dachuan@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_amd_printf : enable
uchar round_uchar_uchar(uchar v)
{
return v;
}
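// round_uchar_int saturates an int into [0, 255]. round_uchar_float (below)
// additionally nudges exact half-way values upward before the round-to-even
// convert_int_sat_rte, so that e.g. 2.5f rounds to 3 as on the CPU path
// (the nudge only fires for non-negative half-way inputs).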
uchar round_uchar_int(int v)
{
return (uchar)((uint)v <= 255 ? v : v > 0 ? 255 : 0);
}
uchar round_uchar_float(float v)
{
if(v - convert_int_sat_rte(v) > 1e-6 || v - convert_int_sat_rte(v) < -1e-6)
{
if(((int)v + 1) - (v + 0.5f) < 1e-6 && ((int)v + 1) - (v + 0.5f) > -1e-6)
{
v = (int)v + 0.51f;
}
}
int iv = convert_int_sat_rte(v);
return round_uchar_int(iv);
}
uchar4 round_uchar4_uchar4(uchar4 v)
{
return v;
}
uchar4 round_uchar4_int4(int4 v)
{
uchar4 result;
result.x = (uchar)(v.x <= 255 ? v.x : v.x > 0 ? 255 : 0);
result.y = (uchar)(v.y <= 255 ? v.y : v.y > 0 ? 255 : 0);
result.z = (uchar)(v.z <= 255 ? v.z : v.z > 0 ? 255 : 0);
result.w = (uchar)(v.w <= 255 ? v.w : v.w > 0 ? 255 : 0);
return result;
}
uchar4 round_uchar4_float4(float4 v)
{
if(v.x - convert_int_sat_rte(v.x) > 1e-6 || v.x - convert_int_sat_rte(v.x) < -1e-6)
{
if(((int)(v.x) + 1) - (v.x + 0.5f) < 1e-6 && ((int)(v.x) + 1) - (v.x + 0.5f) > -1e-6)
{
v.x = (int)(v.x) + 0.51f;
}
}
if(v.y - convert_int_sat_rte(v.y) > 1e-6 || v.y - convert_int_sat_rte(v.y) < -1e-6)
{
if(((int)(v.y) + 1) - (v.y + 0.5f) < 1e-6 && ((int)(v.y) + 1) - (v.y + 0.5f) > -1e-6)
{
v.y = (int)(v.y) + 0.51f;
}
}
if(v.z - convert_int_sat_rte(v.z) > 1e-6 || v.z - convert_int_sat_rte(v.z) < -1e-6)
{
if(((int)(v.z) + 1) - (v.z + 0.5f) < 1e-6 && ((int)(v.z) + 1) - (v.z + 0.5f) > -1e-6)
{
v.z = (int)(v.z) + 0.51f;
}
}
if(v.w - convert_int_sat_rte(v.w) > 1e-6 || v.w - convert_int_sat_rte(v.w) < -1e-6)
{
if(((int)(v.w) + 1) - (v.w + 0.5f) < 1e-6 && ((int)(v.w) + 1) - (v.w + 0.5f) > -1e-6)
{
v.w = (int)(v.w) + 0.51f;
}
}
int4 iv = convert_int4_sat_rte(v);
return round_uchar4_int4(iv);
}
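// The idx_* helpers below emulate reflected border indexing (row -1 maps back
// to row 1, and symmetrically past the last row/column), so the 5-tap filter
// can safely read outside the image bounds.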
int idx_row_low(int y, int last_row)
{
if(y < 0)
{
y = -y;
}
return y % (last_row + 1);
}
int idx_row_high(int y, int last_row)
{
int i;
int j;
if(last_row - y < 0)
{
i = (y - last_row);
}
else
{
i = (last_row - y);
}
if(last_row - i < 0)
{
j = i - last_row;
}
else
{
j = last_row - i;
}
return j % (last_row + 1);
}
int idx_row(int y, int last_row)
{
return idx_row_low(idx_row_high(y, last_row), last_row);
}
int idx_col_low(int x, int last_col)
{
if(x < 0)
{
x = -x;
}
return x % (last_col + 1);
}
int idx_col_high(int x, int last_col)
{
int i;
int j;
if(last_col - x < 0)
{
i = (x - last_col);
}
else
{
i = (last_col - x);
}
if(last_col - i < 0)
{
j = i - last_col;
}
else
{
j = last_col - i;
}
return j % (last_col + 1);
}
int idx_col(int x, int last_col)
{
return idx_col_low(idx_col_high(x, last_col), last_col);
}
__kernel void pyrDown_C1_D0(__global uchar * srcData, int srcStep, int srcOffset, int srcRows, int srcCols, __global uchar *dst, int dstStep, int dstOffset, int dstCols)
{
const int x = get_group_id(0) * get_local_size(0) + get_local_id(0);
const int y = get_group_id(1);
__local float smem[256 + 4];
float sum;
const int src_y = 2*y;
const int last_row = srcRows - 1;
const int last_col = srcCols - 1;
sum = 0;
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(x, last_col)]);
sum = sum + 0.375f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(x, last_col)]);
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(x, last_col)]);
smem[2 + get_local_id(0)] = sum;
if (get_local_id(0) < 2)
{
const int left_x = x - 2;
sum = 0;
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(left_x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(left_x, last_col)]);
sum = sum + 0.375f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(left_x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(left_x, last_col)]);
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(left_x, last_col)]);
smem[get_local_id(0)] = sum;
}
if (get_local_id(0) > 253)
{
const int right_x = x + 2;
sum = 0;
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(right_x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(right_x, last_col)]);
sum = sum + 0.375f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(right_x, last_col)]);
sum = sum + 0.25f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(right_x, last_col)]);
sum = sum + 0.0625f * round_uchar_uchar(((__global uchar*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(right_x, last_col)]);
smem[4 + get_local_id(0)] = sum;
}
barrier(CLK_LOCAL_MEM_FENCE);
if (get_local_id(0) < 128)
{
const int tid2 = get_local_id(0) * 2;
sum = 0;
sum = sum + 0.0625f * smem[2 + tid2 - 2];
sum = sum + 0.25f * smem[2 + tid2 - 1];
sum = sum + 0.375f * smem[2 + tid2 ];
sum = sum + 0.25f * smem[2 + tid2 + 1];
sum = sum + 0.0625f * smem[2 + tid2 + 2];
const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;
if (dst_x < dstCols)
dst[y * dstStep + dst_x] = round_uchar_float(sum);
}
}
__kernel void pyrDown_C4_D0(__global uchar4 * srcData, int srcStep, int srcOffset, int srcRows, int srcCols, __global uchar4 *dst, int dstStep, int dstOffset, int dstCols)
{
const int x = get_group_id(0) * get_local_size(0) + get_local_id(0);
const int y = get_group_id(1);
__local float4 smem[256 + 4];
float4 sum;
const int src_y = 2*y;
const int last_row = srcRows - 1;
const int last_col = srcCols - 1;
float4 co1 = (float4)(0.375f, 0.375f, 0.375f, 0.375f);
float4 co2 = (float4)(0.25f, 0.25f, 0.25f, 0.25f);
float4 co3 = (float4)(0.0625f, 0.0625f, 0.0625f, 0.0625f);
sum = 0;
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(x, last_col)]));
sum = sum + co1 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(x, last_col)]));
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(x, last_col)]));
smem[2 + get_local_id(0)] = sum;
if (get_local_id(0) < 2)
{
const int left_x = x - 2;
sum = 0;
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(left_x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(left_x, last_col)]));
sum = sum + co1 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(left_x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(left_x, last_col)]));
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(left_x, last_col)]));
smem[get_local_id(0)] = sum;
}
if (get_local_id(0) > 253)
{
const int right_x = x + 2;
sum = 0;
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(right_x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(right_x, last_col)]));
sum = sum + co1 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(right_x, last_col)]));
sum = sum + co2 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(right_x, last_col)]));
sum = sum + co3 * convert_float4(round_uchar4_uchar4(((__global uchar4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(right_x, last_col)]));
smem[4 + get_local_id(0)] = sum;
}
barrier(CLK_LOCAL_MEM_FENCE);
if (get_local_id(0) < 128)
{
const int tid2 = get_local_id(0) * 2;
sum = 0;
sum = sum + co3 * smem[2 + tid2 - 2];
sum = sum + co2 * smem[2 + tid2 - 1];
sum = sum + co1 * smem[2 + tid2 ];
sum = sum + co2 * smem[2 + tid2 + 1];
sum = sum + co3 * smem[2 + tid2 + 2];
const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;
if (dst_x < dstCols)
dst[y * dstStep / 4 + dst_x] = round_uchar4_float4(sum);
}
}
__kernel void pyrDown_C1_D5(__global float * srcData, int srcStep, int srcOffset, int srcRows, int srcCols, __global float *dst, int dstStep, int dstOffset, int dstCols)
{
const int x = get_group_id(0) * get_local_size(0) + get_local_id(0);
const int y = get_group_id(1);
__local float smem[256 + 4];
float sum;
const int src_y = 2*y;
const int last_row = srcRows - 1;
const int last_col = srcCols - 1;
sum = 0;
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(x, last_col)];
sum = sum + 0.375f * ((__global float*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(x, last_col)];
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(x, last_col)];
smem[2 + get_local_id(0)] = sum;
if (get_local_id(0) < 2)
{
const int left_x = x - 2;
sum = 0;
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(left_x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(left_x, last_col)];
sum = sum + 0.375f * ((__global float*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(left_x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(left_x, last_col)];
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(left_x, last_col)];
smem[get_local_id(0)] = sum;
}
if (get_local_id(0) > 253)
{
const int right_x = x + 2;
sum = 0;
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y - 2, last_row) * srcStep))[idx_col(right_x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y - 1, last_row) * srcStep))[idx_col(right_x, last_col)];
sum = sum + 0.375f * ((__global float*)((__global char*)srcData + idx_row(src_y , last_row) * srcStep))[idx_col(right_x, last_col)];
sum = sum + 0.25f * ((__global float*)((__global char*)srcData + idx_row(src_y + 1, last_row) * srcStep))[idx_col(right_x, last_col)];
sum = sum + 0.0625f * ((__global float*)((__global char*)srcData + idx_row(src_y + 2, last_row) * srcStep))[idx_col(right_x, last_col)];
smem[4 + get_local_id(0)] = sum;
}
barrier(CLK_LOCAL_MEM_FENCE);
if (get_local_id(0) < 128)
{
const int tid2 = get_local_id(0) * 2;
sum = 0;
sum = sum + 0.0625f * smem[2 + tid2 - 2];
sum = sum + 0.25f * smem[2 + tid2 - 1];
sum = sum + 0.375f * smem[2 + tid2 ];
sum = sum + 0.25f * smem[2 + tid2 + 1];
sum = sum + 0.0625f * smem[2 + tid2 + 2];
const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;
if (dst_x < dstCols)
dst[y * dstStep / 4 + dst_x] = sum;
}
}
__kernel void pyrDown_C4_D5(__global float4 * srcData, int srcStep, int srcOffset, int srcRows, int srcCols, __global float4 *dst, int dstStep, int dstOffset, int dstCols)
{
const int x = get_group_id(0) * get_local_size(0) + get_local_id(0);
const int y = get_group_id(1);
__local float4 smem[256 + 4];
float4 sum;
const int src_y = 2*y;
const int last_row = srcRows - 1;
const int last_col = srcCols - 1;
float4 co1 = (float4)(0.375f, 0.375f, 0.375f, 0.375f);
float4 co2 = (float4)(0.25f, 0.25f, 0.25f, 0.25f);
float4 co3 = (float4)(0.0625f, 0.0625f, 0.0625f, 0.0625f);
sum = 0;
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(x, last_col)];
sum = sum + co1 * ((__global float4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(x, last_col)];
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(x, last_col)];
smem[2 + get_local_id(0)] = sum;
if (get_local_id(0) < 2)
{
const int left_x = x - 2;
sum = 0;
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(left_x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(left_x, last_col)];
sum = sum + co1 * ((__global float4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(left_x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(left_x, last_col)];
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(left_x, last_col)];
smem[get_local_id(0)] = sum;
}
if (get_local_id(0) > 253)
{
const int right_x = x + 2;
sum = 0;
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 2, last_row) * srcStep / 4))[idx_col(right_x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y - 1, last_row) * srcStep / 4))[idx_col(right_x, last_col)];
sum = sum + co1 * ((__global float4*)((__global char4*)srcData + idx_row(src_y , last_row) * srcStep / 4))[idx_col(right_x, last_col)];
sum = sum + co2 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 1, last_row) * srcStep / 4))[idx_col(right_x, last_col)];
sum = sum + co3 * ((__global float4*)((__global char4*)srcData + idx_row(src_y + 2, last_row) * srcStep / 4))[idx_col(right_x, last_col)];
smem[4 + get_local_id(0)] = sum;
}
barrier(CLK_LOCAL_MEM_FENCE);
if (get_local_id(0) < 128)
{
const int tid2 = get_local_id(0) * 2;
sum = 0;
sum = sum + co3 * smem[2 + tid2 - 2];
sum = sum + co2 * smem[2 + tid2 - 1];
sum = sum + co1 * smem[2 + tid2 ];
sum = sum + co2 * smem[2 + tid2 + 1];
sum = sum + co3 * smem[2 + tid2 + 2];
const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;
if (dst_x < dstCols)
dst[y * dstStep / 16 + dst_x] = sum;
}
}
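Each pyrDown kernel applies the separable 5-tap binomial filter [1 4 6 4 1]/16 (the 0.0625/0.25/0.375 coefficients above) vertically in registers and horizontally through local memory, then keeps every second column. A scalar sketch of the same filtering, with border handling simplified to clamping rather than the reflection used above (pyrDownRef is an illustrative name):

#include <algorithm>
#include <opencv2/core/core.hpp>

void pyrDownRef(const cv::Mat &src, cv::Mat &dst)   // CV_32FC1
{
    static const float k[5] = { 0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f };
    dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, CV_32FC1);
    for (int y = 0; y < dst.rows; ++y)
        for (int x = 0; x < dst.cols; ++x)
        {
            float sum = 0.f;
            for (int dy = -2; dy <= 2; ++dy)
                for (int dx = -2; dx <= 2; ++dx)
                {
                    int sy = std::min(std::max(2 * y + dy, 0), src.rows - 1);
                    int sx = std::min(std::max(2 * x + dx, 0), src.cols - 1);
                    sum += k[dy + 2] * k[dx + 2] * src.at<float>(sy, sx);
                }
            dst.at<float>(y, x) = sum;
        }
}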

Some files were not shown because too many files have changed in this diff.