diff --git a/.gitattributes b/.gitattributes
index cd4359ba3..35df6ca2f 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,42 +1,58 @@
-.git* export-ignore
-
 * text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
-*.py text
-*.cpp text
-*.hpp text
-*.cxx text
-*.hxx text
-*.mm text
-*.c text
-*.h text
-*.i text
-*.js text
-*.java text
-*.scala text
-*.cu text
-*.cl text
-*.css_t text
-*.qrc text
-*.qss text
-*.S text
-*.rst text
-*.tex text
-*.sty text
+.git* text export-ignore
 
-*.aidl text
-*.mk text
+*.aidl text
+*.appxmanifest text
+*.bib text
+*.c text
+*.cl text
+*.conf text
+*.cpp text
+*.css_t text
+*.cu text
+*.cxx text
+*.def text
+*.filelist text
+*.h text
+*.hpp text
+*.htm text
+*.html text
+*.hxx text
+*.i text
+*.idl text
+*.java text
+*.js text
+*.mk text
+*.mm text
+*.plist text
+*.properties text
+*.py text
+*.qrc text
+*.qss text
+*.S text
+*.sbt text
+*.scala text
+*.sty text
+*.tex text
+*.txt text
+*.xaml text
+
+# reST underlines/overlines can look like conflict markers
+*.rst text conflict-marker-size=80
 
 *.cmake text whitespace=tabwidth=2
 *.cmakein text whitespace=tabwidth=2
 *.in text whitespace=tabwidth=2
 CMakeLists.txt text whitespace=tabwidth=2
 
-*.png binary
-*.jpeg binary
-*.jpg binary
+*.avi binary
+*.bmp binary
 *.exr binary
 *.ico binary
+*.jpeg binary
+*.jpg binary
+*.png binary
 
 *.a binary
 *.so binary
@@ -47,6 +63,7 @@ CMakeLists.txt text whitespace=tabwidth=2
 *.pbxproj binary
 *.vec binary
 *.doc binary
+*.dia binary
 
 *.xml -text whitespace=cr-at-eol
 *.yml -text whitespace=cr-at-eol
@@ -55,9 +72,12 @@ CMakeLists.txt text whitespace=tabwidth=2
 .cproject -text whitespace=cr-at-eol merge=union
 org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union
 
-*.vcproj text eol=crlf merge=union
 *.bat text eol=crlf
 *.cmd text eol=crlf
 *.cmd.tmpl text eol=crlf
+*.dsp text eol=crlf -whitespace
+*.sln text eol=crlf -whitespace
+*.vcproj text eol=crlf -whitespace merge=union
+*.vcxproj text eol=crlf -whitespace merge=union
 
-*.sh text eol=lf
\ No newline at end of file
+*.sh text eol=lf
diff --git a/.gitignore b/.gitignore
index 0a19f3cee..4fd406edd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 .DS_Store
 refman.rst
 OpenCV4Tegra/
+tegra/
 *.user
 .sw[a-z]
 .*.swp
diff --git a/3rdparty/.gitattributes b/3rdparty/.gitattributes
new file mode 100644
index 000000000..562b12e16
--- /dev/null
+++ b/3rdparty/.gitattributes
@@ -0,0 +1 @@
+* -whitespace
diff --git a/3rdparty/ffmpeg/ffmpeg_version.cmake b/3rdparty/ffmpeg/ffmpeg_version.cmake
index 3f27077d6..7fc3d6530 100644
--- a/3rdparty/ffmpeg/ffmpeg_version.cmake
+++ b/3rdparty/ffmpeg/ffmpeg_version.cmake
@@ -1,4 +1,3 @@
-set(NEW_FFMPEG 1)
 set(HAVE_FFMPEG_CODEC 1)
 set(HAVE_FFMPEG_FORMAT 1)
 set(HAVE_FFMPEG_UTIL 1)
@@ -8,4 +7,4 @@ set(HAVE_GENTOO_FFMPEG 1)
 set(ALIASOF_libavcodec_VERSION 53.61.100)
 set(ALIASOF_libavformat_VERSION 53.32.100)
 set(ALIASOF_libavutil_VERSION 51.35.100)
-set(ALIASOF_libswscale_VERSION 2.1.100)
\ No newline at end of file
+set(ALIASOF_libswscale_VERSION 2.1.100)
diff --git a/3rdparty/ffmpeg/make.bat b/3rdparty/ffmpeg/make.bat
index 9f11b52d6..2323af9ab 100644
--- a/3rdparty/ffmpeg/make.bat
+++ b/3rdparty/ffmpeg/make.bat
@@ -1,2 +1,2 @@
 set path=c:\dev\msys32\bin;%path% & gcc -Wall -shared -o opencv_ffmpeg.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat -lavcodec -lavdevice -lswscale -lavutil -lwsock32
-set path=c:\dev\msys64\bin;%path% & gcc -m64 -Wall -shared -o opencv_ffmpeg_64.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat64 -lavcodec64 -lavdevice64 -lswscale64 -lavutil64 -lavcore64 -lwsock32 -lws2_32
\ No newline at end of file
+set path=c:\dev\msys64\bin;%path% & gcc -m64 -Wall -shared -o opencv_ffmpeg_64.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat64 -lavcodec64 -lavdevice64 -lswscale64 -lavutil64 -lavcore64 -lwsock32 -lws2_32
diff --git a/3rdparty/ffmpeg/readme.txt b/3rdparty/ffmpeg/readme.txt
index 1089ee2a7..e98b28520 100644
--- a/3rdparty/ffmpeg/readme.txt
+++ b/3rdparty/ffmpeg/readme.txt
@@ -16,7 +16,7 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F
 2. Install 64-bit MinGW. http://mingw-w64.sourceforge.net/
    Let's assume, it's installed in C:\MSYS64
 3. Copy C:\MSYS32\msys to C:\MSYS64\msys. Edit C:\MSYS64\msys\etc\fstab, change C:\MSYS32 to C:\MSYS64.
-
+
 4. Now you have working MSYS32 and MSYS64 environments.
    Launch, one by one, C:\MSYS32\msys\msys.bat and C:\MSYS64\msys\msys.bat to create your home directories.
@@ -40,5 +40,3 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F
 
 8. Then, go to <opencv>\3rdparty\ffmpeg, edit make.bat
    (change paths to the actual paths to your msys32 and msys64 distributions) and then run make.bat
-
-
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so
index f62e5961f..6f28f2c55 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so
index c0237c1d0..010641ed1 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so
index 2c235d824..5a145b25b 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so
index 487258889..a524b743f 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so
index 169d97e77..a1802f1ff 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so
index bdd09fb06..089c7e9e2 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so
index 74bfdea5e..a9ffa4b0c 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so
new file mode 100755
index 000000000..8ff7177ad
Binary files /dev/null and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so
index 7c061d9ee..b6ce0d5a5 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so and b/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so
index 686bdfb61..635ce681a 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so and b/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so
index 5a5c23173..caacf39d4 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so and b/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so
index 694cfb80d..fff4a8069 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so
index c6cc8ab5f..311926555 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so and b/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so
index 94966c82e..7c18baf01 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so and b/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so
index 8251510c4..8bb093a3d 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so
new file mode 100755
index 000000000..a05f17917
Binary files /dev/null and b/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.0.3.so b/3rdparty/lib/mips/libnative_camera_r4.0.3.so
index c8c9e2c57..b9500441a 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.0.3.so and b/3rdparty/lib/mips/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.1.1.so b/3rdparty/lib/mips/libnative_camera_r4.1.1.so
index 6845d715d..d11dcf036 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.1.1.so and b/3rdparty/lib/mips/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.2.0.so b/3rdparty/lib/mips/libnative_camera_r4.2.0.so
index b148d1621..b06a6819f 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.2.0.so and b/3rdparty/lib/mips/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.3.0.so b/3rdparty/lib/mips/libnative_camera_r4.3.0.so
new file mode 100755
index 000000000..844b806b9
Binary files /dev/null and b/3rdparty/lib/mips/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r2.3.3.so b/3rdparty/lib/x86/libnative_camera_r2.3.3.so
index d9400638b..0dd8904ac 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r2.3.3.so and b/3rdparty/lib/x86/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r3.0.1.so b/3rdparty/lib/x86/libnative_camera_r3.0.1.so
index cf2e9908c..105a19d0c 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r3.0.1.so and b/3rdparty/lib/x86/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.0.3.so b/3rdparty/lib/x86/libnative_camera_r4.0.3.so
index 420ec818f..b01a4bd28 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.0.3.so and b/3rdparty/lib/x86/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.1.1.so b/3rdparty/lib/x86/libnative_camera_r4.1.1.so
index 5468d206c..a59ae39b3 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.1.1.so and b/3rdparty/lib/x86/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.2.0.so b/3rdparty/lib/x86/libnative_camera_r4.2.0.so
index 992331032..b90b82644 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.2.0.so and b/3rdparty/lib/x86/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.3.0.so b/3rdparty/lib/x86/libnative_camera_r4.3.0.so
new file mode 100755
index 000000000..6607e5da8
Binary files /dev/null and b/3rdparty/lib/x86/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/libjasper/CMakeLists.txt b/3rdparty/libjasper/CMakeLists.txt
index 97919bee4..8430ca165 100644
--- a/3rdparty/libjasper/CMakeLists.txt
+++ b/3rdparty/libjasper/CMakeLists.txt
@@ -48,4 +48,3 @@ endif()
 if(NOT BUILD_SHARED_LIBS)
   install(TARGETS ${JASPER_LIBRARY} ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
 endif()
-
diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt
index d47dd53fc..59dca6990 100644
--- a/3rdparty/libpng/CMakeLists.txt
+++ b/3rdparty/libpng/CMakeLists.txt
@@ -29,6 +29,10 @@ if(MSVC)
   add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
 endif(MSVC)
 
+if (HAVE_WINRT)
+  add_definitions(-DHAVE_WINRT)
+endif()
+
 add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
 
 target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARY})
diff --git a/3rdparty/libpng/opencv-libpng.patch b/3rdparty/libpng/opencv-libpng.patch
new file mode 100644
index 000000000..6ca96392a
--- /dev/null
+++ b/3rdparty/libpng/opencv-libpng.patch
@@ -0,0 +1,22 @@
+diff --git a/3rdparty/libpng/pngpriv.h b/3rdparty/libpng/pngpriv.h
+index 07b2b0b..e7824b8 100644
+--- a/3rdparty/libpng/pngpriv.h
++++ b/3rdparty/libpng/pngpriv.h
+@@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
+ 
+ /* Memory model/platform independent fns */
+ #ifndef PNG_ABORT
+-#  ifdef _WINDOWS_
++#  if defined(_WINDOWS_) && !defined(HAVE_WINRT)
+ #     define PNG_ABORT() ExitProcess(0)
+ #  else
+ #     define PNG_ABORT() abort()
+@@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
+ #  define png_memcpy _fmemcpy
+ #  define png_memset _fmemset
+ #else
+-#  ifdef _WINDOWS_   /* Favor Windows over C runtime fns */
++#  if defined(_WINDOWS_) && !defined(HAVE_WINRT)   /* Favor Windows over C runtime fns */
+ #   define CVT_PTR(ptr)          (ptr)
+ #   define CVT_PTR_NOCHECK(ptr)  (ptr)
+ #   define png_strlen  lstrlenA
diff --git a/3rdparty/libpng/pngpriv.h b/3rdparty/libpng/pngpriv.h
index 07b2b0b05..e7824b839 100644
--- a/3rdparty/libpng/pngpriv.h
+++ b/3rdparty/libpng/pngpriv.h
@@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
 
 /* Memory model/platform independent fns */
 #ifndef PNG_ABORT
-#  ifdef _WINDOWS_
+#  if defined(_WINDOWS_) && !defined(HAVE_WINRT)
 #     define PNG_ABORT() ExitProcess(0)
 #  else
 #     define PNG_ABORT() abort()
@@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
 #  define png_memcpy _fmemcpy
 #  define png_memset _fmemset
 #else
-#  ifdef _WINDOWS_   /* Favor Windows over C runtime fns */
+#  if defined(_WINDOWS_) && !defined(HAVE_WINRT)   /* Favor Windows over C runtime fns */
 #   define CVT_PTR(ptr)          (ptr)
 #   define CVT_PTR_NOCHECK(ptr)  (ptr)
 #   define png_strlen  lstrlenA
diff --git a/3rdparty/libtiff/CMakeLists.txt b/3rdparty/libtiff/CMakeLists.txt
index 46fef61c7..e087dc13e 100644
--- a/3rdparty/libtiff/CMakeLists.txt
+++ b/3rdparty/libtiff/CMakeLists.txt
@@ -17,7 +17,7 @@ check_include_file(string.h HAVE_STRING_H)
 check_include_file(sys/types.h HAVE_SYS_TYPES_H)
 check_include_file(unistd.h HAVE_UNISTD_H)
 
-if(WIN32)
+if(WIN32 AND NOT HAVE_WINRT)
   set(USE_WIN32_FILEIO 1)
 endif()
 
@@ -79,14 +79,12 @@ set(lib_srcs
   "${CMAKE_CURRENT_BINARY_DIR}/tif_config.h"
   )
 
-if(UNIX)
+if(WIN32 AND NOT HAVE_WINRT)
+  list(APPEND lib_srcs tif_win32.c)
+else()
   list(APPEND lib_srcs tif_unix.c)
 endif()
-
-if(WIN32)
-  list(APPEND lib_srcs tif_win32.c)
-endif(WIN32)
-
+
 ocv_warnings_disable(CMAKE_C_FLAGS -Wno-unused-but-set-variable -Wmissing-prototypes -Wmissing-declarations -Wundef -Wunused -Wsign-compare
                      -Wcast-align -Wshadow -Wno-maybe-uninitialized -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast)
 ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter) # clang
diff --git a/3rdparty/libtiff/tif_config.h.cmakein b/3rdparty/libtiff/tif_config.h.cmakein
index 1e6bc0454..182f2833d 100644
--- a/3rdparty/libtiff/tif_config.h.cmakein
+++ b/3rdparty/libtiff/tif_config.h.cmakein
@@ -168,4 +168,3 @@
 
 /* Support Deflate compression */
 #define ZIP_SUPPORT 1
-
diff --git a/3rdparty/readme.txt b/3rdparty/readme.txt
index 6d2aeeca9..ca46fbd57 100644
--- a/3rdparty/readme.txt
+++ b/3rdparty/readme.txt
@@ -45,13 +45,13 @@ jasper-1.900.1 - JasPer is a collection of software
                  and manipulation of images. This software can handle image data in a
                  variety of formats. One such format supported by JasPer is the JPEG-2000
                  format defined in ISO/IEC 15444-1.
-
+
                  Copyright (c) 1999-2000 Image Power, Inc.
                  Copyright (c) 1999-2000 The University of British Columbia
                  Copyright (c) 2001-2003 Michael David Adams
 
                  The JasPer license can be found in src/libjasper.
-
+
                  OpenCV on Windows uses pre-built libjasper library (lib/libjasper*).
                  To get the latest source code, please, visit the project homepage:
diff --git a/3rdparty/tbb/.gitignore b/3rdparty/tbb/.gitignore
index 601e1b265..69ce9ceb9 100644
--- a/3rdparty/tbb/.gitignore
+++ b/3rdparty/tbb/.gitignore
@@ -1 +1 @@
-tbb*.tgz
\ No newline at end of file
+tbb*.tgz
diff --git a/3rdparty/tbb/CMakeLists.txt b/3rdparty/tbb/CMakeLists.txt
index ce0819596..c728440f2 100644
--- a/3rdparty/tbb/CMakeLists.txt
+++ b/3rdparty/tbb/CMakeLists.txt
@@ -11,7 +11,7 @@ if (WIN32 AND ARM)
   set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130613oss_src.tgz")
   set(tbb_md5 "108c8c1e481b0aaea61878289eb28b6a")
   set(tbb_version_file "version_string.ver")
-  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused-parameter)
+  ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702)
 else()
   # 4.1 update 2 - works fine
   set(tbb_ver "tbb41_20130116oss")
@@ -230,9 +230,15 @@ endif()
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
 string(REPLACE "-Werror=non-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 
+if (WIN32)
+  set(tbb_debug_postfix "_debug") # to fit pragmas in _windef.h inside TBB
+else()
+  set(tbb_debug_postfix ${OPENCV_DEBUG_POSTFIX})
+endif()
+
 set_target_properties(tbb
   PROPERTIES OUTPUT_NAME tbb
-  DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+  DEBUG_POSTFIX "${tbb_debug_postfix}"
   ARCHIVE_OUTPUT_DIRECTORY ${3P_LIBRARY_OUTPUT_PATH}
   RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
   )
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f464b2263..59547653f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -212,7 +212,7 @@ OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions"
 OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
 OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
 OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
-
+OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
 
 # uncategorized options
 # ===================================================
@@ -296,7 +296,6 @@ endif()
 # Path for build/platform -specific headers
 # ----------------------------------------------------------------------------
 set(OPENCV_CONFIG_FILE_INCLUDE_DIR "${CMAKE_BINARY_DIR}/" CACHE PATH "Where to create the platform-dependant cvconfig.h")
-add_definitions(-DHAVE_CVCONFIG_H)
 ocv_include_directories(${OPENCV_CONFIG_FILE_INCLUDE_DIR})
@@ -370,9 +369,6 @@ if(UNIX)
   include(CheckIncludeFile)
 
   if(NOT APPLE)
-    CHECK_INCLUDE_FILE(alloca.h HAVE_ALLOCA_H)
-    CHECK_FUNCTION_EXISTS(alloca HAVE_ALLOCA)
-    CHECK_INCLUDE_FILE(unistd.h HAVE_UNISTD_H)
     CHECK_INCLUDE_FILE(pthread.h HAVE_LIBPTHREAD)
     if(ANDROID)
       set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log)
@@ -382,7 +378,7 @@ if(UNIX)
       set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
     endif()
   else()
-    add_definitions(-DHAVE_ALLOCA -DHAVE_ALLOCA_H -DHAVE_LIBPTHREAD -DHAVE_UNISTD_H)
+    set(HAVE_LIBPTHREAD YES)
   endif()
 endif()
@@ -501,6 +497,8 @@ include(cmake/OpenCVGenAndroidMK.cmake)
 # Generate OpenCVConfig.cmake and OpenCVConfig-version.cmake for cmake projects
 include(cmake/OpenCVGenConfig.cmake)
 
+# Generate Info.plist for the IOS framework
+include(cmake/OpenCVGenInfoPlist.cmake)
+
 # ----------------------------------------------------------------------------
 # Summary:
@@ -607,6 +605,16 @@ if(ANDROID)
   status("    Android examples:" BUILD_ANDROID_EXAMPLES AND CAN_BUILD_ANDROID_PROJECTS THEN YES ELSE NO)
 endif()
 
+# ================== Windows RT features ==================
+if(WIN32)
+status("")
+  status("  Windows RT support:" HAVE_WINRT THEN YES ELSE NO)
+  if (ENABLE_WINRT_MODE)
+    status("    Windows SDK v8.0:" ${WINDOWS_SDK_PATH})
+    status("    Visual Studio 2012:" ${VISUAL_STUDIO_PATH})
+  endif()
+endif(WIN32)
+
 # ========================== GUI ==========================
 status("")
 status("  GUI: ")
@@ -739,8 +747,8 @@ if(DEFINED WITH_GIGEAPI)
 endif(DEFINED WITH_GIGEAPI)
 
 if(DEFINED WITH_QUICKTIME)
-    status("    QuickTime:" WITH_QUICKTIME THEN YES ELSE NO)
-    status("    QTKit:" WITH_QUICKTIME THEN NO ELSE YES)
+    status("    QuickTime:" HAVE_QUICKTIME THEN YES ELSE NO)
+    status("    QTKit:" HAVE_QTKIT THEN YES ELSE NO)
 endif(DEFINED WITH_QUICKTIME)
 
 if(DEFINED WITH_UNICAP)
@@ -885,4 +893,3 @@ ocv_finalize_status()
 if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
   message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
 endif()
-
diff --git a/README b/README
deleted file mode 100644
index 0799dff89..000000000
--- a/README
+++ /dev/null
@@ -1,17 +0,0 @@
-OpenCV: open source computer vision library
-
-Homepage: http://opencv.org
-Online docs: http://docs.opencv.org
-Q&A forum: http://answers.opencv.org
-Dev zone: http://code.opencv.org
-
-Please read before starting work on a pull request:
-    http://code.opencv.org/projects/opencv/wiki/How_to_contribute
-
-Summary of guidelines:
-
-* One pull request per issue;
-* Choose the right base branch;
-* Include tests and documentation;
-* Clean up "oops" commits before submitting;
-* Follow the coding style guide.
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..403f118ee
--- /dev/null
+++ b/README.md
@@ -0,0 +1,23 @@
+### OpenCV: Open Source Computer Vision Library
+
+#### Resources
+
+* Homepage: <http://opencv.org>
+* Docs: <http://docs.opencv.org>
+* Q&A forum: <http://answers.opencv.org>
+* Issue tracking: <http://code.opencv.org>
+
+#### Contributing
+
+Please read before starting work on a pull request: <http://code.opencv.org/projects/opencv/wiki/How_to_contribute>
+
+Summary of guidelines:
+
+* One pull request per issue;
+* Choose the right base branch;
+* Include tests and documentation;
+* Clean up "oops" commits before submitting;
+* Follow the coding style guide.
+
+[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/gittip1.png)](https://www.gittip.com/OpenCV/)
+[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/paypal-donate-button.png)](https://www.paypal.com/cgi-bin/webscr?item_name=Donation+to+OpenCV&cmd=_donations&business=accountant%40opencv.org)
\ No newline at end of file
diff --git a/apps/haartraining/CMakeLists.txt b/apps/haartraining/CMakeLists.txt
index 953be3b7e..92c4473b1 100644
--- a/apps/haartraining/CMakeLists.txt
+++ b/apps/haartraining/CMakeLists.txt
@@ -79,4 +79,3 @@ if(ENABLE_SOLUTION_FOLDERS)
   set_target_properties(opencv_haartraining PROPERTIES FOLDER "applications")
   set_target_properties(opencv_haartraining_engine PROPERTIES FOLDER "applications")
 endif()
-
diff --git a/apps/haartraining/_cvcommon.h b/apps/haartraining/_cvcommon.h
index e4f108164..136fd6808 100644
--- a/apps/haartraining/_cvcommon.h
+++ b/apps/haartraining/_cvcommon.h
@@ -100,4 +100,3 @@ int icvGetIdxAt( CvMat* idx, int pos )
 void icvSave( const CvArr* ptr, const char* filename, int line );
 
 #endif /* __CVCOMMON_H_ */
-
diff --git a/apps/haartraining/performance.cpp b/apps/haartraining/performance.cpp
index 2fe98f821..fd9c857a8 100644
--- a/apps/haartraining/performance.cpp
+++ b/apps/haartraining/performance.cpp
@@ -376,4 +376,3 @@ int main( int argc, char* argv[] )
 
     return 0;
 }
-
diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt
index be60137a9..f4f547893 100644
--- a/apps/traincascade/CMakeLists.txt
+++ b/apps/traincascade/CMakeLists.txt
@@ -34,4 +34,3 @@ if(ENABLE_SOLUTION_FOLDERS)
 endif()
 
 install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)
-
diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake
index 7514285d9..8a297c685 100644
--- a/cmake/OpenCVCRTLinkage.cmake
+++ b/cmake/OpenCVCRTLinkage.cmake
@@ -2,6 +2,45 @@ if(NOT MSVC)
   message(FATAL_ERROR "CRT options are available only for MSVC")
 endif()
 
+#INCLUDE (CheckIncludeFiles)
+
+set(HAVE_WINRT FALSE)
+
+# search Windows Platform SDK
+message(STATUS "Checking for Windows Platform SDK")
+GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
+if (WINDOWS_SDK_PATH STREQUAL "")
+  set(HAVE_MSPDK FALSE)
+  message(STATUS "Windows Platform SDK 8.0 was not found")
+else()
+  set(HAVE_MSPDK TRUE)
+endif()
+
+#search for Visual Studio 11.0 install directory
+message(STATUS "Checking for Visual Studio 2012")
+GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
+if (VISUAL_STUDIO_PATH STREQUAL "")
+  set(HAVE_MSVC2012 FALSE)
+  message(STATUS "Visual Studio 2012 was not found")
+else()
+  set(HAVE_MSVC2012 TRUE)
+endif()
+
+try_compile(HAVE_WINRT_SDK
+  "${OpenCV_BINARY_DIR}"
+  "${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp")
+
+if (ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
+  set(HAVE_WINRT TRUE)
+endif()
+
+if (HAVE_WINRT)
+  add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
+  set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /appcontainer")
+  set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /appcontainer")
+  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /appcontainer")
+endif()
+
 if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT)
   foreach(flag_var
           CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
@@ -62,4 +101,3 @@ if(NOT BUILD_WITH_DEBUG_INFO AND NOT MSVC)
   string(REPLACE "/Zi" "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
   string(REPLACE "/Zi" "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
 endif()
-
diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake
index 7a91b188a..8ead1a170 100644
--- a/cmake/OpenCVCompilerOptions.cmake
+++ b/cmake/OpenCVCompilerOptions.cmake
@@ -239,6 +239,10 @@ if(MSVC)
       set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /fp:fast") # !! important - be on the same wave with x64 compilers
     endif()
   endif()
+
+  if(OPENCV_WARNINGS_ARE_ERRORS)
+    set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /WX")
+  endif()
 endif()
 
 # Extra link libs if the user selects building static libs:
@@ -294,4 +298,4 @@ if(MSVC)
   if(NOT ENABLE_NOISY_WARNINGS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY
   endif()
-endif()
\ No newline at end of file
+endif()
diff --git a/cmake/OpenCVConfig.cmake b/cmake/OpenCVConfig.cmake
index 300fcd419..ef920e94f 100644
--- a/cmake/OpenCVConfig.cmake
+++ b/cmake/OpenCVConfig.cmake
@@ -156,4 +156,3 @@ else()
   set(OpenCV_FOUND FALSE CACHE BOOL "" FORCE)
   set(OPENCV_FOUND FALSE CACHE BOOL "" FORCE)
 endif()
-
diff --git a/cmake/OpenCVDetectCUDA.cmake b/cmake/OpenCVDetectCUDA.cmake
index 3b93f2932..212d344d5 100644
--- a/cmake/OpenCVDetectCUDA.cmake
+++ b/cmake/OpenCVDetectCUDA.cmake
@@ -96,7 +96,11 @@ if(CUDA_FOUND)
     if(CUDA_GENERATION STREQUAL "Fermi")
       set(__cuda_arch_bin "2.0 2.1(2.0)")
     elseif(CUDA_GENERATION STREQUAL "Kepler")
-      set(__cuda_arch_bin "3.0")
+      if(${CUDA_VERSION} VERSION_LESS "5.0")
+        set(__cuda_arch_bin "3.0")
+      else()
+        set(__cuda_arch_bin "3.0 3.5")
+      endif()
     elseif(CUDA_GENERATION STREQUAL "Auto")
       execute_process( COMMAND "${CUDA_NVCC_EXECUTABLE}" "${OpenCV_SOURCE_DIR}/cmake/checks/OpenCVDetectCudaArch.cu" "--run"
                        WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
@@ -110,8 +114,12 @@ if(CUDA_FOUND)
   endif()
 
   if(NOT DEFINED __cuda_arch_bin)
-    set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
-    set(__cuda_arch_ptx "2.0 3.0")
+    if(${CUDA_VERSION} VERSION_LESS "5.0")
+      set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
+    else()
+      set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0 3.5")
+    endif()
+    set(__cuda_arch_ptx "3.0")
   endif()
 
   set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake
index debaaec53..54ddac15d 100644
--- a/cmake/OpenCVDetectPython.cmake
+++ b/cmake/OpenCVDetectPython.cmake
@@ -108,7 +108,7 @@ if(PYTHON_EXECUTABLE)
                       OUTPUT_QUIET
                       ERROR_VARIABLE SPHINX_OUTPUT
                       OUTPUT_STRIP_TRAILING_WHITESPACE)
-      if(SPHINX_OUTPUT MATCHES "^Sphinx v([0-9][^ \n]*)")
+      if(SPHINX_OUTPUT MATCHES "Sphinx v([0-9][^ \n]*)")
        set(SPHINX_VERSION "${CMAKE_MATCH_1}")
        set(HAVE_SPHINX 1)
        message(STATUS "Found Sphinx ${SPHINX_VERSION}: ${SPHINX_BUILD}")
diff --git a/cmake/OpenCVFindIPP.cmake b/cmake/OpenCVFindIPP.cmake
index d1af60565..772cae886 100644
--- a/cmake/OpenCVFindIPP.cmake
+++ b/cmake/OpenCVFindIPP.cmake
@@ -136,12 +136,20 @@ endfunction()
 
 # ------------------------------------------------------------------------
 # This is auxiliary function called from set_ipp_variables()
-# to set IPP_LIBRARIES variable in IPP 7.x style
+# to set IPP_LIBRARIES variable in IPP 7.x and 8.x style
 # ------------------------------------------------------------------------
-function(set_ipp_new_libraries)
+function(set_ipp_new_libraries _LATEST_VERSION)
   set(IPP_PREFIX "ipp")
-  set(IPP_SUFFIX "_l")    # static not threaded libs suffix
-  set(IPP_THRD "_t")      # static threaded libs suffix
+
+  if(${_LATEST_VERSION} VERSION_LESS "8.0")
+    set(IPP_SUFFIX "_l")   # static not threaded libs suffix IPP 7.x
+  else()
+    if(WIN32)
+      set(IPP_SUFFIX "mt") # static not threaded libs suffix IPP 8.x for Windows
+    else()
+      set(IPP_SUFFIX "")   # static not threaded libs suffix IPP 8.x for Linux/OS X
+    endif()
+  endif()
   set(IPPCORE "core")     # core functionality
   set(IPPSP   "s")        # signal processing
   set(IPPIP   "i")        # image processing
@@ -199,7 +207,9 @@ function(set_ipp_variables _LATEST_VERSION)
     # set INCLUDE and LIB folders
     set(IPP_INCLUDE_DIRS ${IPP_ROOT_DIR}/include PARENT_SCOPE)
 
-    if (IPP_X64)
+    if (APPLE)
+      set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib PARENT_SCOPE)
+    elseif (IPP_X64)
       if(NOT EXISTS ${IPP_ROOT_DIR}/lib/intel64)
         message(SEND_ERROR "IPP EM64T libraries not found")
       endif()
@@ -211,8 +221,8 @@ function(set_ipp_variables _LATEST_VERSION)
       set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/ia32 PARENT_SCOPE)
     endif()
 
-    # set IPP_LIBRARIES variable (7.x lib names)
-    set_ipp_new_libraries()
+    # set IPP_LIBRARIES variable (7.x or 8.x lib names)
+    set_ipp_new_libraries(${_LATEST_VERSION})
     set(IPP_LIBRARIES ${IPP_LIBRARIES} PARENT_SCOPE)
     message(STATUS "IPP libs: ${IPP_LIBRARIES}")
 
@@ -336,4 +346,4 @@ if(WIN32 AND MINGW AND NOT IPP_LATEST_VERSION_MAJOR LESS 7)
   # See http://code.opencv.org/issues/1906 for additional details
   set(MSV_NTDLL "ntdll")
   set(IPP_LIBRARIES ${IPP_LIBRARIES} ${MSV_NTDLL}${IPP_LIB_SUFFIX})
-endif()
\ No newline at end of file
+endif()
diff --git a/cmake/OpenCVFindLibsGUI.cmake b/cmake/OpenCVFindLibsGUI.cmake
index d685d23fe..fb75344bb 100644
--- a/cmake/OpenCVFindLibsGUI.cmake
+++ b/cmake/OpenCVFindLibsGUI.cmake
@@ -5,12 +5,11 @@
 #--- Win32 UI ---
 ocv_clear_vars(HAVE_WIN32UI)
 if(WITH_WIN32UI)
-  TRY_COMPILE(HAVE_WIN32UI
-    "${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
+  try_compile(HAVE_WIN32UI
+    "${OpenCV_BINARY_DIR}"
     "${OpenCV_SOURCE_DIR}/cmake/checks/win32uitest.cpp"
-    CMAKE_FLAGS "\"user32.lib\" \"gdi32.lib\""
-    OUTPUT_VARIABLE OUTPUT)
-endif(WITH_WIN32UI)
+    CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=user32;gdi32")
+endif()
 
 # --- QT4 ---
 ocv_clear_vars(HAVE_QT HAVE_QT5)
@@ -65,3 +64,12 @@ if(WITH_OPENGL)
     endif()
   endif()
 endif(WITH_OPENGL)
+
+# --- Carbon & Cocoa ---
+if(APPLE)
+  if(WITH_CARBON)
+    set(HAVE_CARBON YES)
+  elseif(NOT IOS)
+    set(HAVE_COCOA YES)
+  endif()
+endif()
diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake
index 33e428b7c..f27a302ab 100644
--- a/cmake/OpenCVFindLibsGrfmt.cmake
+++ b/cmake/OpenCVFindLibsGrfmt.cmake
@@ -36,57 +36,59 @@ if(WITH_TIFF)
       ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
     endif()
   endif()
-endif()
-if(WITH_TIFF AND NOT TIFF_FOUND)
-  ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
+  if(NOT TIFF_FOUND)
+    ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
 
-  set(TIFF_LIBRARY libtiff)
-  set(TIFF_LIBRARIES ${TIFF_LIBRARY})
-  add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
-  set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
-  ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
-endif()
-
-if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
-  set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
-endif()
-
-if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
-  set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
-endif()
-
-if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
-  list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
-  if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
-    file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
-    string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
-    unset(tiff_version_str)
+    set(TIFF_LIBRARY libtiff)
+    set(TIFF_LIBRARIES ${TIFF_LIBRARY})
+    add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
+    set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
+    ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
   endif()
-  unset(_TIFF_INCLUDE_DIR)
+
+  if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
+    set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
+  endif()
+
+  if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
+    set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
+  endif()
+
+  if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
+    list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
+    if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
+      file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
+      string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
+      unset(tiff_version_str)
+    endif()
+    unset(_TIFF_INCLUDE_DIR)
+  endif()
+
+  set(HAVE_TIFF YES)
 endif()
 
 # --- libjpeg (optional) ---
-if(WITH_JPEG AND NOT IOS)
+if(WITH_JPEG)
   if(BUILD_JPEG)
     ocv_clear_vars(JPEG_FOUND)
   else()
     include(FindJPEG)
   endif()
+
+  if(NOT JPEG_FOUND)
+    ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
+
+    set(JPEG_LIBRARY libjpeg)
+    set(JPEG_LIBRARIES ${JPEG_LIBRARY})
+    add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
+    set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
+  endif()
+
+  ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
+  set(HAVE_JPEG YES)
 endif()
 
-if(WITH_JPEG AND NOT JPEG_FOUND)
-  ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
-
-  set(JPEG_LIBRARY libjpeg)
-  set(JPEG_LIBRARIES ${JPEG_LIBRARY})
-  add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
-  set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
-endif()
-
-ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
-
-
 # --- libjasper (optional, should be searched after libjpeg) ---
 if(WITH_JASPER)
   if(BUILD_JASPER)
@@ -94,53 +96,55 @@ if(WITH_JASPER)
   else()
     include(FindJasper)
   endif()
-endif()
 
-if(WITH_JASPER AND NOT JASPER_FOUND)
-  ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
+  if(NOT JASPER_FOUND)
+    ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
 
-  set(JASPER_LIBRARY libjasper)
-  set(JASPER_LIBRARIES ${JASPER_LIBRARY})
-  add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
-  set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
-endif()
+    set(JASPER_LIBRARY libjasper)
+    set(JASPER_LIBRARIES ${JASPER_LIBRARY})
+    add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
+    set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
+  endif()
 
-if(NOT JASPER_VERSION_STRING)
-  ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
+  set(HAVE_JASPER YES)
+
+  if(NOT JASPER_VERSION_STRING)
+    ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
+  endif()
 endif()
 
 # --- libpng (optional, should be searched after zlib) ---
-if(WITH_PNG AND NOT IOS)
+if(WITH_PNG)
   if(BUILD_PNG)
     ocv_clear_vars(PNG_FOUND)
   else()
     include(FindPNG)
     if(PNG_FOUND)
       include(CheckIncludeFile)
-      check_include_file("${PNG_PNG_INCLUDE_DIR}/png.h" HAVE_PNG_H)
       check_include_file("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" HAVE_LIBPNG_PNG_H)
-      if(HAVE_PNG_H)
-        ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
-      elseif(HAVE_LIBPNG_PNG_H)
+      if(HAVE_LIBPNG_PNG_H)
         ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
+      else()
+        ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
       endif()
     endif()
   endif()
+
+  if(NOT PNG_FOUND)
+    ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
+
+    set(PNG_LIBRARY libpng)
+    set(PNG_LIBRARIES ${PNG_LIBRARY})
+    add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
+    set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
+    set(PNG_DEFINITIONS "")
+    ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
+  endif()
+
+  set(HAVE_PNG YES)
+  set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
 endif()
 
-if(WITH_PNG AND NOT PNG_FOUND)
-  ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_PNG_H HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
-
-  set(PNG_LIBRARY libpng)
-  set(PNG_LIBRARIES ${PNG_LIBRARY})
-  add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
-  set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
-  set(PNG_DEFINITIONS "")
-  ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
-endif()
-
-set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
-
 # --- OpenEXR (optional) ---
 if(WITH_OPENEXR)
   if(BUILD_OPENEXR)
@@ -148,17 +152,24 @@ if(WITH_OPENEXR)
   else()
     include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenEXR.cmake")
   endif()
-endif()
 
-if(WITH_OPENEXR AND NOT OPENEXR_FOUND)
-  ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
+  if(NOT OPENEXR_FOUND)
+    ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
 
-  set(OPENEXR_LIBRARIES IlmImf)
-  set(OPENEXR_ILMIMF_LIBRARY IlmImf)
-  add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
+    set(OPENEXR_LIBRARIES IlmImf)
+    set(OPENEXR_ILMIMF_LIBRARY IlmImf)
+    add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
+  endif()
+
+  set(HAVE_OPENEXR YES)
 endif()
 
 #cmake 2.8.2 bug - it fails to determine zlib version
 if(ZLIB_FOUND)
   ocv_parse_header2(ZLIB "${ZLIB_INCLUDE_DIR}/zlib.h" ZLIB_VERSION)
-endif()
\ No newline at end of file
+endif()
+
+# --- Apple ImageIO ---
+if(WITH_IMAGEIO)
+  set(HAVE_IMAGEIO YES)
+endif()
diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake
index 0ca4828fe..dcd5e87d8 100644
--- a/cmake/OpenCVFindLibsVideo.cmake
+++ b/cmake/OpenCVFindLibsVideo.cmake
@@ -3,13 +3,12 @@
 # ----------------------------------------------------------------------------
 ocv_clear_vars(HAVE_VFW)
-if (WITH_VFW)
-  TRY_COMPILE(HAVE_VFW
-    "${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
+if(WITH_VFW)
+  try_compile(HAVE_VFW
+    "${OpenCV_BINARY_DIR}"
     "${OpenCV_SOURCE_DIR}/cmake/checks/vfwtest.cpp"
-    CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=vfw32"
-    OUTPUT_VARIABLE OUTPUT)
-  endif(WITH_VFW)
+    CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=vfw32")
+endif(WITH_VFW)
 
 # --- GStreamer ---
 ocv_clear_vars(HAVE_GSTREAMER)
@@ -58,7 +57,14 @@ if(WITH_PVAPI)
     set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}")
   endif()
 
-  set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
+  if(WIN32)
+    if(MINGW)
+      set(PVAPI_DEFINITIONS "-DPVDECL=__stdcall")
+    endif(MINGW)
+    set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/PvAPI.lib" CACHE PATH "The PvAPI library")
+  else(WIN32)
+    set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
+  endif(WIN32)
   if(EXISTS "${PVAPI_LIBRARY}")
     set(HAVE_PVAPI TRUE)
   endif()
@@ -228,3 +234,17 @@ if(WIN32)
     list(APPEND HIGHGUI_LIBRARIES winmm)
   endif()
 endif(WIN32)
+
+# --- Apple AV Foundation ---
+if(WITH_AVFOUNDATION)
+  set(HAVE_AVFOUNDATION YES)
+endif()
+
+# --- QuickTime ---
+if (NOT IOS)
+  if(WITH_QUICKTIME)
+    set(HAVE_QUICKTIME YES)
+  elseif(APPLE)
+    set(HAVE_QTKIT YES)
+  endif()
+endif()
diff --git a/cmake/OpenCVFindXimea.cmake b/cmake/OpenCVFindXimea.cmake
index 27e2a78ad..6b86b609e 100644
--- a/cmake/OpenCVFindXimea.cmake
+++ b/cmake/OpenCVFindXimea.cmake
@@ -19,7 +19,7 @@ set(XIMEA_LIBRARY_DIR)
 if(WIN32)
   # Try to find the XIMEA API path in registry.
   GET_FILENAME_COMPONENT(XIMEA_PATH "[HKEY_CURRENT_USER\\Software\\XIMEA\\CamSupport\\API;Path]" ABSOLUTE)
-
+
   if(EXISTS ${XIMEA_PATH})
     set(XIMEA_FOUND 1)
     # set LIB folders
@@ -43,4 +43,4 @@ endif()
 
 mark_as_advanced(FORCE XIMEA_FOUND)
 mark_as_advanced(FORCE XIMEA_PATH)
-mark_as_advanced(FORCE XIMEA_LIBRARY_DIR)
\ No newline at end of file
+mark_as_advanced(FORCE XIMEA_LIBRARY_DIR)
diff --git a/cmake/OpenCVGenHeaders.cmake b/cmake/OpenCVGenHeaders.cmake
index d2d38c90a..c7129fefa 100644
--- a/cmake/OpenCVGenHeaders.cmake
+++ b/cmake/OpenCVGenHeaders.cmake
@@ -1,13 +1,3 @@
-# ----------------------------------------------------------------------------
-#  Variables for cvconfig.h.cmake
-# ----------------------------------------------------------------------------
-set(PACKAGE "opencv")
-set(PACKAGE_BUGREPORT "opencvlibrary-devel@lists.sourceforge.net")
-set(PACKAGE_NAME "opencv")
-set(PACKAGE_STRING "${PACKAGE} ${OPENCV_VERSION}")
-set(PACKAGE_TARNAME "${PACKAGE}")
-set(PACKAGE_VERSION "${OPENCV_VERSION}")
-
 # platform-specific config file
 configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/cvconfig.h.cmake" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h")
diff --git a/cmake/OpenCVGenInfoPlist.cmake b/cmake/OpenCVGenInfoPlist.cmake
new file mode 100644
index 000000000..97c674ceb
--- /dev/null
+++ b/cmake/OpenCVGenInfoPlist.cmake
@@ -0,0 +1,4 @@
+if(IOS)
+  configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in"
+                 "${CMAKE_BINARY_DIR}/ios/Info.plist")
+endif()
diff --git a/cmake/OpenCVGenPkgconfig.cmake b/cmake/OpenCVGenPkgconfig.cmake
index 49d670734..7bfc7bc5a 100644
--- a/cmake/OpenCVGenPkgconfig.cmake
+++ b/cmake/OpenCVGenPkgconfig.cmake
@@ -12,7 +12,6 @@ set(prefix      "${CMAKE_INSTALL_PREFIX}")
 set(exec_prefix "\${prefix}")
 set(libdir      "") #TODO: need link paths for OpenCV_EXTRA_COMPONENTS
 set(includedir  "\${prefix}/${OPENCV_INCLUDE_INSTALL_PATH}")
-set(VERSION     ${OPENCV_VERSION})
 
 if(CMAKE_BUILD_TYPE MATCHES "Release")
   set(ocv_optkind OPT)
diff --git a/cmake/OpenCVLegacyOptions.cmake b/cmake/OpenCVLegacyOptions.cmake
index e05ad4c48..d74bd6ed8 100644
--- a/cmake/OpenCVLegacyOptions.cmake
+++ b/cmake/OpenCVLegacyOptions.cmake
@@ -22,4 +22,3 @@ if(DEFINED OPENCV_BUILD_3RDPARTY_LIBS)
   set(BUILD_PNG ${OPENCV_BUILD_3RDPARTY_LIBS} CACHE BOOL "Set via depricated OPENCV_BUILD_3RDPARTY_LIBS" FORCE)
   unset(OPENCV_BUILD_3RDPARTY_LIBS CACHE)
 endif()
-
diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake
index 81340bd0e..80c07bda3 100644
--- a/cmake/OpenCVModule.cmake
+++ b/cmake/OpenCVModule.cmake
@@ -470,8 +470,16 @@ endmacro()
 # ocv_create_module()
 # ocv_create_module(SKIP_LINK)
 macro(ocv_create_module)
+  # The condition we ought to be testing here is whether ocv_add_precompiled_headers will
+  # be called at some point in the future. We can't look into the future, though,
+  # so this will have to do.
+  if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/precomp.hpp")
+    get_native_precompiled_header(${the_module} precomp.hpp)
+  endif()
+
   add_library(${the_module} ${OPENCV_MODULE_TYPE} ${OPENCV_MODULE_${the_module}_HEADERS} ${OPENCV_MODULE_${the_module}_SOURCES}
-    "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp")
+    "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp"
+    ${${the_module}_pch})
 
   if(NOT "${ARGN}" STREQUAL "SKIP_LINK")
     target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_MODULE_${the_module}_DEPS_EXT} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${ARGN})
@@ -508,7 +516,8 @@ macro(ocv_create_module)
       )
   endif()
 
-  if(BUILD_SHARED_LIBS)
+  if((NOT DEFINED OPENCV_MODULE_TYPE AND BUILD_SHARED_LIBS)
+      OR (DEFINED OPENCV_MODULE_TYPE AND OPENCV_MODULE_TYPE STREQUAL SHARED))
     if(MSVC)
       set_target_properties(${the_module} PROPERTIES DEFINE_SYMBOL CVAPI_EXPORTS)
     else()
@@ -636,7 +645,9 @@ function(ocv_add_perf_tests)
       set(OPENCV_PERF_${the_module}_SOURCES ${perf_srcs} ${perf_hdrs})
     endif()
 
-    add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES})
+    get_native_precompiled_header(${the_target} perf_precomp.hpp)
+
+    add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES} ${${the_target}_pch})
     target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${perf_deps} ${OPENCV_LINKER_LIBS})
     add_dependencies(opencv_perf_tests ${the_target})
@@ -684,7 +695,9 @@ function(ocv_add_accuracy_tests)
       set(OPENCV_TEST_${the_module}_SOURCES ${test_srcs} ${test_hdrs})
     endif()
 
-    add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES})
+    get_native_precompiled_header(${the_target} test_precomp.hpp)
+
+    add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES} ${${the_target}_pch})
     target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${test_deps} ${OPENCV_LINKER_LIBS})
     add_dependencies(opencv_tests ${the_target})
diff --git a/cmake/OpenCVPCHSupport.cmake b/cmake/OpenCVPCHSupport.cmake
index cfc4bfa72..9b849ebd7 100644
--- a/cmake/OpenCVPCHSupport.cmake
+++ b/cmake/OpenCVPCHSupport.cmake
@@ -272,12 +272,9 @@ ENDMACRO(ADD_PRECOMPILED_HEADER)
 MACRO(GET_NATIVE_PRECOMPILED_HEADER _targetName _input)
 
     if(CMAKE_GENERATOR MATCHES "^Visual.*$")
-        SET(_dummy_str "#include \"${_input}\"\n"
-"// This is required to suppress LNK4221. Very annoying.\n"
-"void *g_${_targetName}Dummy = 0\;\n")
+        set(_dummy_str "#include \"${_input}\"\n")
 
-        # Use of cxx extension for generated files (as Qt does)
-        SET(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cxx)
+        set(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cpp)
         if(EXISTS ${${_targetName}_pch})
             # Check if contents is the same, if not rewrite
             # todo
@@ -337,11 +334,7 @@ ENDMACRO(ADD_NATIVE_PRECOMPILED_HEADER)
 
 macro(ocv_add_precompiled_header_to_target the_target pch_header)
   if(PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS AND EXISTS "${pch_header}")
-    if(CMAKE_GENERATOR MATCHES Visual)
-      string(REGEX REPLACE "hpp$" "cpp" ${the_target}_pch "${pch_header}")
-      add_native_precompiled_header(${the_target} ${pch_header})
-      unset(${the_target}_pch)
-    elseif(CMAKE_GENERATOR MATCHES Xcode)
+    if(CMAKE_GENERATOR MATCHES "^Visual" OR CMAKE_GENERATOR MATCHES Xcode)
       add_native_precompiled_header(${the_target} ${pch_header})
     elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja")
       add_precompiled_header(${the_target} ${pch_header})
diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake
index 59366eb03..ddf029067 100644
--- a/cmake/OpenCVUtils.cmake
+++ b/cmake/OpenCVUtils.cmake
@@ -77,7 +77,7 @@ MACRO(ocv_check_compiler_flag LANG FLAG RESULT)
     if(_fname)
       MESSAGE(STATUS "Performing Test ${RESULT}")
       TRY_COMPILE(${RESULT}
-        ${CMAKE_BINARY_DIR}
+        "${CMAKE_BINARY_DIR}"
         "${_fname}"
         COMPILE_DEFINITIONS "${FLAG}"
         OUTPUT_VARIABLE OUTPUT)
@@ -515,4 +515,4 @@ function(ocv_source_group group)
   cmake_parse_arguments(OCV_SOURCE_GROUP "" "" "GLOB" ${ARGN})
   file(GLOB srcs ${OCV_SOURCE_GROUP_GLOB})
   source_group(${group} FILES ${srcs})
-endfunction()
\ No newline at end of file
+endfunction()
diff --git a/cmake/checks/OpenCVDetectCudaArch.cu b/cmake/checks/OpenCVDetectCudaArch.cu
index 008f8ba8d..9d7086cf2 100644
--- a/cmake/checks/OpenCVDetectCudaArch.cu
+++ b/cmake/checks/OpenCVDetectCudaArch.cu
@@ -11,4 +11,4 @@ int main()
             printf("%d.%d ", prop.major, prop.minor);
     }
     return 0;
-}
\ No newline at end of file
+}
diff --git a/cmake/checks/vfwtest.cpp b/cmake/checks/vfwtest.cpp
index 63d545788..8d8ecb271 100644
--- a/cmake/checks/vfwtest.cpp
+++ b/cmake/checks/vfwtest.cpp
@@ -7,4 +7,4 @@ int main()
     AVIFileInit();
     AVIFileExit();
     return 0;
-}
\ No newline at end of file
+}
diff --git a/cmake/checks/win32uitest.cpp b/cmake/checks/win32uitest.cpp
index 6f13a09cc..f475e1c96 100644
--- a/cmake/checks/win32uitest.cpp
+++ b/cmake/checks/win32uitest.cpp
@@ -2,10 +2,10 @@
 
 int main(int argc, char** argv)
 {
-    CreateWindow(NULL /*lpClassName*/, NULL /*lpWindowName*/, 0 /*dwStyle*/, 0 /*x*/,
-        0 /*y*/, 0 /*nWidth*/, 0 /*nHeight*/, NULL /*hWndParent*/, NULL /*hMenu*/,
-        NULL /*hInstance*/, NULL /*lpParam*/);
-    DeleteDC(NULL);
+    CreateWindow(NULL /*lpClassName*/, NULL /*lpWindowName*/, 0 /*dwStyle*/, 0 /*x*/,
+                 0 /*y*/, 0 /*nWidth*/, 0 /*nHeight*/, NULL /*hWndParent*/, NULL /*hMenu*/,
+                 NULL /*hInstance*/, NULL /*lpParam*/);
+    DeleteDC(NULL);
 
-    return 0;
+    return 0;
 }
diff --git a/cmake/checks/winrttest.cpp b/cmake/checks/winrttest.cpp
new file mode 100644
index 000000000..9ec0c9ac1
--- /dev/null
+++ b/cmake/checks/winrttest.cpp
@@ -0,0 +1,6 @@
+#include <wrl/client.h>
+
+int main(int, char**)
+{
+    return 0;
+}
diff --git a/cmake/cl2cpp.cmake b/cmake/cl2cpp.cmake
index 4f18e9e64..1e932eabd 100644
--- a/cmake/cl2cpp.cmake
+++ b/cmake/cl2cpp.cmake
@@ -32,4 +32,4 @@ foreach(cl ${cl_list})
   file(APPEND ${OUTPUT} "const char* ${cl_filename}=\"${lines};\n")
 endforeach()
-file(APPEND ${OUTPUT} "}\n}\n")
\ No newline at end of file
+file(APPEND ${OUTPUT} "}\n}\n")
diff --git a/cmake/templates/cmake_uninstall.cmake.in b/cmake/templates/cmake_uninstall.cmake.in
index 14e601019..0e63d705c 100644
--- a/cmake/templates/cmake_uninstall.cmake.in
+++ b/cmake/templates/cmake_uninstall.cmake.in
@@ -23,5 +23,3 @@ FOREACH(file ${files})
     MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
   ENDIF(EXISTS "$ENV{DESTDIR}${file}")
 ENDFOREACH(file)
-
-
diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake
index f12730988..bce8eafca 100644
--- a/cmake/templates/cvconfig.h.cmake
+++ b/cmake/templates/cvconfig.h.cmake
@@ -1,20 +1,20 @@
-/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
-   systems. This function is required for `alloca.c' support on those systems.
-   */
-#cmakedefine CRAY_STACKSEG_END
+/* OpenCV compiled as static or dynamic libs */
+#cmakedefine BUILD_SHARED_LIBS
 
-/* Define to 1 if using `alloca.c'. */
-#cmakedefine C_ALLOCA
+/* Compile for 'real' NVIDIA GPU architectures */
+#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}"
 
-/* Define to 1 if you have `alloca', as a function or macro. */
-#cmakedefine HAVE_ALLOCA 1
+/* Create PTX or BIN for 1.0 compute capability */
+#cmakedefine CUDA_ARCH_BIN_OR_PTX_10
 
-/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
-   */
-#cmakedefine HAVE_ALLOCA_H 1
+/* NVIDIA GPU features are used */
+#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}"
 
-/* Video for Windows support */
-#cmakedefine HAVE_VFW
+/* Compile for 'virtual' NVIDIA PTX architectures */
+#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}"
+
+/* AVFoundation video libraries */
+#cmakedefine HAVE_AVFOUNDATION
 
 /* V4L capturing support */
 #cmakedefine HAVE_CAMV4L
@@ -22,15 +22,30 @@
 /* V4L2 capturing support */
 #cmakedefine HAVE_CAMV4L2
 
-/* V4L2 capturing support in videoio.h */
-#cmakedefine HAVE_VIDEOIO
-
-/* V4L/V4L2 capturing support via libv4l */
-#cmakedefine HAVE_LIBV4L
-
 /* Carbon windowing environment */
 #cmakedefine HAVE_CARBON
 
+/* AMD's Basic Linear Algebra Subprograms Library*/
+#cmakedefine HAVE_CLAMDBLAS
+
+/* AMD's OpenCL Fast Fourier Transform Library*/
+#cmakedefine HAVE_CLAMDFFT
+
+/* Cocoa API */
+#cmakedefine HAVE_COCOA
+
+/* C= */
+#cmakedefine HAVE_CSTRIPES
+
+/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/
+#cmakedefine HAVE_CUBLAS
+
+/* NVidia Cuda Runtime API*/
+#cmakedefine HAVE_CUDA
+
+/* NVidia Cuda Fast Fourier Transform (FFT) API*/
+#cmakedefine HAVE_CUFFT
+
 /* IEEE1394 capturing support */
 #cmakedefine HAVE_DC1394
 
@@ -40,197 +55,111 @@
 /* IEEE1394 capturing support - libdc1394 v2.x */
 #cmakedefine HAVE_DC1394_2
 
+/* DirectShow Video Capture library */
+#cmakedefine HAVE_DSHOW
+
+/* Eigen Matrix & Linear Algebra Library */
+#cmakedefine HAVE_EIGEN
+
+/* FFMpeg video library */
+#cmakedefine HAVE_FFMPEG
+
+/* ffmpeg's libswscale */
+#cmakedefine HAVE_FFMPEG_SWSCALE
+
 /* ffmpeg in Gentoo */
 #cmakedefine HAVE_GENTOO_FFMPEG
 
-/* FFMpeg video library */
-#cmakedefine HAVE_FFMPEG
-
-/* FFMpeg version flag */
-#cmakedefine NEW_FFMPEG
-
-/* ffmpeg's libswscale */
-#cmakedefine HAVE_FFMPEG_SWSCALE
-
 /* GStreamer multimedia framework */
-#cmakedefine HAVE_GSTREAMER
+#cmakedefine HAVE_GSTREAMER
 
 /* GTK+ 2.0 Thread support */
-#cmakedefine HAVE_GTHREAD
+#cmakedefine HAVE_GTHREAD
+
+/* Windows Runtime support */
+#cmakedefine HAVE_WINRT
 
 /* Win32 UI */
 #cmakedefine HAVE_WIN32UI
 
 /* GTK+ 2.x toolkit */
-#cmakedefine HAVE_GTK
-
-/* OpenEXR codec */
-#cmakedefine HAVE_ILMIMF
+#cmakedefine HAVE_GTK
 
 /* Apple ImageIO Framework */
-#cmakedefine HAVE_IMAGEIO
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#cmakedefine HAVE_INTTYPES_H 1
-
-/* JPEG-2000 codec */
-#cmakedefine HAVE_JASPER
-
-/* IJG JPEG codec */
-#cmakedefine HAVE_JPEG
-
-/* Define to 1 if you have the `dl' library (-ldl). */
-#cmakedefine HAVE_LIBDL 1
-
-/* Define to 1 if you have the `gomp' library (-lgomp). */
-#cmakedefine HAVE_LIBGOMP 1
-
-/* Define to 1 if you have the `m' library (-lm). */
-#cmakedefine HAVE_LIBM 1
-
-/* libpng/png.h needs to be included */
-#cmakedefine HAVE_LIBPNG_PNG_H
-
-/* Define to 1 if you have the `pthread' library (-lpthread). */
-#cmakedefine HAVE_LIBPTHREAD 1
-
-/* Define to 1 if you have the `lrint' function. */
-#cmakedefine HAVE_LRINT 1
-
-/* PNG codec */
-#cmakedefine HAVE_PNG
-
-/* Define to 1 if you have the `png_get_valid' function. */
-#cmakedefine HAVE_PNG_GET_VALID 1
-
-/* png.h needs to be included */
-#cmakedefine HAVE_PNG_H
-
-/* Define to 1 if you have the `png_set_tRNS_to_alpha' function. */
-#cmakedefine HAVE_PNG_SET_TRNS_TO_ALPHA 1
-
-/* QuickTime video libraries */
-#cmakedefine HAVE_QUICKTIME
-
-/* AVFoundation video libraries */
-#cmakedefine HAVE_AVFOUNDATION
-
-/* TIFF codec */
-#cmakedefine HAVE_TIFF
-
-/* Unicap video capture library */
-#cmakedefine HAVE_UNICAP
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#cmakedefine HAVE_UNISTD_H 1
-
-/* Xine video library */
-#cmakedefine HAVE_XINE
-
-/* OpenNI library */
-#cmakedefine HAVE_OPENNI
-
-/* LZ77 compression/decompression library (used for PNG) */
-#cmakedefine HAVE_ZLIB
+#cmakedefine HAVE_IMAGEIO
 
 /* Intel Integrated Performance Primitives */
-#cmakedefine HAVE_IPP
+#cmakedefine HAVE_IPP
 
-/* OpenCV compiled as static or dynamic libs */
-#cmakedefine BUILD_SHARED_LIBS
+/* JPEG-2000 codec */
+#cmakedefine HAVE_JASPER
 
-/* Name of package */
-#define PACKAGE "${PACKAGE}"
+/* IJG JPEG codec */
+#cmakedefine HAVE_JPEG
 
-/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
+/* libpng/png.h needs to be included */
+#cmakedefine HAVE_LIBPNG_PNG_H
 
-/* Define to the full name of this package. */
-#define PACKAGE_NAME "${PACKAGE_NAME}"
-
-/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "${PACKAGE_STRING}"
-
-/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "${PACKAGE_TARNAME}"
-
-/* Define to the version of this package. */
-#define PACKAGE_VERSION "${PACKAGE_VERSION}"
-
-/* If using the C implementation of alloca, define if you know the
-   direction of stack growth for your system; otherwise it will be
-   automatically deduced at runtime.
-    STACK_DIRECTION > 0 => grows toward higher addresses
-    STACK_DIRECTION < 0 => grows toward lower addresses
-    STACK_DIRECTION = 0 => direction of growth unknown */
-#cmakedefine STACK_DIRECTION
-
-/* Version number of package */
-#define VERSION "${PACKAGE_VERSION}"
-
-/* Define to 1 if your processor stores words with the most significant byte
-   first (like Motorola and SPARC, unlike Intel and VAX). */
*/ -#cmakedefine WORDS_BIGENDIAN - -/* Intel Threading Building Blocks */ -#cmakedefine HAVE_TBB - -/* C= */ -#cmakedefine HAVE_CSTRIPES - -/* Eigen Matrix & Linear Algebra Library */ -#cmakedefine HAVE_EIGEN - -/* NVidia Cuda Runtime API*/ -#cmakedefine HAVE_CUDA - -/* NVidia Cuda Fast Fourier Transform (FFT) API*/ -#cmakedefine HAVE_CUFFT - -/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/ -#cmakedefine HAVE_CUBLAS - -/* NVidia Video Decoding API*/ -#cmakedefine HAVE_NVCUVID - -/* Compile for 'real' NVIDIA GPU architectures */ -#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}" - -/* Compile for 'virtual' NVIDIA PTX architectures */ -#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}" - -/* NVIDIA GPU features are used */ -#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}" - -/* Create PTX or BIN for 1.0 compute capability */ -#cmakedefine CUDA_ARCH_BIN_OR_PTX_10 - -/* OpenCL Support */ -#cmakedefine HAVE_OPENCL - -/* AMD's OpenCL Fast Fourier Transform Library*/ -#cmakedefine HAVE_CLAMDFFT - -/* AMD's Basic Linear Algebra Subprograms Library*/ -#cmakedefine HAVE_CLAMDBLAS - -/* DirectShow Video Capture library */ -#cmakedefine HAVE_DSHOW +/* V4L/V4L2 capturing support via libv4l */ +#cmakedefine HAVE_LIBV4L /* Microsoft Media Foundation Capture library */ #cmakedefine HAVE_MSMF -/* XIMEA camera support */ -#cmakedefine HAVE_XIMEA +/* NVidia Video Decoding API*/ +#cmakedefine HAVE_NVCUVID + +/* OpenCL Support */ +#cmakedefine HAVE_OPENCL + +/* OpenEXR codec */ +#cmakedefine HAVE_OPENEXR /* OpenGL support*/ #cmakedefine HAVE_OPENGL -/* Clp support */ -#cmakedefine HAVE_CLP +/* OpenNI library */ +#cmakedefine HAVE_OPENNI + +/* PNG codec */ +#cmakedefine HAVE_PNG /* Qt support */ #cmakedefine HAVE_QT /* Qt OpenGL support */ #cmakedefine HAVE_QT_OPENGL + +/* QuickTime video libraries */ +#cmakedefine HAVE_QUICKTIME + +/* QTKit video libraries */ +#cmakedefine HAVE_QTKIT + +/* Intel Threading Building Blocks */ +#cmakedefine HAVE_TBB + +/* TIFF codec */ +#cmakedefine HAVE_TIFF + +/* Unicap video capture library */ +#cmakedefine HAVE_UNICAP + +/* Video for Windows support */ +#cmakedefine HAVE_VFW + +/* V4L2 capturing support in videoio.h */ +#cmakedefine HAVE_VIDEOIO + +/* Win32 UI */ +#cmakedefine HAVE_WIN32UI + +/* XIMEA camera support */ +#cmakedefine HAVE_XIMEA + +/* Xine video library */ +#cmakedefine HAVE_XINE + +/* Define to 1 if your processor stores words with the most significant byte + first (like Motorola and SPARC, unlike Intel and VAX). 
*/ +#cmakedefine WORDS_BIGENDIAN diff --git a/cmake/templates/opencv-XXX.pc.cmake.in b/cmake/templates/opencv-XXX.pc.cmake.in index 4f6415cab..c9dedb0da 100644 --- a/cmake/templates/opencv-XXX.pc.cmake.in +++ b/cmake/templates/opencv-XXX.pc.cmake.in @@ -8,6 +8,6 @@ includedir_new=@includedir@ Name: OpenCV Description: Open Source Computer Vision Library -Version: @VERSION@ +Version: @OPENCV_VERSION@ Libs: @OpenCV_LIB_COMPONENTS@ Cflags: -I${includedir_old} -I${includedir_new} diff --git a/cmake/templates/opencv_modules.hpp.in b/cmake/templates/opencv_modules.hpp.in index 5d58e40e4..149871502 100644 --- a/cmake/templates/opencv_modules.hpp.in +++ b/cmake/templates/opencv_modules.hpp.in @@ -6,4 +6,4 @@ * */ -@OPENCV_MODULE_DEFINITIONS_CONFIGMAKE@ \ No newline at end of file +@OPENCV_MODULE_DEFINITIONS_CONFIGMAKE@ diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 70f4809d2..8f6955c4b 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -49,7 +49,7 @@ if(BUILD_DOCS AND HAVE_SPHINX) set(toc_file "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/${mod}.rst") if(EXISTS "${toc_file}") file(RELATIVE_PATH toc_file "${OpenCV_SOURCE_DIR}/modules" "${toc_file}") - set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\r\n") + set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\n") endif() endforeach() @@ -122,4 +122,4 @@ if(BUILD_DOCS AND HAVE_SPHINX) install(FILES "${f}" DESTINATION "${OPENCV_DOC_INSTALL_PATH}" OPTIONAL) endforeach() -endif() \ No newline at end of file +endif() diff --git a/doc/_static/insertIframe.js b/doc/_static/insertIframe.js index 904ff361d..0a3099a8c 100644 --- a/doc/_static/insertIframe.js +++ b/doc/_static/insertIframe.js @@ -1,4 +1,4 @@ -function insertIframe (elementId, iframeSrc) +function insertIframe (elementId, iframeSrc) { var iframe; if (document.createElement && (iframe = document.createElement('iframe'))) @@ -10,4 +10,3 @@ function insertIframe (elementId, iframeSrc) element.parentNode.replaceChild(iframe, element); } } - diff --git a/doc/_themes/blue/static/default.css_t b/doc/_themes/blue/static/default.css_t index e2127db1d..f2084902b 100644 --- a/doc/_themes/blue/static/default.css_t +++ b/doc/_themes/blue/static/default.css_t @@ -402,4 +402,4 @@ div.sphinxsidebar #searchbox input[type="text"] { div.sphinxsidebar #searchbox input[type="submit"] { width:auto; -} \ No newline at end of file +} diff --git a/doc/_themes/blue/theme.conf b/doc/_themes/blue/theme.conf index 206193f6f..bc0b99b2a 100644 --- a/doc/_themes/blue/theme.conf +++ b/doc/_themes/blue/theme.conf @@ -28,4 +28,4 @@ feedbacklinkcolor = #ffffff bodyfont = sans-serif headfont = 'Trebuchet MS', sans-serif guifont = "Lucida Sans","Lucida Sans Unicode","Lucida Grande",Verdana,Arial,Helvetica,sans-serif -lang = none \ No newline at end of file +lang = none diff --git a/doc/check_docs.py b/doc/check_docs.py index 8ab5fced4..13d9f763e 100755 --- a/doc/check_docs.py +++ b/doc/check_docs.py @@ -184,5 +184,3 @@ p = RSTParser() for m in opencv_module_list: print "\n\n*************************** " + m + " *************************\n" p.check_module_docs(m) - - diff --git a/doc/mymath.sty b/doc/mymath.sty index 491c160cc..4da828212 100644 --- a/doc/mymath.sty +++ b/doc/mymath.sty @@ -39,4 +39,3 @@ #7 & #8 & #9 \end{bmatrix} } - diff --git a/doc/opencv_cheatsheet.tex b/doc/opencv_cheatsheet.tex index e76bd016e..fc0c0fa41 100644 --- a/doc/opencv_cheatsheet.tex +++ b/doc/opencv_cheatsheet.tex @@ -75,11 +75,11 @@ % if using A4 paper. (This probably isn't strictly necessary.) 
% If using another size paper, use default 1cm margins. \ifthenelse{\lengthtest { \paperwidth = 11in}} - { \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} } - {\ifthenelse{ \lengthtest{ \paperwidth = 297mm}} - {\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} } - {\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} } - } + { \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} } + {\ifthenelse{ \lengthtest{ \paperwidth = 297mm}} + {\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} } + {\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} } + } % Turn off header and footer % \pagestyle{empty} diff --git a/doc/packaging.txt b/doc/packaging.txt index a4d3dc283..e8dd7699b 100644 --- a/doc/packaging.txt +++ b/doc/packaging.txt @@ -4,14 +4,14 @@ INSTRUCTIONS TO BUILD WIN32 PACKAGES WITH CMAKE+CPACK - Install NSIS. - Generate OpenCV solutions for MSVC using CMake as usual. -- In cmake-gui: +- In cmake-gui: - Mark BUILD_PACKAGE - Mark BUILD_EXAMPLES (If examples are desired to be shipped as binaries...) - Unmark ENABLE_OPENMP, since this feature seems to have some issues yet... - Mark INSTALL_*_EXAMPLES - Open the OpenCV solution and build ALL in Debug and Release. -- Build PACKAGE, from the Release configuration. An NSIS installer package will be +- Build PACKAGE, from the Release configuration. An NSIS installer package will be created with both release and debug LIBs and DLLs. - + Jose Luis Blanco, 2009/JUL/29 diff --git a/doc/pattern_tools/svgfig.py b/doc/pattern_tools/svgfig.py index 86afa5913..c690c9ff0 100755 --- a/doc/pattern_tools/svgfig.py +++ b/doc/pattern_tools/svgfig.py @@ -3667,4 +3667,3 @@ class YErrorBars: output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans)) return output - diff --git a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst index 6637e2590..2cf00f42a 100644 --- a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst +++ b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst @@ -3,42 +3,42 @@ Camera calibration With OpenCV ****************************** -Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determinate the relation between the camera's natural units (pixels) and the real world units (for example millimeters). +Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determine the relation between the camera's natural units (pixels) and the real world units (for example millimeters). Theory ====== -For the distortion OpenCV takes into account the radial and tangential factors. For the radial one uses the following formula: +For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula: -.. math:: +.. 
math:: x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\ y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) -So for an old pixel point at :math:`(x,y)` coordinate in the input image, for a corrected output image its position will be :math:`(x_{corrected} y_{corrected})` . The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect. +So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position on the corrected output image will be :math:`(x_{corrected}, y_{corrected})`. The presence of radial distortion manifests itself in the form of the "barrel" or "fish-eye" effect. -Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. Correcting this is made via the formulas: +Tangential distortion occurs because the image-taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas: -.. math:: +.. math:: x_{corrected} = x + [ 2p_1xy + p_2(r^2+2x^2)] \\ y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy] -So we have five distortion parameters, which in OpenCV are organized in a 5 column one row matrix: +So we have five distortion parameters, which in OpenCV are presented as a one-row matrix with 5 columns: -.. math:: +.. math:: Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3) -Now for the unit conversion, we use the following formula: +Now for the unit conversion we use the following formula: .. math:: \left [ \begin{matrix} x \\ y \\ w \end{matrix} \right ] = \left [ \begin{matrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{matrix} \right ] \left [ \begin{matrix} X \\ Y \\ Z \end{matrix} \right ] -Here the presence of the :math:`w` is cause we use a homography coordinate system (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` what are the optical centers expressed in pixels coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single :math:`f` focal length. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution. +Here the presence of :math:`w` is explained by the use of the homogeneous coordinate system (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` which are the optical centers expressed in pixel coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single focal length :math:`f`. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution. -The process of determining these two matrices is the calibration. 
Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration: .. container:: enumeratevisibleitemswithsquare @@ -46,28 +46,28 @@ The process of determining these two matrices is the calibration. Calculating th + Symmetrical circle pattern + Asymmetrical circle pattern -Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern equals in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard one requires at least two. However, in practice we have a good amount of noise present in our input images, so for good results you will probably want at least 10 good snapshots of the input pattern in different position. +Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern results in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard pattern requires at least two snapshots. However, in practice we have a good amount of noise present in our input images, so for good results you will probably need at least 10 good snapshots of the input pattern in different positions. Goal ==== -The sample application will: +The sample application will: .. container:: enumeratevisibleitemswithsquare - + Determinate the distortion matrix - + Determinate the camera matrix - + Input from Camera, Video and Image file list - + Configuration from XML/YAML file + + Determine the distortion matrix + + Determine the camera matrix + + Take input from Camera, Video and Image file list + + Read configuration from XML/YAML file + Save the results into XML/YAML file + Calculate re-projection error Source code =========== -You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument. The name of its configuration file. If none given it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use as input a camera, a video file or an image list. If you opt for the later one, you need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images needs to be specified using the absolute path or the relative one from your applications working directory. You may find all this in the beforehand mentioned directory. +You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. 
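As a quick numeric illustration of the radial and tangential formulas from the Theory section above, here is a minimal standalone sketch; the coefficient and point values below are invented for demonstration and are not taken from the tutorial's sample:

.. code-block:: cpp

    #include <cstdio>

    int main()
    {
        // Hypothetical distortion coefficients (k1, k2, p1, p2, k3) - illustration only
        const double k1 = -0.28, k2 = 0.09, p1 = 0.001, p2 = -0.0005, k3 = 0.0;
        const double x = 0.3, y = -0.2;          // a point in normalized coordinates
        const double r2 = x*x + y*y;             // r^2

        // Radial term: 1 + k1*r^2 + k2*r^4 + k3*r^6
        const double radial = 1 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2;

        // Apply the radial and tangential formulas from the Theory section
        const double x_corrected = x*radial + 2*p1*x*y + p2*(r2 + 2*x*x);
        const double y_corrected = y*radial + p1*(r2 + 2*y*y) + 2*p2*x*y;

        std::printf("(%g, %g) -> (%g, %g)\n", x, y, x_corrected, y_corrected);
        return 0;
    }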
The program has a single argument: the name of its configuration file. If none is given then it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use a camera, a video file or an image list as input. If you opt for the last one, you will need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images need to be specified using the absolute path or the relative one from your application's working directory. You may find all this in the samples directory mentioned above. -The application starts up with reading the settings from the configuration file. Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen to do not post here the code part for that. The technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial. +The application starts up by reading the settings from the configuration file. Although this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen not to post the code for that part here. You can find the technical background on how to do this in the :ref:`fileInputOutputXMLYAML` tutorial. Explanation =========== @@ -76,15 +76,15 @@ Explanation .. code-block:: cpp - Settings s; + Settings s; const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml"; FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings if (!fs.isOpened()) { - cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl; + cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl; return -1; } - fs["Settings"] >> s; + fs["Settings"] >> s; fs.release(); // close Settings file if (!s.goodInput) @@ -93,9 +93,9 @@ Explanation return -1; } - For this I've used simple OpenCV class input operation. After reading the file I've an additional post-process function that checks for the validity of the input. Only if all of them are good will be the *goodInput* variable true. + For this I've used a simple OpenCV class input operation. After reading the file I have an additional post-processing function that checks the validity of the input. Only if all inputs are good will the *goodInput* variable be true. -#. **Get next input, if it fails or we have enough of them calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to *CALIBRATED* one. +#. **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images then we run the calibration process. In the case of an image list we step out of the loop; otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to the *CALIBRATED* one. .. 
code-block:: cpp @@ -123,9 +123,9 @@ Explanation if( s.flipVertical ) flip( view, view, 0 ); } - For some cameras we may need to flip the input image. Here we do this too. + For some cameras we may need to flip the input image. Here we do this too. -#. **Find the pattern in the current input**. The formation of the equations I mentioned above consists of finding the major patterns in the input: in case of the chessboard this is their corners of the squares and for the circles, well, the circles itself. The position of these will form the result and is collected into the *pointBuf* vector. +#. **Find the pattern in the current input**. The formation of the equations I mentioned above aims at finding the major patterns in the input: in the case of the chessboard these are the corners of the squares and for the circles, well, the circles themselves. The position of these will form the result, which will be written into the *pointBuf* vector. .. code-block:: cpp @@ -146,19 +146,19 @@ Explanation break; } - Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. For both of them you pass on the current image, the size of the board and you'll get back the positions of the patterns. Furthermore, they return a boolean variable that states if in the input we could find or not the pattern (we only need to take into account images where this is true!). + Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!). - Then again in case of cameras we only take camera images after an input delay time passed. This is in order to allow for the user to move the chessboard around and as getting different images. Same images mean same equations, and same equations at the calibration will form an ill-posed problem, so the calibration will fail. For square images the position of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix ` function. This way will get a better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image with the :calib3d:`findChessboardCorners ` function. + Then again in the case of cameras we only take camera images after an input delay time has passed. This is done in order to allow the user to move the chessboard around and get different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix ` function. It will produce a better calibration result. After this we add the result of a valid input to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image using the :calib3d:`drawChessboardCorners ` function. .. 
code-block:: cpp - if ( found) // If done with success, + if ( found) // If done with success, { // improve the found corners' coordinate accuracy for chessboard - if( s.calibrationPattern == Settings::CHESSBOARD) + if( s.calibrationPattern == Settings::CHESSBOARD) { Mat viewGray; - cvtColor(view, viewGray, CV_BGR2GRAY); + cvtColor(view, viewGray, CV_BGR2GRAY); cornerSubPix( viewGray, pointBuf, Size(11,11), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 )); } @@ -171,11 +171,11 @@ Explanation blinkOutput = s.inputCapture.isOpened(); } - // Draw the corners. + // Draw the corners. drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found ); } -#. **Show state and result for the user, plus command line control of the application**. The showing part consists of a text output on the live feed, and for video or camera input to show the "capturing" frame we simply bitwise negate the input image. +#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image. .. code-block:: cpp @@ -183,7 +183,7 @@ Explanation string msg = (mode == CAPTURING) ? "100/100" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start"; int baseLine = 0; - Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); + Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10); if( mode == CAPTURING ) @@ -199,7 +199,7 @@ Explanation if( blinkOutput ) bitwise_not(view, view); - If we only ran the calibration and got the camera matrix plus the distortion coefficients we may just as correct the image with the :imgproc_geometric:`undistort ` function: + If we ran calibration and got camera's matrix with the distortion coefficients we may want to correct the image using :imgproc_geometric:`undistort ` function: .. code-block:: cpp @@ -212,7 +212,7 @@ Explanation //------------------------------ Show image and check for input commands ------------------- imshow("Image View", view); - Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start all over the detection process (or simply start it), and finally for the *ESC* key quit the application: + Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start again the detection process, and finally for the *ESC* key we quit the application: .. code-block:: cpp @@ -229,7 +229,7 @@ Explanation imagePoints.clear(); } -#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must append this after the loop. Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which is in fact first a call of the :imgproc_geometric:`initUndistortRectifyMap ` to find out the transformation matrices and then doing the transformation with the :imgproc_geometric:`remap ` function. Because, after a successful calibration the map calculation needs to be done only once, by using this expanded form you may speed up your application: +#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. 
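In outline, the expanded form discussed in the next paragraph might look like the following sketch; it assumes the *cameraMatrix*, *distCoeffs*, *imageSize* and *s.imageList* variables computed elsewhere in this tutorial, so treat the exact names as illustrative:

.. code-block:: cpp

    // Compute the undistortion and rectification maps only once...
    Mat map1, map2;
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
        getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
        imageSize, CV_16SC2, map1, map2);

    // ...then undistorting each stored image is a cheap remap call
    for (size_t i = 0; i < s.imageList.size(); ++i)
    {
        Mat view = imread(s.imageList[i], 1), rview;
        if (view.empty())
            continue;
        remap(view, rview, map1, map2, INTER_LINEAR);
        imshow("Image View", rview);
        waitKey(0);
    }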
Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which in fact first calls :imgproc_geometric:`initUndistortRectifyMap ` to find the transformation matrices and then performs the transformation using the :imgproc_geometric:`remap ` function. Because after a successful calibration the map calculation needs to be done only once, by using this expanded form you may speed up your application: .. code-block:: cpp @@ -256,9 +256,9 @@ Explanation The calibration and save ======================== -Because the calibration needs to be only once per camera it makes sense to save them after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. +Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. -Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what: +Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what: .. code-block:: cpp @@ -269,10 +269,10 @@ Therefore in the first function we just split up these two processes. Because we vector reprojErrs; double totalAvgErr = 0; - bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, + bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, reprojErrs, totalAvgErr); cout << (ok ? "Calibration succeeded" : "Calibration failed") - << ". avg re projection error = " << totalAvgErr ; + << ". avg re projection error = " << totalAvgErr ; if( ok ) // save only if the calibration was done with success saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs, @@ -280,15 +280,15 @@ Therefore in the first function we just split up these two processes. Because we return ok; } -We do the calibration with the help of the :calib3d:`calibrateCamera ` function. This has the following parameters: +We do the calibration with the help of the :calib3d:`calibrateCamera ` function. It has the following parameters: .. container:: enumeratevisibleitemswithsquare - + The object points. This is a vector of *Point3f* vector that for each input image describes how should the pattern look. If we have a planar pattern (like a chessboard) then we can simply set all Z coordinates to zero. This is a collection of the points where these important points are present. Because, we use a single pattern for all the input images we can calculate this just once and multiply it for all the other input views. 
We calculate the corner points with the *calcBoardCornerPositions* function as: + + The object points. This is a vector of *Point3f* vectors that for each input image describes how the pattern should look. If we have a planar pattern (like a chessboard) then we can simply set all Z coordinates to zero. This is a collection of the points where these important points are present. Because we use a single pattern for all the input images we can calculate this just once and multiply it for all the other input views. We calculate the corner points with the *calcBoardCornerPositions* function as: .. code-block:: cpp - void calcBoardCornerPositions(Size boardSize, float squareSize, vector& corners, + void calcBoardCornerPositions(Size boardSize, float squareSize, vector& corners, Settings::Pattern patternType /*= Settings::CHESSBOARD*/) { corners.clear(); @@ -310,19 +310,19 @@ We do the calibration with the help of the :calib3d:`calibrateCamera > objectPoints(1); calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern); - objectPoints.resize(imagePoints.size(),objectPoints[0]); + objectPoints.resize(imagePoints.size(),objectPoints[0]); - + The image points. This is a vector of *Point2f* vector that for each input image contains where the important points (corners for chessboard, and center of circles for the circle patterns) were found. We already collected this from what the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function returned. We just need to pass it on. + + The image points. This is a vector of *Point2f* vectors which for each input image contains the coordinates of the important points (corners for the chessboard and centers of the circles for the circle pattern). We have already collected this from the :calib3d:`findChessboardCorners ` or :calib3d:`findCirclesGrid ` function. We just need to pass it on. - + The size of the image acquired from the camera, video file or the images. + + The size of the image acquired from the camera, video file or the images. - + The camera matrix. If we used the fix aspect ratio option we need to set the :math:`f_x` to zero: + + The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero: .. code-block:: cpp @@ -330,24 +330,24 @@ We do the calibration with the help of the :calib3d:`calibrateCamera (0,0) = 1.0; - + The distortion coefficient matrix. Initialize with zero. + + The distortion coefficient matrix. Initialize with zero. .. code-block:: cpp distCoeffs = Mat::zeros(8, 1, CV_64F); - + The function will calculate for all the views the rotation and translation vector that transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7th and 8th parameters are an output vector of matrices containing in the ith position the rotation and translation vector for the ith object point to the ith image point. + + For all the views the function will calculate rotation and translation vectors which transform the object points (given in the model coordinate space) to the image points (given in the image coordinate space). The 7-th and 8-th parameters are the output vector of matrices containing in the i-th position the rotation and translation vector for the i-th object point to the i-th image point. - + The final argument is a flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point. 
+ + The final argument is the flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point. .. code-block:: cpp double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5); - + The function returns the average re-projection error. This number gives a good estimation of just how exact is the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculate for all the calibration images. + + The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images. - .. code-block:: cpp + .. code-block:: cpp double computeReprojectionErrors( const vector >& objectPoints, const vector >& imagePoints, @@ -378,43 +378,43 @@ We do the calibration with the help of the :calib3d:`calibrateCamera ` that has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into a VID5 directory. I've put this inside the :file:`images/CameraCalibraation` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: +Let there be :download:`this input chessboard pattern <../../../pattern.png>` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: .. code-block:: xml - images/CameraCalibraation/VID5/xx1.jpg - images/CameraCalibraation/VID5/xx2.jpg - images/CameraCalibraation/VID5/xx3.jpg - images/CameraCalibraation/VID5/xx4.jpg - images/CameraCalibraation/VID5/xx5.jpg - images/CameraCalibraation/VID5/xx6.jpg - images/CameraCalibraation/VID5/xx7.jpg - images/CameraCalibraation/VID5/xx8.jpg + images/CameraCalibration/VID5/xx1.jpg + images/CameraCalibration/VID5/xx2.jpg + images/CameraCalibration/VID5/xx3.jpg + images/CameraCalibration/VID5/xx4.jpg + images/CameraCalibration/VID5/xx5.jpg + images/CameraCalibration/VID5/xx6.jpg + images/CameraCalibration/VID5/xx7.jpg + images/CameraCalibration/VID5/xx8.jpg -Then specified the :file:`images/CameraCalibraation/VID5/VID5.XML` as input in the configuration file. Here's a chessboard pattern found during the runtime of the application: +Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application: -.. 
image:: images/fileListImage.jpg +.. image:: images/fileListImage.jpg :alt: A found chessboard :align: center -After applying the distortion removal we get: +After applying the distortion removal we get: -.. image:: images/fileListImageUnDist.jpg +.. image:: images/fileListImageUnDist.jpg :alt: Distortion removal for File List :align: center -The same works for :download:`this asymmetrical circle pattern <../../../acircles_pattern.png>` by setting the input width to 4 and height to 11. This time I've used a live camera feed by specifying its ID ("1") for the input. Here's, how a detected pattern should look: +The same works for :download:`this asymmetrical circle pattern <../../../acircles_pattern.png>` by setting the input width to 4 and height to 11. This time I've used a live camera feed by specifying its ID ("1") for the input. Here's, how a detected pattern should look: -.. image:: images/asymetricalPattern.jpg +.. image:: images/asymetricalPattern.jpg :alt: Asymmetrical circle detection :align: center -In both cases in the specified output XML/YAML file you'll find the camera and distortion coefficients matrices: +In both cases in the specified output XML/YAML file you'll find the camera and distortion coefficients matrices: .. code-block:: cpp @@ -433,9 +433,9 @@ In both cases in the specified output XML/YAML file you'll find the camera and d -4.1802327176423804e-001 5.0715244063187526e-001 0. 0. -5.7843597214487474e-001 -Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs with cheap and low quality cameras. +Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras. -You may observe a runtime instance of this on the `YouTube here `_. +You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html diff --git a/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst b/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst index ec7354be2..e08550750 100644 --- a/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst +++ b/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst @@ -7,16 +7,16 @@ Camera calibration with square chessboard The goal of this tutorial is to learn how to calibrate a camera given a set of chessboard images. -*Test data*: use images in your data/chess folder. +*Test data*: use images in your data/chess folder. #. - Compile opencv with samples by setting ``BUILD_EXAMPLES`` to ``ON`` in cmake configuration. + Compile opencv with samples by setting ``BUILD_EXAMPLES`` to ``ON`` in cmake configuration. #. Go to ``bin`` folder and use ``imagelist_creator`` to create an ``XML/YAML`` list of your images. - + #. - Then, run ``calibration`` sample to get camera parameters. Use square size equal to 3cm. + Then, run ``calibration`` sample to get camera parameters. Use square size equal to 3cm. Pose estimation =============== @@ -57,6 +57,6 @@ Now, let us write a code that detects a chessboard in a new image and finds its distCoeffs, rvec, tvec, false); #. 
- Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``). + Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``). -Question: how to calculate the distance from the camera origin to any of the corners? \ No newline at end of file +Question: how to calculate the distance from the camera origin to any of the corners? diff --git a/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst b/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst index 3d4566462..91f80b70b 100644 --- a/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst +++ b/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst @@ -3,11 +3,11 @@ *calib3d* module. Camera calibration and 3D reconstruction ----------------------------------------------------------- -Although we got most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out from the 2D images information about the 3D world. +Although we got most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out from the 2D images information about the 3D world. -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -26,7 +26,7 @@ Although we got most of our images in a 2D format they do come from a 3D world. :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv diff --git a/doc/tutorials/core/adding_images/adding_images.rst b/doc/tutorials/core/adding_images/adding_images.rst index 13e4063aa..e3135693d 100644 --- a/doc/tutorials/core/adding_images/adding_images.rst +++ b/doc/tutorials/core/adding_images/adding_images.rst @@ -18,7 +18,7 @@ Theory .. note:: - The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski + The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski From our previous tutorial, we know already a bit of *Pixel operators*. An interesting dyadic (two-input) operator is the *linear blend operator*: @@ -43,7 +43,7 @@ As usual, after the not-so-lengthy explanation, let's go to the code: int main( int argc, char** argv ) { - double alpha = 0.5; double beta; double input; + double alpha = 0.5; double beta; double input; Mat src1, src2, dst; @@ -69,7 +69,7 @@ As usual, after the not-so-lengthy explanation, let's go to the code: beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); waitKey(0); @@ -99,10 +99,10 @@ Explanation #. Now we need to generate the :math:`g(x)` image. For this, the function :add_weighted:`addWeighted <>` comes quite handy: .. code-block:: cpp - + beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + since :add_weighted:`addWeighted <>` produces: .. math:: @@ -110,12 +110,12 @@ Explanation dst = \alpha \cdot src1 + \beta \cdot src2 + \gamma In this case, :math:`\gamma` is the argument :math:`0.0` in the code above. - -#. Create windows, show the images and wait for the user to end the program. + +#. Create windows, show the images and wait for the user to end the program. Result ======= .. 
image:: images/Adding_Images_Tutorial_Result_0.jpg :alt: Blending Images Tutorial - Final Result - :align: center + :align: center diff --git a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.rst b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.rst index 3bd3237b7..d9c681ed4 100644 --- a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.rst +++ b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.rst @@ -99,11 +99,11 @@ Explanation /// 2.b. Creating rectangles rectangle( rook_image, - Point( 0, 7*w/8.0 ), - Point( w, w), - Scalar( 0, 255, 255 ), - -1, - 8 ); + Point( 0, 7*w/8.0 ), + Point( w, w), + Scalar( 0, 255, 255 ), + -1, + 8 ); /// 2.c. Create a few lines MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) ); @@ -118,16 +118,16 @@ Explanation .. code-block:: cpp void MyLine( Mat img, Point start, Point end ) - { - int thickness = 2; - int lineType = 8; - line( img, - start, - end, - Scalar( 0, 0, 0 ), - thickness, - lineType ); - } + { + int thickness = 2; + int lineType = 8; + line( img, + start, + end, + Scalar( 0, 0, 0 ), + thickness, + lineType ); + } As we can see, *MyLine* just call the function :line:`line <>`, which does the following: @@ -145,18 +145,18 @@ Explanation void MyEllipse( Mat img, double angle ) { - int thickness = 2; - int lineType = 8; + int thickness = 2; + int lineType = 8; - ellipse( img, - Point( w/2.0, w/2.0 ), - Size( w/4.0, w/16.0 ), - angle, - 0, - 360, - Scalar( 255, 0, 0 ), - thickness, - lineType ); + ellipse( img, + Point( w/2.0, w/2.0 ), + Size( w/4.0, w/16.0 ), + angle, + 0, + 360, + Scalar( 255, 0, 0 ), + thickness, + lineType ); } From the code above, we can observe that the function :ellipse:`ellipse <>` draws an ellipse such that: @@ -176,17 +176,17 @@ Explanation .. code-block:: cpp void MyFilledCircle( Mat img, Point center ) - { - int thickness = -1; - int lineType = 8; + { + int thickness = -1; + int lineType = 8; - circle( img, - center, - w/32.0, - Scalar( 0, 0, 255 ), - thickness, - lineType ); - } + circle( img, + center, + w/32.0, + Scalar( 0, 0, 255 ), + thickness, + lineType ); + } Similar to the ellipse function, we can observe that *circle* receives as arguments: @@ -203,41 +203,41 @@ Explanation .. 
code-block:: cpp void MyPolygon( Mat img ) - { - int lineType = 8; + { + int lineType = 8; - /** Create some points */ - Point rook_points[1][20]; - rook_points[0][0] = Point( w/4.0, 7*w/8.0 ); - rook_points[0][1] = Point( 3*w/4.0, 7*w/8.0 ); - rook_points[0][2] = Point( 3*w/4.0, 13*w/16.0 ); - rook_points[0][3] = Point( 11*w/16.0, 13*w/16.0 ); - rook_points[0][4] = Point( 19*w/32.0, 3*w/8.0 ); - rook_points[0][5] = Point( 3*w/4.0, 3*w/8.0 ); - rook_points[0][6] = Point( 3*w/4.0, w/8.0 ); - rook_points[0][7] = Point( 26*w/40.0, w/8.0 ); - rook_points[0][8] = Point( 26*w/40.0, w/4.0 ); - rook_points[0][9] = Point( 22*w/40.0, w/4.0 ); - rook_points[0][10] = Point( 22*w/40.0, w/8.0 ); - rook_points[0][11] = Point( 18*w/40.0, w/8.0 ); - rook_points[0][12] = Point( 18*w/40.0, w/4.0 ); - rook_points[0][13] = Point( 14*w/40.0, w/4.0 ); - rook_points[0][14] = Point( 14*w/40.0, w/8.0 ); - rook_points[0][15] = Point( w/4.0, w/8.0 ); - rook_points[0][16] = Point( w/4.0, 3*w/8.0 ); - rook_points[0][17] = Point( 13*w/32.0, 3*w/8.0 ); - rook_points[0][18] = Point( 5*w/16.0, 13*w/16.0 ); - rook_points[0][19] = Point( w/4.0, 13*w/16.0) ; + /** Create some points */ + Point rook_points[1][20]; + rook_points[0][0] = Point( w/4.0, 7*w/8.0 ); + rook_points[0][1] = Point( 3*w/4.0, 7*w/8.0 ); + rook_points[0][2] = Point( 3*w/4.0, 13*w/16.0 ); + rook_points[0][3] = Point( 11*w/16.0, 13*w/16.0 ); + rook_points[0][4] = Point( 19*w/32.0, 3*w/8.0 ); + rook_points[0][5] = Point( 3*w/4.0, 3*w/8.0 ); + rook_points[0][6] = Point( 3*w/4.0, w/8.0 ); + rook_points[0][7] = Point( 26*w/40.0, w/8.0 ); + rook_points[0][8] = Point( 26*w/40.0, w/4.0 ); + rook_points[0][9] = Point( 22*w/40.0, w/4.0 ); + rook_points[0][10] = Point( 22*w/40.0, w/8.0 ); + rook_points[0][11] = Point( 18*w/40.0, w/8.0 ); + rook_points[0][12] = Point( 18*w/40.0, w/4.0 ); + rook_points[0][13] = Point( 14*w/40.0, w/4.0 ); + rook_points[0][14] = Point( 14*w/40.0, w/8.0 ); + rook_points[0][15] = Point( w/4.0, w/8.0 ); + rook_points[0][16] = Point( w/4.0, 3*w/8.0 ); + rook_points[0][17] = Point( 13*w/32.0, 3*w/8.0 ); + rook_points[0][18] = Point( 5*w/16.0, 13*w/16.0 ); + rook_points[0][19] = Point( w/4.0, 13*w/16.0) ; - const Point* ppt[1] = { rook_points[0] }; - int npt[] = { 20 }; + const Point* ppt[1] = { rook_points[0] }; + int npt[] = { 20 }; - fillPoly( img, - ppt, - npt, - 1, - Scalar( 255, 255, 255 ), - lineType ); + fillPoly( img, + ppt, + npt, + 1, + Scalar( 255, 255, 255 ), + lineType ); } To draw a filled polygon we use the function :fill_poly:`fillPoly <>`. We note that: @@ -255,11 +255,11 @@ Explanation .. code-block:: cpp rectangle( rook_image, - Point( 0, 7*w/8.0 ), - Point( w, w), - Scalar( 0, 255, 255 ), - -1, - 8 ); + Point( 0, 7*w/8.0 ), + Point( w, w), + Scalar( 0, 255, 255 ), + -1, + 8 ); Finally we have the :rectangle:`rectangle <>` function (we did not create a special function for this guy). We note that: diff --git a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst index 097e79e00..613f4e100 100644 --- a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst +++ b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst @@ -10,7 +10,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare - + Access pixel values + + Access pixel values + Initialize a matrix with zeros @@ -20,16 +20,16 @@ In this tutorial you will learn how to: Theory ======= - + .. 
note:: - The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski + The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski Image Processing -------------------- .. container:: enumeratevisibleitemswithsquare - * A general image processing operator is a function that takes one or more input images and produces an output image. + * A general image processing operator is a function that takes one or more input images and produces an output image. * Image transforms can be seen as: @@ -54,18 +54,18 @@ Brightness and contrast adjustments * Two commonly used point processes are *multiplication* and *addition* with a constant: .. math:: - + g(x) = \alpha f(x) + \beta - + * The parameters :math:`\alpha > 0` and :math:`\beta` are often called the *gain* and *bias* parameters; sometimes these parameters are said to control *contrast* and *brightness* respectively. * You can think of :math:`f(x)` as the source image pixels and :math:`g(x)` as the output image pixels. Then, more conveniently we can write the expression as: .. math:: - + g(i,j) = \alpha \cdot f(i,j) + \beta - - where :math:`i` and :math:`j` indicates that the pixel is located in the *i-th* row and *j-th* column. + + where :math:`i` and :math:`j` indicates that the pixel is located in the *i-th* row and *j-th* column. Code ===== @@ -91,7 +91,7 @@ Code Mat image = imread( argv[1] ); Mat new_image = Mat::zeros( image.size(), image.type() ); - /// Initialize values + /// Initialize values std::cout<<" Basic Linear Transforms "<>alpha; @@ -102,7 +102,7 @@ Code { for( int x = 0; x < image.cols; x++ ) { for( int c = 0; c < 3; c++ ) { - new_image.at(y,x)[c] = + new_image.at(y,x)[c] = saturate_cast( alpha*( image.at(y,x)[c] ) + beta ); } } @@ -133,41 +133,41 @@ Explanation #. We load an image using :imread:`imread <>` and save it in a Mat object: - + .. code-block:: cpp Mat image = imread( argv[1] ); #. Now, since we will make some transformations to this image, we need a new Mat object to store it. Also, we want this to have the following features: - + .. container:: enumeratevisibleitemswithsquare * Initial pixel values equal to zero * Same size and type as the original image - + .. code-block:: cpp - Mat new_image = Mat::zeros( image.size(), image.type() ); - - We observe that :mat_zeros:`Mat::zeros <>` returns a Matlab-style zero initializer based on *image.size()* and *image.type()* + Mat new_image = Mat::zeros( image.size(), image.type() ); + + We observe that :mat_zeros:`Mat::zeros <>` returns a Matlab-style zero initializer based on *image.size()* and *image.type()* #. Now, to perform the operation :math:`g(i,j) = \alpha \cdot f(i,j) + \beta` we will access to each pixel in image. Since we are operating with RGB images, we will have three values per pixel (R, G and B), so we will also access them separately. Here is the piece of code: .. code-block:: cpp - + for( int y = 0; y < image.rows; y++ ) { for( int x = 0; x < image.cols; x++ ) { for( int c = 0; c < 3; c++ ) - { new_image.at(y,x)[c] = + { new_image.at(y,x)[c] = saturate_cast( alpha*( image.at(y,x)[c] ) + beta ); } } } - + Notice the following: .. container:: enumeratevisibleitemswithsquare - * To access each pixel in the images we are using this syntax: *image.at(y,x)[c]* where *y* is the row, *x* is the column and *c* is R, G or B (0, 1 or 2). 
+ * To access each pixel in the images we are using this syntax: *image.at(y,x)[c]* where *y* is the row, *x* is the column and *c* is R, G or B (0, 1 or 2). * Since the operation :math:`\alpha \cdot p(i,j) + \beta` can give values out of range or non-integer values (if :math:`\alpha` is a float), we use :saturate_cast:`saturate_cast <>` to make sure the values are valid. @@ -175,7 +175,7 @@ Explanation #. Finally, we create windows and show the images, the usual way. .. code-block:: cpp - + namedWindow("Original Image", 1); namedWindow("New Image", 1); @@ -185,9 +185,9 @@ Explanation waitKey(0); .. note:: - + Instead of using the **for** loops to access each pixel, we could have simply used this command: - + .. code-block:: cpp image.convertTo(new_image, -1, alpha, beta); @@ -211,4 +211,4 @@ Result .. image:: images/Basic_Linear_Transform_Tutorial_Result_0.jpg :alt: Basic Linear Transform - Final Result - :align: center + :align: center diff --git a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst index b5f8e77c8..b7cf44668 100644 --- a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst +++ b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst @@ -4,22 +4,22 @@ Discrete Fourier Transform ************************** Goal ==== -We'll seek answers for the following questions: +We'll seek answers for the following questions: .. container:: enumeratevisibleitemswithsquare - + What is a Fourier transform and why use it? - + How to do it in OpenCV? + + What is a Fourier transform and why use it? + + How to do it in OpenCV? + Usage of functions such as: :imgprocfilter:`copyMakeBorder() `, :operationsonarrays:`merge() `, :operationsonarrays:`dft() `, :operationsonarrays:`getOptimalDFTSize() `, :operationsonarrays:`log() ` and :operationsonarrays:`normalize() ` . Source code =========== -You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the OpenCV source code library. +You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the OpenCV source code library. -Here's a sample usage of :operationsonarrays:`dft() ` : +Here's a sample usage of :operationsonarrays:`dft() ` : .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp :language: cpp @@ -30,11 +30,11 @@ Here's a sample usage of :operationsonarrays:`dft() ` : Explanation =========== -The Fourier Transform will decompose an image into its sinus and cosines components. In other words, it will transform an image from its spatial domain to its frequency domain. The idea is that any function may be approximated exactly with the sum of infinite sinus and cosines functions. The Fourier Transform is a way how to do this. Mathematically a two dimensional images Fourier transform is: +The Fourier Transform will decompose an image into its sine and cosine components. In other words, it will transform an image from its spatial domain to its frequency domain. The idea is that any function may be approximated exactly by a sum of infinitely many sine and cosine functions. The Fourier Transform is a way to do this. Mathematically, the Fourier transform of a two dimensional image is: .. math:: - F(k,l) = \displaystyle\sum\limits_{i=0}^{N-1}\sum\limits_{j=0}^{N-1} f(i,j)e^{-i2\pi(\frac{ki}{N}+\frac{lj}{N})} + F(k,l) = \displaystyle\sum\limits_{i=0}^{N-1}\sum\limits_{j=0}^{N-1} f(i,j)e^{-i2\pi(\frac{ki}{N}+\frac{lj}{N})} e^{ix} = \cos{x} + i\sin {x} @@ -44,65 +44,65 @@ In this sample I'll show how to calculate and show the *magnitude* image of a Fo 1. **Expand the image to an optimal size**. The performance of a DFT is dependent on the image size. It tends to be the fastest for image sizes that are multiples of two, three and five. Therefore, to achieve maximal performance it is generally a good idea to pad border values to the image to get a size with such traits. The :operationsonarrays:`getOptimalDFTSize() ` returns this optimal size and we can use the :imgprocfilter:`copyMakeBorder() ` function to expand the borders of an image: - .. code-block:: cpp + .. code-block:: cpp Mat padded; //expand input image to optimal size int m = getOptimalDFTSize( I.rows ); int n = getOptimalDFTSize( I.cols ); // on the border add zero pixels copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0)); - The appended pixels are initialized with zero. + The appended pixels are initialized with zero. 2. **Make place for both the complex and the real values**. The result of a Fourier Transform is complex. This implies that for each image value the result is two image values (one per component). Moreover, the frequency domain's range is much larger than its spatial counterpart. Therefore, we usually store these at least in a *float* format. Hence we'll convert our input image to this type and expand it with another channel to hold the complex values: - .. code-block:: cpp + .. code-block:: cpp Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)}; Mat complexI; merge(planes, 2, complexI); // Add to the expanded another plane with zeros -3. **Make the Discrete Fourier Transform**. It's possible an in-place calculation (same input as output): +3. **Make the Discrete Fourier Transform**. An in-place calculation is possible (same input as output): - .. code-block:: cpp + .. code-block:: cpp dft(complexI, complexI); // this way the result may fit in the source matrix -4. **Transform the real and complex values to magnitude**. A complex number has a real (*Re*) and a complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a DFT is: +4. **Transform the real and complex values to magnitude**. A complex number has a real (*Re*) and an imaginary (*Im*) part. The results of a DFT are complex numbers. The magnitude of a DFT is: .. math:: M = \sqrt{ {Re(DFT(I))}^2 + {Im(DFT(I))}^2} - Translated to OpenCV code: + Translated to OpenCV code: - .. code-block:: cpp + .. code-block:: cpp split(complexI, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I)) - magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude + magnitude(planes[0], planes[1], planes[0]); // planes[0] = magnitude Mat magI = planes[0]; -5. **Switch to a logarithmic scale**. It turns out that the dynamic range of the Fourier coefficients is too large to be displayed on the screen. We have some small and some high changing values that we can't observe like this.
Therefore the high values will all turn out as white points, while the small ones as black. To use the gray scale values to for visualization we can transform our linear scale to a logarithmic one: +5. **Switch to a logarithmic scale**. It turns out that the dynamic range of the Fourier coefficients is too large to be displayed on the screen. We have some small and some high changing values that we can't observe like this. Therefore the high values will all turn out as white points, while the small ones as black. To use the gray scale values for visualization we can transform our linear scale to a logarithmic one: .. math:: M_1 = \log{(1 + M)} - Translated to OpenCV code: + Translated to OpenCV code: - .. code-block:: cpp + .. code-block:: cpp magI += Scalar::all(1); // switch to logarithmic scale log(magI, magI); -6. **Crop and rearrange**. Remember, that at the first step, we expanded the image? Well, it's time to throw away the newly introduced values. For visualization purposes we may also rearrange the quadrants of the result, so that the origin (zero, zero) corresponds with the image center. +6. **Crop and rearrange**. Remember that in the first step we expanded the image? Well, it's time to throw away the newly introduced values. For visualization purposes we may also rearrange the quadrants of the result, so that the origin (zero, zero) corresponds with the image center. - .. code-block:: cpp + .. code-block:: cpp magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2)); int cx = magI.cols/2; int cy = magI.rows/2; - Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant + Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right @@ -116,25 +116,25 @@ In this sample I'll show how to calculate and show the *magnitude* image of a Fo q2.copyTo(q1); tmp.copyTo(q2); -7. **Normalize**. This is done again for visualization purposes. We now have the magnitudes, however this are still out of our image display range of zero to one. We normalize our values to this range using the :operationsonarrays:`normalize() ` function. +7. **Normalize**. This is done again for visualization purposes. We now have the magnitudes, however these are still outside our image display range of zero to one. We normalize our values to this range using the :operationsonarrays:`normalize() ` function. - .. code-block:: cpp + .. code-block:: cpp - normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a + normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a // viewable image form (float between values 0 and 1). Result ====== -An application idea would be to determine the geometrical orientation present in the image. For example, let us find out if a text is horizontal or not? +An application idea would be to determine the geometrical orientation present in the image. For example, let us find out whether a text is horizontal or not.
Looking at some text you'll notice that the text lines also form, roughly, horizontal lines, while the letters form, roughly, vertical lines. These two main components of a text snippet can also be seen in its Fourier transform. Let us use :download:`this horizontal <../../../../samples/cpp/tutorial_code/images/imageTextN.png>` and :download:`this rotated<../../../../samples/cpp/tutorial_code/images/imageTextR.png>` image of a text. -In case of the horizontal text: +In case of the horizontal text: .. image:: images/result_normal.jpg :alt: In case of normal text :align: center -In case of a rotated text: +In case of a rotated text: .. image:: images/result_rotated.jpg :alt: In case of rotated text diff --git a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst index 44eed2eca..42f6a6091 100644 --- a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst +++ b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst @@ -4,9 +4,9 @@ File Input and Output using XML and YAML files ********************************************** Goal ==== -You'll find answers for the following questions: +You'll find answers for the following questions: .. container:: enumeratevisibleitemswithsquare @@ -18,7 +18,7 @@ You'll find answers for the following questions: Source code =========== -You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code library. +You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code library. Here's sample code showing how to achieve everything enumerated in the goal list. @@ -31,9 +31,9 @@ Here's a sample code of how to achieve all the stuff enumerated at the goal list Explanation =========== -Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions and the structure coming from this. They are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequence* (like the STL vector>. The difference between these is that in a map every element has a unique name through what you may access it. For sequences you need to go through them to query a specific item. +Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions, and the structure follows from this. There are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequences* (like the STL vector). The difference between these is that in a map every element has a unique name through which you may access it. For sequences you need to go through them to query a specific item. -1. 
**XML\\YAML File Open and Close.** Before you write any content to such file you need to open it and at the end to close it. The XML\YAML data structure in OpenCV is :xmlymlpers:`FileStorage `. To specify that this structure to which file binds on your hard drive you can use either its constructor or the *open()* function of this: .. code-block:: cpp @@ -42,29 +42,29 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv \\... fs.open(filename, FileStorage::READ); - Either one of this you use the second argument is a constant specifying the type of operations you'll be able to on them: WRITE, READ or APPEND. The extension specified in the file name also determinates the output format that will be used. The output may be even compressed if you specify an extension such as *.xml.gz*. + Whichever one you use, the second argument is a constant specifying the type of operations you'll be able to perform on the file: WRITE, READ or APPEND. The extension specified in the file name also determines the output format that will be used. The output may even be compressed if you specify an extension such as *.xml.gz*. + + The file automatically closes when the :xmlymlpers:`FileStorage ` object is destroyed. However, you may close it explicitly by calling the *release* function: - The file automatically closes when the :xmlymlpers:`FileStorage ` objects is destroyed. However, you may explicitly call for this by using the *release* function: - .. code-block:: cpp fs.release(); // explicit close -#. **Input and Output of text and numbers.** The data structure uses the same << output operator that the STL library. For outputting any type of data structure we need first to specify its name. We do this by just simply printing out the name of this. For basic types you may follow this with the print of the value : +#. **Input and Output of text and numbers.** The data structure uses the same << output operator as the STL library. To output any type of data structure we first need to specify its name; we do this by simply printing it out. For basic types you may follow this with the value itself: .. code-block:: cpp fs << "iterationNr" << 100; - Reading in is a simple addressing (via the [] operator) and casting operation or a read via the >> operator : + Reading in is a simple addressing (via the [] operator) plus a cast, or a read via the >> operator: .. code-block:: cpp - int itNr; + int itNr; fs["iterationNr"] >> itNr; itNr = (int) fs["iterationNr"]; -#. **Input\\Output of OpenCV Data structures.** Well these behave exactly just as the basic C++ types: +#. **Input\\Output of OpenCV Data structures.** These behave exactly like the basic C++ types: .. code-block:: cpp @@ -77,7 +77,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs["R"] >> R; // Read cv::Mat fs["T"] >> T; -#. **Input\\Output of vectors (arrays) and associative maps.** As I mentioned beforehand we can output maps and sequences (array, vector) too. Again we first print the name of the variable and then we have to specify if our output is either a sequence or map. +#. **Input\\Output of vectors (arrays) and associative maps.** As I mentioned beforehand, we can output maps and sequences (array, vector) too. Again, we first print the name of the variable and then we specify whether our output is a sequence or a map. 
For sequence before the first element print the "[" character and after the last one the "]" character: @@ -95,7 +95,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs << "{" << "One" << 1; fs << "Two" << 2 << "}"; - To read from these we use the :xmlymlpers:`FileNode ` and the :xmlymlpers:`FileNodeIterator ` data structures. The [] operator of the :xmlymlpers:`FileStorage ` class returns a :xmlymlpers:`FileNode ` data type. If the node is sequential we can use the :xmlymlpers:`FileNodeIterator ` to iterate through the items: + To read from these we use the :xmlymlpers:`FileNode ` and the :xmlymlpers:`FileNodeIterator ` data structures. The [] operator of the :xmlymlpers:`FileStorage ` class returns a :xmlymlpers:`FileNode ` data type. If the node is sequential we can use the :xmlymlpers:`FileNodeIterator ` to iterate through the items: .. code-block:: cpp @@ -115,8 +115,8 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv .. code-block:: cpp n = fs["Mapping"]; // Read mappings from a sequence - cout << "Two " << (int)(n["Two"]) << "; "; - cout << "One " << (int)(n["One"]) << endl << endl; + cout << "Two " << (int)(n["Two"]) << "; "; + cout << "One " << (int)(n["One"]) << endl << endl; #. **Read and write your own data structures.** Suppose you have a data structure such as: @@ -148,7 +148,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv id = (string)node["id"]; } - Then you need to add the following functions definitions outside the class: + Then you need to add the following functions definitions outside the class: .. code-block:: cpp @@ -175,17 +175,17 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs << "MyData" << m; // your own data structures fs["MyData"] >> m; // Read your own structure_ - Or to try out reading a non-existing read: + Or to try out reading a non-existing read: .. code-block:: cpp - fs["NonExisting"] >> m; // Do not add a fs << "NonExisting" << m command for this to work + fs["NonExisting"] >> m; // Do not add a fs << "NonExisting" << m command for this to work cout << endl << "NonExisting = " << endl << m << endl; Result ====== -Well mostly we just print out the defined numbers. On the screen of your console you could see: +Well mostly we just print out the defined numbers. On the screen of your console you could see: .. code-block:: bash @@ -212,7 +212,7 @@ Well mostly we just print out the defined numbers. On the screen of your console Tip: Open up output.xml with a text editor to see the serialized data. -Nevertheless, it's much more interesting what you may see in the output xml file: +Nevertheless, it's much more interesting what you may see in the output xml file: .. code-block:: xml @@ -242,7 +242,7 @@ Nevertheless, it's much more interesting what you may see in the output xml file mydata1234 -Or the YAML file: +Or the YAML file: .. 
code-block:: yaml @@ -277,4 +277,3 @@ You may observe a runtime instance of this on the `YouTube here - diff --git a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst index eba0cae7c..ef0f8640c 100644 --- a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst +++ b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst @@ -4,9 +4,9 @@ How to scan images, lookup tables and time measurement with OpenCV ******************************************************************* Goal ==== -We'll seek answers for the following questions: +We'll seek answers for the following questions: .. container:: enumeratevisibleitemswithsquare @@ -18,11 +18,11 @@ We'll seek answers for the following questions: Our test case ============= -Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result. +Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storage, a channel of a pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with far fewer of them to get the same final result. -In this cases it's common that we make a *color space reduction*. This means that we divide the color space current value with a new input value to end up with fewer colors. For instance every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten and so on. +In these cases it's common to make a *color space reduction*. This means that we divide the current color space value by a new input value to end up with fewer colors. For instance, every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten, and so on. -When you divide an *uchar* (unsigned char - aka values between zero and 255) value with an *int* value the result will be also *char*. These values may only be char values. Therefore, any fraction will be rounded down. Taking advantage of this fact the upper operation in the *uchar* domain may be expressed as: +When you divide an *uchar* (unsigned char - aka values between zero and 255) value by an *int* value the result is again an integral value, so any fraction will be rounded down. Taking advantage of this fact, the upper operation in the *uchar* domain may be expressed as: .. math:: @@ -30,11 +30,11 @@ When you divide an *uchar* (unsigned char - aka values between zero and 255) val A simple color space reduction algorithm would consist of just passing through every pixel of an image matrix and applying this formula. It's worth noting that we do a divide and a multiplication operation. These operations are bloody expensive for a system. If possible it's worth avoiding them by using cheaper operations such as a few subtractions, additions or, in the best case, a simple assignment.
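For clarity, the operation being applied to a single channel value might be sketched like this (an illustration only; the variable names, including *divideWith* for the reduction factor, are our own):

.. code-block:: cpp

    int divideWith = 10;   // assumed reduction factor (in the sample it comes from the command line)
    uchar value = 157;     // an example input channel value
    value = (uchar)((value / divideWith) * divideWith);  // integer division rounds down: 157 -> 150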
Furthermore, note that we only have a limited number of input values for the upper operation. In case of the *uchar* system this is 256 to be exact. -Therefore, for larger images it would be wise to calculate all possible values beforehand and during the assignment just make the assignment, by using a lookup table. Lookup tables are simple arrays (having one or more dimensions) that for a given input value variation holds the final output value. Its strength lies that we do not need to make the calculation, we just need to read the result. +Therefore, for larger images it would be wise to calculate all possible values beforehand and, during the processing, just make the assignment by using a lookup table. Lookup tables are simple arrays (having one or more dimensions) that hold the final output value for every possible input value. Their strength lies in the fact that we do not need to make the calculation; we just need to read the result. -Our test case program (and the sample presented here) will do the following: read in a console line argument image (that may be either color or gray scale - console line argument too) and apply the reduction with the given console line argument integer value. In OpenCV, at the moment they are three major ways of going through an image pixel by pixel. To make things a little more interesting will make the scanning for each image using all of these methods, and print out how long it took. +Our test case program (and the sample presented here) will do the following: read an image given as a command line argument (that may be either color or gray scale - a command line argument too) and apply the reduction with the given command line integer value. In OpenCV, at the moment there are three major ways of going through an image pixel by pixel. To make things a little more interesting, we'll scan each image using all of these methods, and print out how long it took. -You can download the full source code :download:`here <../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp>` or look it up in the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is: +You can download the full source code :download:`here <../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp>` or look it up in the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is: .. code-block:: bash @@ -45,25 +45,25 @@ The final argument is optional. If given the image will be loaded in gray scale .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 48-60 + :lines: 48-60 Here we first use the C++ *stringstream* class to convert the third command line argument from text to an integer format. Then we use a simple loop and the upper formula to calculate the lookup table, as sketched below. No OpenCV specific stuff here.
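A minimal sketch of that lookup table calculation (assuming *divideWith* already holds the parsed reduction factor; the array name is illustrative):

.. code-block:: cpp

    uchar table[256];
    for (int i = 0; i < 256; ++i)
        table[i] = (uchar)(divideWith * (i / divideWith));  // precompute every possible output value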
Another issue: how do we measure time? Well, OpenCV offers two simple functions to achieve this: :UtilitySystemFunctions:`getTickCount() ` and :UtilitySystemFunctions:`getTickFrequency() `. The first returns the number of ticks of your system's CPU since a certain event (like since you booted your system). The second returns how many ticks your CPU emits during a second. So, measuring the time elapsed between two operations, in seconds, is as easy as: .. code-block:: cpp double t = (double)getTickCount(); // do something ... - t = ((double)getTickCount() - t)/getTickFrequency(); + t = ((double)getTickCount() - t)/getTickFrequency(); cout << "Times passed in seconds: " << t << endl; -.. _How_Image_Stored_Memory: +.. _How_Image_Stored_Memory: How the image matrix is stored in the memory? ============================================= -As you could already read in my :ref:`matTheBasicImageContainer` tutorial the size of the matrix depends of the color system used. More accurately, it depends from the number of channels used. In case of a gray scale image we have something like: +As you could already read in my :ref:`matTheBasicImageContainer` tutorial, the size of the matrix depends on the color system used. More accurately, it depends on the number of channels used. In case of a gray scale image we have something like: .. math:: @@ -94,14 +94,14 @@ Note that the order of the channels is inverse: BGR instead of RGB. Because in m The efficient way ================= -When it comes to performance you cannot beat the classic C style operator[] (pointer) access. Therefore, the most efficient method we can recommend for making the assignment is: +When it comes to performance you cannot beat the classic C style operator[] (pointer) access. Therefore, the most efficient method we can recommend for making the assignment is: .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 :lines: 125-152 -Here we basically just acquire a pointer to the start of each row and go through it until it ends. In the special case that the matrix is stored in a continues manner we only need to request the pointer a single time and go all the way to the end. We need to look out for color images: we have three channels so we need to pass through three times more items in each row. +Here we basically just acquire a pointer to the start of each row and go through it until it ends. In the special case that the matrix is stored in a continuous manner we only need to request the pointer a single time and go all the way to the end. We need to look out for color images: we have three channels, so we need to pass through three times more items in each row. There's another way to do this. The *data* data member of a *Mat* object returns the pointer to the first row, first column. If this pointer is null you have no valid input in that object. Checking this is the simplest method to check whether your image loading was a success. In case the storage is continuous we can use this to go through the whole data pointer. In case of a gray scale image this would look like: @@ -114,17 +114,17 @@ There's another way of this. The *data* data member of a *Mat* object returns th You would get the same result. However, this code is a lot harder to read later on. It gets even harder if you use some more advanced technique there. Moreover, in practice I've observed you'll get the same performance result (as most of the modern compilers will probably make this small optimization trick automatically for you).
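For reference, here is a minimal sketch of that single-request pass over a continuous gray scale image (an illustration only, assuming *I* is the input *Mat* and *table* is the lookup table from before):

.. code-block:: cpp

    if (I.isContinuous())
    {
        uchar* p = I.data;                 // pointer to the first row, first column
        uchar* end = p + I.rows * I.cols;  // gray scale: one uchar per pixel
        for (; p != end; ++p)
            *p = table[*p];                // apply the precomputed reduction
    }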
-The iterator (safe) method ========================== -In case of the efficient way making sure that you pass through the right amount of *uchar* fields and to skip the gaps that may occur between the rows was your responsibility. The iterator method is considered a safer way as it takes over these tasks from the user. All you need to do is ask the begin and the end of the image matrix and then just increase the begin iterator until you reach the end. To acquire the value *pointed* by the iterator use the * operator (add it before it). +In case of the efficient way, making sure that you passed through the right number of *uchar* fields and skipped the gaps that may occur between the rows was your responsibility. The iterator method is considered a safer way as it takes over these tasks from the user. All you need to do is ask for the begin and the end of the image matrix and then just increase the begin iterator until you reach the end. To acquire the value *pointed to* by the iterator use the * operator (add it before it). .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 :lines: 154-182 -In case of color images we have three uchar items per column. This may be considered a short vector of uchar items, that has been baptized in OpenCV with the *Vec3b* name. To access the n-th sub column we use simple operator[] access. It's important to remember that OpenCV iterators go through the columns and automatically skip to the next row. Therefore in case of color images if you use a simple *uchar* iterator you'll be able to access only the blue channel values. +In case of color images we have three uchar items per column. This may be considered a short vector of uchar items, which has been baptized in OpenCV with the *Vec3b* name. To access the n-th sub column we use simple operator[] access. It's important to remember that OpenCV iterators go through the columns and automatically skip to the next row. Therefore, in case of color images, if you use a simple *uchar* iterator you'll be able to access only the blue channel values. On-the-fly address calculation with reference returning ======================================================= @@ -136,7 +136,7 @@ The final method isn't recommended for scanning. It was made to acquire or modif :tab-width: 4 :lines: 184-216 -The functions takes your input type and coordinates and calculates on the fly the address of the queried item. Then returns a reference to that. This may be a constant when you *get* the value and non-constant when you *set* the value. As a safety step in **debug mode only*** there is performed a check that your input coordinates are valid and does exist. If this isn't the case you'll get a nice output message of this on the standard error output stream. +The function takes your input type and coordinates and calculates the address of the queried item on the fly, then returns a reference to it. This may be a constant when you *get* the value and non-constant when you *set* the value. As a safety step, **in debug mode only**, there is a check that your input coordinates are valid and actually exist. If this isn't the case you'll get a nice output message on the standard error output stream.
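A minimal sketch of this access pattern for a color image (an illustration only, assuming *I* is of type *CV_8UC3* and *table* is the lookup table from before):

.. code-block:: cpp

    for (int i = 0; i < I.rows; ++i)
        for (int j = 0; j < I.cols; ++j)
        {
            Vec3b& pixel = I.at<Vec3b>(i, j);  // in debug mode the coordinates are range checked
            pixel[0] = table[pixel[0]];        // blue
            pixel[1] = table[pixel[1]];        // green
            pixel[2] = table[pixel[2]];        // red
        }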
Compared to the efficient way, in release mode the only difference in using this is that for every element of the image you'll get a new row pointer, with which we use the C operator[] to acquire the column element. If you need to do multiple lookups using this method for an image, it may be troublesome and time consuming to enter the type and the at keyword for each of the accesses. To solve this problem OpenCV has a :basicstructures:`Mat_ ` data type. It's the same as Mat, with the extra requirement that at definition you need to specify the data type through which to view the data matrix; in return you can use operator() for fast access to items. To make things even better, this is easily convertible from and to the usual :basicstructures:`Mat ` data type. A sample usage of this you can see in case of the color images of the upper function. Nevertheless, it's important to note that the same operation (with the same runtime speed) could have been done with the :basicstructures:`at() ` function. It just saves the lazy programmer some typing. diff --git a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst index 938e504eb..62afaed49 100644 --- a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst +++ b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst @@ -6,7 +6,7 @@ Interoperability with OpenCV 1 Goal ==== -For the OpenCV developer team it's important to constantly improve the library. We are constantly thinking about methods that will ease your work process, while still maintain the libraries flexibility. The new C++ interface is a development of us that serves this goal. Nevertheless, backward compatibility remains important. We do not want to break your code written for earlier version of the OpenCV library. Therefore, we made sure that we add some functions that deal with this. In the following you'll learn: +For the OpenCV developer team it's important to constantly improve the library. We are constantly thinking about methods that will ease your work process, while still maintaining the library's flexibility. The new C++ interface is a development of ours that serves this goal. Nevertheless, backward compatibility remains important. We do not want to break your code written for earlier versions of the OpenCV library. Therefore, we made sure to add some functions that deal with this. In the following you'll learn: .. container:: enumeratevisibleitemswithsquare @@ -17,9 +17,9 @@ For the OpenCV developer team it's important to constantly improve the library. General ======= -When making the switch you first need to learn some about the new data structure for images: :ref:`matTheBasicImageContainer`, this replaces the old *CvMat* and *IplImage* ones. Switching to the new functions is easier. You just need to remember a couple of new things. +When making the switch you first need to learn a bit about the new data structure for images: :ref:`matTheBasicImageContainer`, which replaces the old *CvMat* and *IplImage* ones. Switching to the new functions is easier; you just need to remember a couple of new things. -OpenCV 2 received reorganization. No longer are all the functions crammed into a single library. We have many modules, each of them containing data structures and functions relevant to certain tasks. This way you do not need to ship a large library if you use just a subset of OpenCV.
This means that you should also include only those headers you will use. For example: +OpenCV 2 received a reorganization. No longer are all the functions crammed into a single library. We have many modules, each of them containing data structures and functions relevant to certain tasks. This way you do not need to ship a large library if you use just a subset of OpenCV. This means that you should also include only those headers you will use. For example: .. code-block:: cpp @@ -28,13 +28,13 @@ OpenCV 2 received reorganization. No longer are all the functions crammed into a #include -All the OpenCV related stuff is put into the *cv* namespace to avoid name conflicts with other libraries data structures and functions. Therefore, either you need to prepend the *cv::* keyword before everything that comes from OpenCV or after the includes, you just add a directive to use this: +All the OpenCV related stuff is put into the *cv* namespace to avoid name conflicts with other libraries' data structures and functions. Therefore, you either need to prepend the *cv::* keyword to everything that comes from OpenCV or, after the includes, just add a directive to use this: .. code-block:: cpp using namespace cv; // The new C++ interface API is inside this namespace. Import it. -Because the functions are already in a namespace there is no need for them to contain the *cv* prefix in their name. As such all the new C++ compatible functions don't have this and they follow the camel case naming rule. This means the first letter is small (unless it's a name, like Canny) and the subsequent words start with a capital letter (like *copyMakeBorder*). +Because the functions are already in a namespace there is no need for them to contain the *cv* prefix in their name. As such, all the new C++ compatible functions don't have this, and they follow the camel case naming rule: the first letter is lowercase (unless it's a name, like Canny) and the subsequent words start with a capital letter (like *copyMakeBorder*). Now, remember that you need to link to your application all the modules you use, and in case you are on Windows using the *DLL* system you will need to add, again, to the path all the binaries. For more in-depth information if you're on Windows read :ref:`Windows_Visual_Studio_How_To` and for Linux an example usage is explained in :ref:`Linux_Eclipse_Usage`. @@ -42,7 +42,7 @@ Now for converting the *Mat* object you can use either the *IplImage* or the *Cv .. code-block:: cpp - Mat I; + Mat I; IplImage pI = I; CvMat mI = I; @@ -50,9 +50,9 @@ Now if you want pointers the conversion gets just a little more complicated. The .. code-block:: cpp - Mat I; - IplImage* pI = &I.operator IplImage(); - CvMat* mI = &I.operator CvMat(); + Mat I; + IplImage* pI = &I.operator IplImage(); + CvMat* mI = &I.operator CvMat(); One of the biggest complaints of the C interface is that it leaves all the memory management to you. You need to figure out when it is safe to release your unused objects and make sure you do so before the program finishes, or you could have troublesome memory leaks. To work around this issue, OpenCV introduces a sort of smart pointer. This will automatically release the object when it's no longer in use.
To use this declare the pointers as a specialization of the *Ptr* : @@ -60,11 +60,11 @@ One of the biggest complaints of the C interface is that it leaves all the memor Ptr piI = &I.operator IplImage(); -Converting from the C data structures to the *Mat* is done by passing these inside its constructor. For example: +Converting from the C data structures to the *Mat* is done by passing these inside its constructor. For example: .. code-block:: cpp - Mat K(piL), L; + Mat K(piL), L; L = Mat(pI); A case study @@ -79,7 +79,7 @@ Now that you have the basics done :download:`here's <../../../../samples/cpp/tut :tab-width: 4 :lines: 1-9, 22-25, 27-44 -Here you can observe that with the new structure we have no pointer problems, although it is possible to use the old functions and in the end just transform the result to a *Mat* object. +Here you can observe that with the new structure we have no pointer problems, although it is possible to use the old functions and in the end just transform the result to a *Mat* object. .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -87,7 +87,7 @@ Here you can observe that with the new structure we have no pointer problems, al :tab-width: 4 :lines: 46-51 -Because, we want to mess around with the images luma component we first convert from the default RGB to the YUV color space and then split the result up into separate planes. Here the program splits: in the first example it processes each plane using one of the three major image scanning algorithms in OpenCV (C [] operator, iterator, individual element access). In a second variant we add to the image some Gaussian noise and then mix together the channels according to some formula. +Because, we want to mess around with the images luma component we first convert from the default RGB to the YUV color space and then split the result up into separate planes. Here the program splits: in the first example it processes each plane using one of the three major image scanning algorithms in OpenCV (C [] operator, iterator, individual element access). In a second variant we add to the image some Gaussian noise and then mix together the channels according to some formula. The scanning version looks like: @@ -97,7 +97,7 @@ The scanning version looks like: :tab-width: 4 :lines: 55-75 -Here you can observe that we may go through all the pixels of an image in three fashions: an iterator, a C pointer and an individual element access style. You can read a more in-depth description of these in the :ref:`howToScanImagesOpenCV` tutorial. Converting from the old function names is easy. Just remove the cv prefix and use the new *Mat* data structure. Here's an example of this by using the weighted addition function: +Here you can observe that we may go through all the pixels of an image in three fashions: an iterator, a C pointer and an individual element access style. You can read a more in-depth description of these in the :ref:`howToScanImagesOpenCV` tutorial. Converting from the old function names is easy. Just remove the cv prefix and use the new *Mat* data structure. Here's an example of this by using the weighted addition function: .. 
literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -105,7 +105,7 @@ Here you can observe that we may go through all the pixels of an image in three :tab-width: 4 :lines: 79-112 -As you may observe the *planes* variable is of type *Mat*. However, converting from *Mat* to *IplImage* is easy and made automatically with a simple assignment operator. +As you may observe the *planes* variable is of type *Mat*. However, converting from *Mat* to *IplImage* is easy and made automatically with a simple assignment operator. .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -113,20 +113,17 @@ As you may observe the *planes* variable is of type *Mat*. However, converting f :tab-width: 4 :lines: 115-127 -The new *imshow* highgui function accepts both the *Mat* and *IplImage* data structures. Compile and run the program and if the first image below is your input you may get either the first or second as output: +The new *imshow* highgui function accepts both the *Mat* and *IplImage* data structures. Compile and run the program and if the first image below is your input you may get either the first or second as output: .. image:: images/outputInteropOpenCV1.jpg :alt: The output of the sample :align: center -You may observe a runtime instance of this on the `YouTube here `_ and you can :download:`download the source code from here <../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp` of the OpenCV source code library. +You may observe a runtime instance of this on the `YouTube here `_ and you can :download:`download the source code from here <../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp` of the OpenCV source code library. .. raw:: html
- - - diff --git a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst index f5bacbf79..0549a9c12 100644 --- a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst +++ b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst @@ -8,11 +8,11 @@ Mask operations on matrices are quite simple. The idea is that we recalculate ea Our test case ============= -Let us consider the issue of an image contrast enhancement method. Basically we want to apply for every pixel of the image the following formula: +Let us consider the issue of an image contrast enhancement method. Basically we want to apply for every pixel of the image the following formula: .. math:: - I(i,j) = 5*I(i,j) - [ I(i-1,j) + I(i+1,j) + I(i,j-1) + I(i,j+1)] + I(i,j) = 5*I(i,j) - [ I(i-1,j) + I(i+1,j) + I(i,j-1) + I(i,j+1)] \iff I(i,j)*M, \text{where } M = \bordermatrix{ _i\backslash ^j & -1 & 0 & +1 \cr @@ -23,12 +23,12 @@ Let us consider the issue of an image contrast enhancement method. Basically we The first notation is by using a formula, while the second is a compacted version of the first by using a mask. You use the mask by putting the center of the mask matrix (in the upper case noted by the zero-zero index) on the pixel you want to calculate and sum up the pixel values multiplied with the overlapped matrix values. It's the same thing, however in case of large matrices the latter notation is a lot easier to look over. -Now let us see how we can make this happen by using the basic pixel access method or by using the :filtering:`filter2D ` function. +Now let us see how we can make this happen by using the basic pixel access method or by using the :filtering:`filter2D ` function. The Basic Method ================ -Here's a function that will do this: +Here's a function that will do this: .. code-block:: cpp @@ -49,7 +49,7 @@ Here's a function that will do this: for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i) { - *output++ = saturate_cast(5*current[i] + *output++ = saturate_cast(5*current[i] -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]); } } @@ -87,7 +87,7 @@ We'll use the plain C [] operator to access pixels. Because we need to access mu for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i) { - *output++ = saturate_cast(5*current[i] + *output++ = saturate_cast(5*current[i] -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]); } } @@ -96,7 +96,7 @@ On the borders of the image the upper notation results inexistent pixel location .. code-block:: cpp - Result.row(0).setTo(Scalar(0)); // The top row + Result.row(0).setTo(Scalar(0)); // The top row Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row Result.col(0).setTo(Scalar(0)); // The left column Result.col(Result.cols-1).setTo(Scalar(0)); // The right column @@ -108,19 +108,19 @@ Applying such filters are so common in image processing that in OpenCV there exi .. code-block:: cpp - Mat kern = (Mat_(3,3) << 0, -1, 0, + Mat kern = (Mat_(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); -Then call the :filtering:`filter2D ` function specifying the input, the output image and the kernell to use: +Then call the :filtering:`filter2D ` function specifying the input, the output image and the kernell to use: .. 
code-block:: cpp - filter2D(I, K, I.depth(), kern ); + filter2D(I, K, I.depth(), kern ); The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (borders). Using this function has the advantage that it's shorter, less verbose and because there are some optimization techniques implemented it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference. -For example: +For example: .. image:: images/resultMatMaskFilter2D.png :alt: A sample output of the program @@ -128,7 +128,7 @@ For example: You can download this source code from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp>` or look in the OpenCV source code libraries sample directory at :file:`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`. -Check out an instance of running the program on our `YouTube channel `_ . +Check out an instance of running the program on our `YouTube channel `_ . .. raw:: html diff --git a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst index 7b3f8603f..03d82bbd4 100644 --- a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst +++ b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst @@ -19,15 +19,15 @@ For example in the above image you can see that the mirror of the car is nothing OpenCV has been around since 2001. In those days the library was built around a *C* interface and to store the image in the memory they used a C structure called *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is the manual memory management. It builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is not a problem with smaller programs, once your code base grows it will be more of a struggle to handle all this rather than focusing on solving your development goal. -Luckily C++ came around and introduced the concept of classes making easier for the user through automatic memory management (more or less). The good news is that C++ is fully compatible with C so no compatibility issues can arise from making the change. Therefore, OpenCV 2.0 introduced a new C++ interface which offered a new way of doing things which means you do not need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting embedded platforms, there's no point to using the *old* methods (unless you're a masochist programmer and you're asking for trouble). +Luckily C++ came around and introduced the concept of classes making easier for the user through automatic memory management (more or less). The good news is that C++ is fully compatible with C so no compatibility issues can arise from making the change. 
Therefore, OpenCV 2.0 introduced a new C++ interface which offered a new way of doing things which means you do not need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting embedded platforms, there's no point to using the *old* methods (unless you're a masochist programmer and you're asking for trouble). The first thing you need to know about *Mat* is that you no longer need to manually allocate its memory and release it as soon as you do not need it. While doing this is still a possibility, most of the OpenCV functions will allocate its output data manually. As a nice bonus if you pass on an already existing *Mat* object, which has already allocated the required space for the matrix, this will be reused. In other words we use at all times only as much memory as we need to perform the task. -*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address is the matrix stored, and so on) and a pointer to the matrix containing the pixel values (taking any dimensionality depending on the method chosen for storing) . The matrix header size is constant, however the size of the matrix itself may vary from image to image and usually is larger by orders of magnitude. +*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address is the matrix stored, and so on) and a pointer to the matrix containing the pixel values (taking any dimensionality depending on the method chosen for storing) . The matrix header size is constant, however the size of the matrix itself may vary from image to image and usually is larger by orders of magnitude. OpenCV is an image processing library. It contains a large collection of image processing functions. To solve a computational challenge, most of the time you will end up using multiple functions of the library. Because of this, passing images to functions is a common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computational heavy. The last thing we want to do is further decrease the speed of your program by making unnecessary copies of potentially *large* images. -To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instance of them by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself. +To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instance of them by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself. .. code-block:: cpp :linenos: @@ -39,21 +39,21 @@ To tackle this issue OpenCV uses a reference counting system. The idea is that e C = A; // Assignment operator -All the above objects, in the end, point to the same single data matrix. Their headers are different, however, and making a modification using any of them will affect all the other ones as well. 
In practice the different objects just provide different access method to the same underlying data. Nevertheless, their header parts are different. The real interesting part is that you can create headers which refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries: +All the above objects, in the end, point to the same single data matrix. Their headers are different, however, and making a modification using any of them will affect all the other ones as well. In practice the different objects just provide different access methods to the same underlying data. Nevertheless, their header parts are different. The really interesting part is that you can create headers which refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries: .. code-block:: cpp :linenos: Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle - Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries + Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries Now you may ask: if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it's no longer needed? The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() ` and :basicstructures:`copyTo() ` functions. .. code-block:: cpp :linenos: - Mat F = A.clone(); - Mat G; + Mat F = A.clone(); + Mat G; A.copyTo(G); Now modifying *F* or *G* will not affect the matrix pointed to by the *Mat* header. What you need to remember from all this is that: @@ -66,19 +66,19 @@ Now modifying *F* or *G* will not affect the matrix pointed by the *Mat* header. * The underlying matrix of an image may be copied using the :basicstructures:`clone()` and :basicstructures:`copyTo() ` functions. *Storing* methods -================= +================= -This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray. +This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale, where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray.
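For instance, a minimal sketch (an illustration only; sizes and values are arbitrary) of how the chosen data type shows up when creating a matrix explicitly:

.. code-block:: cpp

    Mat gray8(480, 640, CV_8UC1, Scalar(128));    // 8-bit unsigned, one channel: a gray scale image
    Mat gray32(480, 640, CV_32FC1, Scalar(0.5));  // 32-bit float, one channel: finer control per component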
-For *colorful* ways we have a lot more methods to choose from. Each of them breaks it down to three or four basic components and we can use the combination of these to create the others. The most popular one is RGB, mainly because this is also how our eye builds up colors. Its base colors are red, green and blue. To code the transparency of a color sometimes a fourth element: alpha (A) is added. +For *colorful* ways we have a lot more methods to choose from. Each of them breaks a color down into three or four basic components, and we can use the combination of these to create the others. The most popular one is RGB, mainly because this is also how our eyes build up colors. Its base colors are red, green and blue. To code the transparency of a color, a fourth element, alpha (A), is sometimes added. There are, however, many other color systems, each with its own advantages: .. container:: enumeratevisibleitemswithsquare * RGB is the most common, as our eyes use something similar and our display systems also compose colors using these. - * The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. You might, for example, dismiss the last component, making your algorithm less sensible to the light conditions of the input image. - * YCrCb is used by the popular JPEG image format. + * The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. You might, for example, dismiss the last component, making your algorithm less sensitive to the light conditions of the input image. + * YCrCb is used by the popular JPEG image format. * CIE L*a*b* is a perceptually uniform color space, which comes in handy if you need to measure the *distance* of a given color to another color. Each of the building components has its own valid domain. This leads us to the data type used. How we store a component defines the control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (so it can store values from 0 to 255) or signed (values from -128 to +127). Although in case of three components this already gives 16 million possible colors to represent (like in case of RGB), we may acquire an even finer control by using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in the memory.
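This footprint difference is easy to verify, because *Mat* knows its own element size; a short sketch:

.. code-block:: cpp

   Mat a(100, 100, CV_8UC3);   // 1 byte  x 3 channels per pixel
   Mat b(100, 100, CV_64FC3);  // 8 bytes x 3 channels per pixel

   size_t bytesA = a.total() * a.elemSize(); //  30 000 bytes
   size_t bytesB = b.total() * b.elemSize(); // 240 000 bytes: same image, eight times the memory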
@@ -86,13 +86,13 @@ Creating a *Mat* object explicitly ================================== -In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. +In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways: .. container:: enumeratevisibleitemswithsquare - + :basicstructures:`Mat() ` Constructor + + :basicstructures:`Mat() ` Constructor .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -105,7 +105,7 @@ Although *Mat* works really well as an image container, it is also a general mat For two dimensional and multichannel images we first define their size: row and column count wise. - Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions constructed according to the following convention: + Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions constructed according to the following convention: .. code-block:: cpp @@ -176,7 +176,7 @@ Although *Mat* works really well as an image container, it is also a general mat :alt: Demo image of the matrix output :align: center -.. note:: +.. note:: You can fill out a matrix with random values using the :operationsOnArrays:`randu() ` function. You need to give the lower and upper value for the random values: @@ -189,11 +189,11 @@ Although *Mat* works really well as an image container, it is also a general mat Output formatting ================= -In the above examples you could see the default formatting option. OpenCV, however, allows you to format your matrix output: +In the above examples you could see the default formatting option. OpenCV, however, allows you to format your matrix output: .. container:: enumeratevisibleitemswithsquare - + Default + + Default .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -215,7 +215,7 @@ In the above examples you could see the default formatting option. OpenCV, howev :alt: Default Output :align: center - + Comma separated values (CSV) + + Comma separated values (CSV) .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -255,7 +255,7 @@ OpenCV offers support for output of other common OpenCV data structures too via .. container:: enumeratevisibleitemswithsquare - + 2D Point + + 2D Point .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp
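Under the hood, the output styles shown above are produced by the ``format()`` helper of the core module, so you can also request them directly; a small sketch (the exact output layout may differ between OpenCV versions):

.. code-block:: cpp

   Mat R(3, 2, CV_8UC3);
   randu(R, Scalar::all(0), Scalar::all(255)); // fill with random values, as in the note above

   cout << format(R, "csv")    << endl; // comma separated values
   cout << format(R, "python") << endl; // ready to paste into Python
   cout << format(R, "numpy")  << endl; // ready to paste into NumPy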
diff --git a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst index ddd8ef201..4150265d1 100644 --- a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst +++ b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst @@ -44,7 +44,7 @@ Here you will learn the about the basic building blocks of the library. A must r .. |HowScanImag| image:: images/howToScanImages.jpg :height: 90pt :width: 90pt - + + .. tabularcolumns:: m{100pt} m{300pt} @@ -193,7 +193,7 @@ Here you will learn the about the basic building blocks of the library. A must r *Author:* |Author_BernatG| Did you use OpenCV before its 2.0 version? Do you want to know what happened to your library with 2.0? Don't you know how to convert your old OpenCV programs to the new C++ interface? Look here to shed light on all these questions. - + =============== ====================================================== .. |InterOOpenCV1| image:: images/interopOpenCV1.png @@ -208,7 +208,7 @@ .. toctree:: :hidden: - + ../mat_the_basic_image_container/mat_the_basic_image_container ../how_to_scan_images/how_to_scan_images ../mat-mask-operations/mat-mask-operations @@ -218,4 +218,4 @@ Here you will learn the about the basic building blocks of the library. A must r ../random_generator_and_text/random_generator_and_text ../discrete_fourier_transform/discrete_fourier_transform ../file_input_output_with_xml_yml/file_input_output_with_xml_yml - ../interoperability_with_OpenCV_1/interoperability_with_OpenCV_1 \ No newline at end of file + ../interoperability_with_OpenCV_1/interoperability_with_OpenCV_1 diff --git a/doc/tutorials/definitions/README.txt b/doc/tutorials/definitions/README.txt index 829e27cac..a598a95dd 100644 --- a/doc/tutorials/definitions/README.txt +++ b/doc/tutorials/definitions/README.txt @@ -1 +1 @@ -Include in this directory only defintion files. None of the reST files entered here will be parsed by the Sphinx Builder. \ No newline at end of file +Include in this directory only definition files. None of the reST files entered here will be parsed by the Sphinx Builder. diff --git a/doc/tutorials/definitions/noContent.rst b/doc/tutorials/definitions/noContent.rst index a273a9ad1..c2780c266 100644 --- a/doc/tutorials/definitions/noContent.rst +++ b/doc/tutorials/definitions/noContent.rst @@ -1,3 +1,3 @@ .. note:: - Unfortunetly we have no tutorials into this section. And you can help us with that, since OpenCV is a community effort. If you have a tutorial suggestion or you have written a tutorial yourself (or coded a sample code) that you would like to see here, please contact follow these instructions: :ref:`howToWriteTutorial` and :how_to_contribute:`How to contribute <>`. \ No newline at end of file + Unfortunately we have no tutorials in this section yet. You can help us with that, since OpenCV is a community effort. If you have a tutorial suggestion, or you have written a tutorial yourself (or coded a sample) that you would like to see here, please follow these instructions: :ref:`howToWriteTutorial` and :how_to_contribute:`How to contribute <>`. diff --git a/doc/tutorials/definitions/tocDefinitions.rst b/doc/tutorials/definitions/tocDefinitions.rst index 946dbb0a4..4695623cc 100644 --- a/doc/tutorials/definitions/tocDefinitions.rst +++ b/doc/tutorials/definitions/tocDefinitions.rst @@ -3,7 +3,7 @@ .. |Author_AndreyK| unicode:: Andrey U+0020 Kamaev .. |Author_LeonidBLB| unicode:: Leonid U+0020 Beynenson .. |Author_VsevolodG| unicode:: Vsevolod U+0020 Glumov -.. |Author_VictorE| unicode:: Victor U+0020 Eruhimov +.. |Author_VictorE| unicode:: Victor U+0020 Eruhimov .. |Author_ArtemM| unicode:: Artem U+0020 Myagkov .. |Author_FernandoI| unicode:: Fernando U+0020 Iglesias U+0020 Garc U+00ED a .. |Author_EduardF| unicode:: Eduard U+0020 Feicho diff --git a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst index 48d93ce0f..009d537d5 100644 --- a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst +++ b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst @@ -5,9 +5,9 @@ Detection of planar objects .. highlight:: cpp -The goal of this tutorial is to learn how to use *features2d* and *calib3d* modules for detecting known planar objects in scenes. +The goal of this tutorial is to learn how to use the *features2d* and *calib3d* modules for detecting known planar objects in scenes.
-*Test data*: use images in your data folder, for instance, ``box.png`` and ``box_in_scene.png``. +*Test data*: use images in your data folder, for instance, ``box.png`` and ``box_in_scene.png``. #. Create a new console project. Read two input images. :: @@ -22,7 +22,7 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu FastFeatureDetector detector(15); vector<KeyPoint> keypoints1; detector.detect(img1, keypoints1); - + ... // do the same for the second image #. @@ -32,7 +32,7 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu SurfDescriptorExtractor extractor; Mat descriptors1; extractor.compute(img1, keypoints1, descriptors1); - + ... // process keypoints from the second image as well #. @@ -69,4 +69,4 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu perspectiveTransform(Mat(points1), points1Projected, H); #. - Use ``drawMatches`` for drawing inliers. + Use ``drawMatches`` for drawing inliers.
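The steps elided above pair the two descriptor sets and estimate the homography ``H`` that ``perspectiveTransform`` uses. A hedged sketch of those middle steps (a brute-force matcher is just one reasonable choice here):

.. code-block:: cpp

   // SURF descriptors are float vectors, so the L2 norm is the appropriate distance.
   BFMatcher matcher(NORM_L2);
   vector<DMatch> matches;
   matcher.match(descriptors1, descriptors2, matches);

   // Collect the matched point pairs and estimate the homography robustly.
   vector<Point2f> points1, points2;
   for (size_t i = 0; i < matches.size(); i++)
   {
       points1.push_back(keypoints1[matches[i].queryIdx].pt);
       points2.push_back(keypoints2[matches[i].trainIdx].pt);
   }
   Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, 1.0);

Note that since OpenCV 2.4 the SURF and SIFT classes live in the *nonfree* module; that is why the diffs below add ``opencv2/nonfree/nonfree.hpp`` to the feature tutorials' includes.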
diff --git a/doc/tutorials/features2d/feature_description/feature_description.rst b/doc/tutorials/features2d/feature_description/feature_description.rst index 0329cc464..94b7c684e 100644 --- a/doc/tutorials/features2d/feature_description/feature_description.rst +++ b/doc/tutorials/features2d/feature_description/feature_description.rst @@ -100,6 +100,3 @@ Result .. image:: images/Feature_Description_BruteForce_Result.jpg :align: center :height: 200pt - - - diff --git a/doc/tutorials/features2d/feature_detection/feature_detection.rst b/doc/tutorials/features2d/feature_detection/feature_detection.rst index 26798f8f6..0e690404a 100644 --- a/doc/tutorials/features2d/feature_detection/feature_detection.rst +++ b/doc/tutorials/features2d/feature_detection/feature_detection.rst @@ -31,6 +31,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; @@ -94,4 +95,3 @@ Result .. image:: images/Feature_Detection_Result_b.jpg :align: center :height: 200pt - diff --git a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst index 54d28890a..3bf757fc0 100644 --- a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst +++ b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst @@ -28,6 +28,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; diff --git a/doc/tutorials/features2d/feature_homography/feature_homography.rst b/doc/tutorials/features2d/feature_homography/feature_homography.rst index ad764ce9b..e195e53f4 100644 --- a/doc/tutorials/features2d/feature_homography/feature_homography.rst +++ b/doc/tutorials/features2d/feature_homography/feature_homography.rst @@ -30,6 +30,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; @@ -145,4 +146,3 @@ Result .. image:: images/Feature_Homography_Result.jpg :align: center :height: 200pt - diff --git a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst index be0f515b5..f4107804b 100644 --- a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst +++ b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst @@ -5,166 +5,166 @@ Learn about how to use the feature points detectors, descriptors and matching framework found inside OpenCV. -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |Harris| **Title:** :ref:`harris_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Why is it a good idea to track corners? We learn to use the Harris method to detect corners - + ===================== ============================================== - + .. |Harris| image:: images/trackingmotion/Harris_Detector_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ShiTomasi| **Title:** :ref:`good_features_to_track` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we use an improved method to detect corners more accurately - + ===================== ============================================== - + .. |ShiTomasi| image:: images/trackingmotion/Shi_Tomasi_Detector_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |GenericCorner| **Title:** :ref:`generic_corner_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here you will learn how to use OpenCV functions to make your personalized corner detector! - + ===================== ============================================== - + .. |GenericCorner| image:: images/trackingmotion/Generic_Corner_Detector_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |Subpixel| **Title:** :ref:`corner_subpixeles` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Is pixel resolution enough? Here we learn a simple method to improve our accuracy. - + ===================== ============================================== - + .. |Subpixel| image:: images/trackingmotion/Corner_Subpixeles_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureDetect| **Title:** :ref:`feature_detection` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* to detect interest points. - + ===================== ============================================== - + .. |FeatureDetect| image:: images/Feature_Detection_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv ===================== ============================================== |FeatureDescript| **Title:** :ref:`feature_description` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* to calculate feature vectors. - + ===================== ============================================== - + .. |FeatureDescript| image:: images/Feature_Description_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureFlann| **Title:** :ref:`feature_flann_matcher` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use the FLANN library to make a fast matching. - + ===================== ============================================== - + .. |FeatureFlann| image:: images/Feature_Flann_Matcher_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureHomo| **Title:** :ref:`feature_homography` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* and *calib3d* to detect an object in a scene. - + ===================== ============================================== - + .. |FeatureHomo| image:: images/Feature_Homography_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -175,7 +175,7 @@ Learn about how to use the feature points detectors, descriptors and matching f *Author:* |Author_VictorE| - You will use *features2d* and *calib3d* modules for detecting known planar objects in scenes. + You will use *features2d* and *calib3d* modules for detecting known planar objects in scenes. ===================== ============================================== @@ -201,4 +201,3 @@ Learn about how to use the feature points detectors, descriptors and matching f ../feature_flann_matcher/feature_flann_matcher ../feature_homography/feature_homography ../detection_of_planar_objects/detection_of_planar_objects - diff --git a/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.rst b/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.rst index 1b405e46c..a0f184a23 100644 --- a/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.rst +++ b/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.rst @@ -87,14 +87,14 @@ This tutorial code's is shown lines below. You can also download it from `here < /// Apply corner detection goodFeaturesToTrack( src_gray, - corners, - maxCorners, - qualityLevel, - minDistance, - Mat(), - blockSize, - useHarrisDetector, - k ); + corners, + maxCorners, + qualityLevel, + minDistance, + Mat(), + blockSize, + useHarrisDetector, + k ); /// Draw corners detected @@ -135,4 +135,3 @@ Here is the result: .. 
image:: images/Corner_Subpixeles_Result.jpg :align: center - diff --git a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst index 465ff216c..d33bf3df6 100644 --- a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst +++ b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst @@ -37,4 +37,3 @@ Result .. image:: images/My_Shi_Tomasi_corner_detector_Result.jpg :align: center - diff --git a/doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.rst b/doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.rst index e69937eaa..63b0d9749 100644 --- a/doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.rst +++ b/doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.rst @@ -118,5 +118,3 @@ Result .. image:: images/Feature_Detection_Result_a.jpg :align: center - - diff --git a/doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.rst b/doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.rst index cb9650775..8eb72ebfd 100644 --- a/doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.rst +++ b/doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.rst @@ -98,16 +98,16 @@ How does it work? u & v \end{bmatrix} \left ( - \displaystyle \sum_{x,y} + \displaystyle \sum_{x,y} w(x,y) \begin{bmatrix} I_x^{2} & I_{x}I_{y} \\ I_xI_{y} & I_{y}^{2} - \end{bmatrix} - \right ) - \begin{bmatrix} + \end{bmatrix} + \right ) + \begin{bmatrix} u \\ - v + v \end{bmatrix} * Let's denote: @@ -115,11 +115,11 @@ How does it work? .. math:: M = \displaystyle \sum_{x,y} - w(x,y) - \begin{bmatrix} - I_x^{2} & I_{x}I_{y} \\ - I_xI_{y} & I_{y}^{2} - \end{bmatrix} + w(x,y) + \begin{bmatrix} + I_x^{2} & I_{x}I_{y} \\ + I_xI_{y} & I_{y}^{2} + \end{bmatrix} * So, our equation now is: @@ -128,10 +128,10 @@ How does it work? E(u,v) \approx \begin{bmatrix} u & v \end{bmatrix} - M - \begin{bmatrix} + M + \begin{bmatrix} u \\ - v + v \end{bmatrix} @@ -243,5 +243,3 @@ The detected corners are surrounded by a small black circle .. image:: images/Harris_Detector_Result.jpg :align: center - - diff --git a/doc/tutorials/general/table_of_content_general/table_of_content_general.rst b/doc/tutorials/general/table_of_content_general/table_of_content_general.rst index 2a6d57ffb..6e127a98f 100644 --- a/doc/tutorials/general/table_of_content_general/table_of_content_general.rst +++ b/doc/tutorials/general/table_of_content_general/table_of_content_general.rst @@ -10,4 +10,3 @@ These tutorials are the bottom of the iceberg as they link together multiple of .. raw:: latex \pagebreak - diff --git a/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst b/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst index a3fef4328..91b25833b 100644 --- a/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst +++ b/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst @@ -7,7 +7,7 @@ Squeeze out every little computation power from your system by using the power o .. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv @@ -18,7 +18,7 @@ Squeeze out every little computation power from your system by using the power o *Author:* |Author_BernatG| - This will give a good grasp on how to approach coding on the GPU module, once you already know how to handle the other modules. As a test case it will port the similarity methods from the tutorial :ref:`videoInputPSNRMSSIM` to the GPU. + This will give a good grasp on how to approach coding on the GPU module, once you already know how to handle the other modules. As a test case it will port the similarity methods from the tutorial :ref:`videoInputPSNRMSSIM` to the GPU. =============== ====================================================== diff --git a/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst b/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst index 36fbc92da..ef6eacce2 100644 --- a/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst +++ b/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst @@ -3,30 +3,30 @@ *highgui* module. High Level GUI and Media ------------------------------------------ -This section contains valuable tutorials about how to read/save your image/video files and how to use the built-in graphical user interface of the library. +This section contains valuable tutorials about how to read/save your image/video files and how to use the built-in graphical user interface of the library. .. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + =============== ====================================================== |Beginners_5| *Title:* :ref:`Adding_Trackbars` - + *Compatibility:* > OpenCV 2.0 *Author:* |Author_AnaH| - + We will learn how to add a Trackbar to our applications - + =============== ====================================================== - + .. |Beginners_5| image:: images/Adding_Trackbars_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -34,7 +34,7 @@ This section contains valuable tutorials about how to read/save your image/video |hVideoInput| *Title:* :ref:`videoInputPSNRMSSIM` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_BernatG| You will learn how to read video streams, and how to calculate similarity values such as PSNR or SSIM. @@ -45,7 +45,7 @@ This section contains valuable tutorials about how to read/save your image/video :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -74,4 +74,4 @@ This section contains valuable tutorials about how to read/save your image/video ../trackbar/trackbar ../video-input-psnr-ssim/video-input-psnr-ssim - ../video-write/video-write \ No newline at end of file + ../video-write/video-write diff --git a/doc/tutorials/highgui/trackbar/trackbar.rst b/doc/tutorials/highgui/trackbar/trackbar.rst index 21f8b3687..e5fbb85f5 100644 --- a/doc/tutorials/highgui/trackbar/trackbar.rst +++ b/doc/tutorials/highgui/trackbar/trackbar.rst @@ -5,11 +5,11 @@ Adding a Trackbar to our applications! * In the previous tutorials (about *linear blending* and the *brightness and contrast adjustments*) you might have noted that we needed to give some **input** to our programs, such as :math:`\alpha` and :math:`\beta`. We accomplished that by entering this data using the Terminal -* Well, it is time to use some fancy GUI tools. OpenCV provides some GUI utilities (*highgui.h*) for you. 
An example of this is a **Trackbar** .. image:: images/Adding_Trackbars_Tutorial_Trackbar.png :alt: Trackbar example - :align: center + :align: center * In this tutorial we will just modify our two previous programs so that they get the input information from the trackbar. Goals ===== In this tutorial you will learn how to: -* Add a Trackbar in an OpenCV window by using :create_trackbar:`createTrackbar <>` +* Add a Trackbar in an OpenCV window by using :create_trackbar:`createTrackbar <>` Code ===== @@ -33,13 +33,13 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let using namespace cv; - /// Global Variables + /// Global Variables const int alpha_slider_max = 100; - int alpha_slider; + int alpha_slider; double alpha; - double beta; + double beta; - /// Matrices to store images + /// Matrices to store images Mat src1; Mat src2; Mat dst; @@ -49,12 +49,12 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let * @brief Callback for trackbar */ void on_trackbar( int, void* ) - { + { alpha = (double) alpha_slider/alpha_slider_max ; beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); } @@ -67,7 +67,7 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let if( !src1.data ) { printf("Error loading src1 \n"); return -1; } if( !src2.data ) { printf("Error loading src2 \n"); return -1; } - /// Initialize values + /// Initialize values alpha_slider = 0; /// Create Windows @@ -75,13 +75,13 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let /// Create Trackbars char TrackbarName[50]; - sprintf( TrackbarName, "Alpha x %d", alpha_slider_max ); + sprintf( TrackbarName, "Alpha x %d", alpha_slider_max ); createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar ); /// Show some stuff on_trackbar( alpha_slider, 0 ); - + /// Wait until user press some key waitKey(0); return 0; @@ -113,7 +113,7 @@ We only analyze the code that is related to Trackbar: createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar ); Note the following: - + * Our Trackbar has a label **TrackbarName** * The Trackbar is located in the window named **"Linear Blend"** * The Trackbar values will be in the range from :math:`0` to **alpha_slider_max** (the minimum limit is always **zero**). @@ -125,21 +125,21 @@ We only analyze the code that is related to Trackbar: .. code-block:: cpp void on_trackbar( int, void* ) - { + { alpha = (double) alpha_slider/alpha_slider_max ; beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); } Note that: - - * We use the value of **alpha_slider** (integer) to get a double value for **alpha**. + + * We use the value of **alpha_slider** (integer) to get a double value for **alpha**. * **alpha_slider** is updated each time the trackbar is displaced by the user. * We define *src1*, *src2*, *dst*, *alpha*, *alpha_slider* and *beta* as global variables, so they can be used everywhere. - + 
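If you prefer to avoid the globals, ``createTrackbar`` also accepts a user-data pointer as a sixth argument and forwards it to the callback on every change. A hedged sketch of that variant (the ``BlendData`` struct is ours, not part of the tutorial):

.. code-block:: cpp

   struct BlendData { Mat src1, src2, dst; };

   void on_trackbar_data( int pos, void* userdata )
   {
     BlendData* d = (BlendData*) userdata;
     double alpha = (double) pos / alpha_slider_max;
     addWeighted( d->src1, alpha, d->src2, 1.0 - alpha, 0.0, d->dst );
     imshow( "Linear Blend", d->dst );
   }

   // in main():  BlendData data;  /* load the images into data */
   // createTrackbar( TrackbarName, "Linear Blend", &alpha_slider,
   //                 alpha_slider_max, on_trackbar_data, &data );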
Result ======= @@ -147,15 +147,10 @@ Result .. image:: images/Adding_Trackbars_Tutorial_Result_0.jpg :alt: Adding Trackbars - Windows Linux - :align: center + :align: center * As a manner of practice, you can also add two trackbars for the program made in :ref:`Basic_Linear_Transform`. One trackbar to set :math:`\alpha` and another for :math:`\beta`. The output might look like: .. image:: images/Adding_Trackbars_Tutorial_Result_1.jpg :alt: Adding Trackbars - Lena - :align: center - - - - - + :align: center diff --git a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst index b9c1201b8..6f5476cf0 100644 --- a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst +++ b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst @@ -64,7 +64,7 @@ Closing the video is automatic when the objects destructor is called. However, i captRefrnc >> frameReference; captUndTst.open(frameUnderTest); -The upper read operations will leave empty the *Mat* objects if no frame could be acquired (either cause the video stream was closed or you got to the end of the video file). We can check this with a simple if: +The read operations above will leave the *Mat* objects empty if no frame could be acquired (either because the video stream was closed or because you reached the end of the video file). We can check this with a simple if: .. code-block:: cpp @@ -111,7 +111,7 @@ Then the PSNR is expressed as: PSNR = 10 \cdot \log_{10} \left( \frac{MAX_I^2}{MSE} \right) -Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like: +Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined, so we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like: .. code-block:: cpp @@ -136,13 +136,13 @@ Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the } } -Typically result values are anywhere between 30 and 50 for video compression, where higher is better. If the images significantly differ you'll get much lower ones like 15 and so. This similarity check is easy and fast to calculate, however in practice it may turn out somewhat inconsistent with human eye perception. The **structural similarity** algorithm aims to correct this. +Typically result values are anywhere between 30 and 50 for video compression, where higher is better. If the images differ significantly, you'll get much lower values, around 15 or so. This similarity check is easy and fast to calculate; however, in practice it may turn out somewhat inconsistent with human eye perception. The **structural similarity** algorithm aims to correct this.
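Translating the formula is short; here is a compact reconstruction of such a PSNR function (a sketch consistent with the text above, not necessarily the tutorial's exact source):

.. code-block:: cpp

   double getPSNR(const Mat& I1, const Mat& I2)
   {
     Mat s1;
     absdiff(I1, I2, s1);       // |I1 - I2|
     s1.convertTo(s1, CV_32F);  // widen before squaring; 8 bits would overflow
     s1 = s1.mul(s1);           // (I1 - I2)^2 per element

     Scalar s = sum(s1);        // per-channel sums
     double sse = s.val[0] + s.val[1] + s.val[2];

     if( sse <= 1e-10 )         // for identical images the PSNR is undefined
         return 0;

     double mse = sse / (double)(I1.channels() * I1.total());
     return 10.0 * log10((255 * 255) / mse);
   }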
-Describing the methods goes well beyond the purpose of this tutorial. For that I invite you to read the article introducing it. Nevertheless, you can get a good image of it by looking at the OpenCV implementation below. +Describing the methods goes well beyond the purpose of this tutorial. For that I invite you to read the article introducing it. Nevertheless, you can get a good picture of it by looking at the OpenCV implementation below. .. seealso:: - SSIM is described more in-depth in the: "Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article. + SSIM is described in more depth in the article: "Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." .. code-block:: cpp @@ -162,7 +162,7 @@ Describing the methods goes well beyond the purpose of this tutorial. For that I /***********************PRELIMINARY COMPUTING ******************************/ - Mat mu1, mu2; // + Mat mu1, mu2; // GaussianBlur(I1, mu1, Size(11, 11), 1.5); GaussianBlur(I2, mu2, Size(11, 11), 1.5); @@ -199,7 +199,7 @@ Describing the methods goes well beyond the purpose of this tutorial. For that I return mssim; } -This will return a similarity index for each channel of the image. This value is between zero and one, where one corresponds to perfect fit. Unfortunately, the many Gaussian blurring is quite costly, so while the PSNR may work in a real time like environment (24 frame per second) this will take significantly more than to accomplish similar performance results. +This will return a similarity index for each channel of the image. This value is between zero and one, where one corresponds to a perfect fit. Unfortunately, the many Gaussian blur operations are quite costly, so while the PSNR may work in a real-time environment (24 frames per second), the SSIM will take significantly longer to produce similar results. Therefore, the source code presented at the start of the tutorial will perform the PSNR measurement for each frame, and the SSIM only for the frames where the PSNR falls below an input value. For visualization purposes we show both images in an OpenCV window and print the PSNR and MSSIM values to the console. Expect to see something like: @@ -207,7 +207,7 @@ Therefore, the source code presented at the start of the tutorial will perform t :alt: A sample output :align: center -You may observe a runtime instance of this on the `YouTube here `_. +You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html
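To make that gating concrete, here is a hedged sketch of the per-frame loop (the function and variable names follow the tutorial's source and are assumed here):

.. code-block:: cpp

   double psnrTriggerValue = 35.0; // run the costly SSIM only on suspicious frames

   for(;;)
   {
     captRefrnc >> frameReference;
     captUndTst >> frameUnderTest;
     if( frameReference.empty() || frameUnderTest.empty() )
         break; // reached the end of one of the videos

     double psnrV = getPSNR(frameReference, frameUnderTest);
     if( psnrV < psnrTriggerValue && psnrV != 0 )
     {
         Scalar mssimV = getMSSIM(frameReference, frameUnderTest);
         // print or log the per-channel similarity stored in mssimV
     }
   }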
diff --git a/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.rst b/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.rst index 9bd460d15..300327aba 100644 --- a/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.rst +++ b/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.rst @@ -112,21 +112,21 @@ This tutorial code's is shown lines below. You can also download it from `here < /// Create Erosion Trackbar createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo", - &erosion_elem, max_elem, - Erosion ); + &erosion_elem, max_elem, + Erosion ); createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo", - &erosion_size, max_kernel_size, - Erosion ); + &erosion_size, max_kernel_size, + Erosion ); /// Create Dilation Trackbar createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo", - &dilation_elem, max_elem, - Dilation ); + &dilation_elem, max_elem, + Dilation ); createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo", - &dilation_size, max_kernel_size, - Dilation ); + &dilation_size, max_kernel_size, + Dilation ); /// Default start Erosion( 0, 0 ); @@ -145,8 +145,8 @@ This tutorial code's is shown lines below. You can also download it from `here < else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( erosion_type, - Size( 2*erosion_size + 1, 2*erosion_size+1 ), - Point( erosion_size, erosion_size ) ); + Size( 2*erosion_size + 1, 2*erosion_size+1 ), + Point( erosion_size, erosion_size ) ); /// Apply the erosion operation erode( src, erosion_dst, element ); @@ -162,8 +162,8 @@ This tutorial code's is shown lines below. You can also download it from `here < else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( dilation_type, - Size( 2*dilation_size + 1, 2*dilation_size+1 ), - Point( dilation_size, dilation_size ) ); + Size( 2*dilation_size + 1, 2*dilation_size+1 ), + Point( dilation_size, dilation_size ) ); /// Apply the dilation operation dilate( src, dilation_dst, element ); imshow( "Dilation Demo", dilation_dst ); @@ -201,8 +201,8 @@ Explanation else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( erosion_type, - Size( 2*erosion_size + 1, 2*erosion_size+1 ), - Point( erosion_size, erosion_size ) ); + Size( 2*erosion_size + 1, 2*erosion_size+1 ), + Point( erosion_size, erosion_size ) ); /// Apply the erosion operation erode( src, erosion_dst, element ); imshow( "Erosion Demo", erosion_dst ); @@ -216,17 +216,17 @@ Explanation .. code-block:: cpp - Mat element = getStructuringElement( erosion_type, - Size( 2*erosion_size + 1, 2*erosion_size+1 ), - Point( erosion_size, erosion_size ) ); + Mat element = getStructuringElement( erosion_type, + Size( 2*erosion_size + 1, 2*erosion_size+1 ), + Point( erosion_size, erosion_size ) ); We can choose any of three shapes for our kernel: .. container:: enumeratevisibleitemswithsquare - + Rectangular box: MORPH_RECT - + Cross: MORPH_CROSS - + Ellipse: MORPH_ELLIPSE + + Rectangular box: MORPH_RECT + + Cross: MORPH_CROSS + + Ellipse: MORPH_ELLIPSE Then, we just have to specify the size of our kernel and the *anchor point*. If not specified, it is assumed to be in the center.
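As a side note, the structuring element may also be left as an empty ``Mat()``; ``erode`` and ``dilate`` then fall back to a 3x3 rectangular kernel anchored at its center:

.. code-block:: cpp

   erode( src, erosion_dst, Mat() ); // default: 3x3 rectangular structuring element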
@@ -251,8 +251,8 @@ The code is below. As you can see, it is completely similar to the snippet of co else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( dilation_type, - Size( 2*dilation_size + 1, 2*dilation_size+1 ), - Point( dilation_size, dilation_size ) ); + Size( 2*dilation_size + 1, 2*dilation_size+1 ), + Point( dilation_size, dilation_size ) ); /// Apply the dilation operation dilate( src, dilation_dst, element ); imshow( "Dilation Demo", dilation_dst ); diff --git a/doc/tutorials/imgproc/histograms/back_projection/back_projection.rst b/doc/tutorials/imgproc/histograms/back_projection/back_projection.rst index f8b134322..923137263 100644 --- a/doc/tutorials/imgproc/histograms/back_projection/back_projection.rst +++ b/doc/tutorials/imgproc/histograms/back_projection/back_projection.rst @@ -94,7 +94,7 @@ Code * Loads an image * Convert the original to HSV format and separate only *Hue* channel to be used for the Histogram (using the OpenCV function :mix_channels:`mixChannels <>`) * Let the user enter the number of bins to be used in the calculation of the histogram. - * Calculate the histogram (and update it if the bins change) and the backprojection of the same image. + * Calculate the histogram (and update it if the bins change) and the backprojection of the same image. * Display the backprojection and the histogram in windows. * **Downloadable code**: diff --git a/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.rst b/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.rst index de1567abb..f2859c1be 100644 --- a/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.rst +++ b/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.rst @@ -329,4 +329,3 @@ Result .. image:: images/Histogram_Calculation_Result.jpg :align: center - diff --git a/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst b/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst index e0c643d05..e7f0e818e 100644 --- a/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst +++ b/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst @@ -369,4 +369,3 @@ Results .. image:: images/Template_Matching_Image_Result.jpg :align: center - diff --git a/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.rst b/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.rst index 52b10468b..06257fa1b 100644 --- a/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.rst +++ b/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.rst @@ -282,6 +282,3 @@ Result :align: center * Notice how the image is superposed to the black background on the edge regions. - - - diff --git a/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.rst b/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.rst index dfb57c03c..8c759c00c 100644 --- a/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.rst +++ b/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.rst @@ -290,4 +290,3 @@ We get the following result by using the Probabilistic Hough Line Transform: :align: center You may observe that the number of lines detected varies while you change the *threshold*. The explanation is fairly evident: if you establish a higher threshold, fewer lines will be detected (since you will need more points to declare a line detected).
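In code, that threshold is the accumulator vote count, the fifth argument of the probabilistic transform, so raising it directly prunes weakly supported lines. A hedged sketch (the parameter values are illustrative only; ``dst`` stands for the edge map produced by Canny):

.. code-block:: cpp

   vector<Vec4i> lines;
   HoughLinesP( dst, lines, 1, CV_PI/180,
                80,    // threshold: minimum number of votes needed to accept a line
                30,    // minLineLength
                10 );  // maxLineGap
   // raising 80 to 150 demands more supporting points, so fewer lines are returned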
- diff --git a/doc/tutorials/imgproc/imgtrans/remap/remap.rst b/doc/tutorials/imgproc/imgtrans/remap/remap.rst index 27eef0d11..c4a6671fa 100644 --- a/doc/tutorials/imgproc/imgtrans/remap/remap.rst +++ b/doc/tutorials/imgproc/imgtrans/remap/remap.rst @@ -124,34 +124,34 @@ Code for( int j = 0; j < src.rows; j++ ) { for( int i = 0; i < src.cols; i++ ) - { switch( ind ) - { - case 0: - if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 ) + { + case 0: + if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 ) { - map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ; - map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ; - } - else - { map_x.at<float>(j,i) = 0 ; - map_y.at<float>(j,i) = 0 ; + map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ; + map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ; + } + else + { map_x.at<float>(j,i) = 0 ; + map_y.at<float>(j,i) = 0 ; } break; - case 1: - map_x.at<float>(j,i) = i ; - map_y.at<float>(j,i) = src.rows - j ; - break; + case 1: + map_x.at<float>(j,i) = i ; + map_y.at<float>(j,i) = src.rows - j ; + break; case 2: - map_x.at<float>(j,i) = src.cols - i ; - map_y.at<float>(j,i) = j ; - break; + map_x.at<float>(j,i) = src.cols - i ; + map_y.at<float>(j,i) = j ; + break; case 3: - map_x.at<float>(j,i) = src.cols - i ; - map_y.at<float>(j,i) = src.rows - j ; - break; + map_x.at<float>(j,i) = src.cols - i ; + map_y.at<float>(j,i) = src.rows - j ; + break; } // end of switch - } } ind++; } @@ -241,34 +241,34 @@ Explanation for( int j = 0; j < src.rows; j++ ) { for( int i = 0; i < src.cols; i++ ) - { switch( ind ) - { - case 0: - if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 ) + { + case 0: + if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 ) { - map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ; - map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ; - } - else - { map_x.at<float>(j,i) = 0 ; - map_y.at<float>(j,i) = 0 ; + map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ; + map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ; + } + else + { map_x.at<float>(j,i) = 0 ; + map_y.at<float>(j,i) = 0 ; } break; - case 1: - map_x.at<float>(j,i) = i ; - map_y.at<float>(j,i) = src.rows - j ; - break; + case 1: + map_x.at<float>(j,i) = i ; + map_y.at<float>(j,i) = src.rows - j ; + break; case 2: - map_x.at<float>(j,i) = src.cols - i ; - map_y.at<float>(j,i) = j ; - break; + map_x.at<float>(j,i) = src.cols - i ; + map_y.at<float>(j,i) = j ; + break; case 3: - map_x.at<float>(j,i) = src.cols - i ; - map_y.at<float>(j,i) = src.rows - j ; - break; + map_x.at<float>(j,i) = src.cols - i ; + map_y.at<float>(j,i) = src.rows - j ; + break; } // end of switch - } } ind++; } @@ -311,4 +311,3 @@ Result :alt: Result 0 for remapping :width: 250pt :align: center -
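Once ``map_x`` and ``map_y`` are filled, applying them is a single call per iteration; the tutorial's update step boils down to (a sketch matching the variable names above):

.. code-block:: cpp

   remap( src, dst, map_x, map_y, CV_INTER_LINEAR,
          BORDER_CONSTANT, Scalar(0, 0, 0) );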
diff --git a/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.rst b/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.rst index 8c08d22e4..b2e3be908 100644 --- a/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.rst +++ b/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.rst @@ -306,4 +306,3 @@ Result :alt: Original image :width: 250pt :align: center - diff --git a/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.rst b/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.rst index db96faa2d..9cb14ab02 100644 --- a/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.rst +++ b/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.rst @@ -154,13 +154,13 @@ This tutorial code's is shown lines below. You can also download it from `here < /// Create Trackbar to select kernel type createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name, - &morph_elem, max_elem, - Morphology_Operations ); + &morph_elem, max_elem, + Morphology_Operations ); /// Create Trackbar to choose kernel size createTrackbar( "Kernel size:\n 2n +1", window_name, - &morph_size, max_kernel_size, - Morphology_Operations ); + &morph_size, max_kernel_size, + Morphology_Operations ); /// Default start Morphology_Operations( 0, 0 ); @@ -211,16 +211,16 @@ Explanation .. code-block:: cpp createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name, - &morph_elem, max_elem, - Morphology_Operations ); + &morph_elem, max_elem, + Morphology_Operations ); * The final trackbar **"Kernel Size"** returns the size of the kernel to be used (**morph_size**) .. code-block:: cpp createTrackbar( "Kernel size:\n 2n +1", window_name, - &morph_size, max_kernel_size, - Morphology_Operations ); + &morph_size, max_kernel_size, + Morphology_Operations ); * Every time we move any slider, the user's function **Morphology_Operations** will be called to perform a new morphology operation, and it will update the output image based on the current trackbar values. @@ -279,4 +279,3 @@ Results .. image:: images/Morphology_2_Tutorial_Cover.jpg :alt: Morphology 2: Result sample :align: center - diff --git a/doc/tutorials/imgproc/pyramids/pyramids.rst b/doc/tutorials/imgproc/pyramids/pyramids.rst index ee40bf72e..b9d0d4e93 100644 --- a/doc/tutorials/imgproc/pyramids/pyramids.rst +++ b/doc/tutorials/imgproc/pyramids/pyramids.rst @@ -259,5 +259,3 @@ Results .. image:: images/Pyramids_Tutorial_PyrUp_Result.jpg :alt: Pyramids: PyrUp Result :align: center - - diff --git a/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.rst b/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.rst index d073a1b92..069ba64b0 100644 --- a/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.rst +++ b/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.rst @@ -121,4 +121,3 @@ Result .. |BRC_1| image:: images/Bounding_Rects_Circles_Result.jpg :align: middle - diff --git a/doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.rst b/doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.rst index 894df8605..1bbfeb3d6 100644 --- a/doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.rst +++ b/doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.rst @@ -123,4 +123,3 @@ Result .. |BRE_1| image:: images/Bounding_Rotated_Ellipses_Result.jpg :align: middle - diff --git a/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.rst b/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.rst index decdf31ef..f145506ce 100644 --- a/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.rst +++ b/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.rst @@ -104,4 +104,3 @@ Result .. 
|contour_1| image:: images/Find_Contours_Result.jpg :align: middle - diff --git a/doc/tutorials/imgproc/shapedescriptors/hull/hull.rst b/doc/tutorials/imgproc/shapedescriptors/hull/hull.rst index c6abdd2c8..a17c3e9f4 100644 --- a/doc/tutorials/imgproc/shapedescriptors/hull/hull.rst +++ b/doc/tutorials/imgproc/shapedescriptors/hull/hull.rst @@ -113,4 +113,3 @@ Result .. |Hull_1| image:: images/Hull_Result.jpg :align: middle - diff --git a/doc/tutorials/imgproc/shapedescriptors/moments/moments.rst b/doc/tutorials/imgproc/shapedescriptors/moments/moments.rst index 6ef2de6ee..847bf362d 100644 --- a/doc/tutorials/imgproc/shapedescriptors/moments/moments.rst +++ b/doc/tutorials/imgproc/shapedescriptors/moments/moments.rst @@ -133,4 +133,3 @@ Result .. |MU_2| image:: images/Moments_Result2.jpg :width: 250pt :align: middle - diff --git a/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.rst b/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.rst index a73a8e92e..c333bf5d8 100644 --- a/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.rst +++ b/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.rst @@ -114,4 +114,3 @@ Result .. |PPT_1| image:: images/Point_Polygon_Test_Result.jpg :align: middle - diff --git a/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst b/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst index 011dedd36..a371c02c8 100644 --- a/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst +++ b/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst @@ -7,502 +7,502 @@ In this section you will learn about the image processing (manipulation) functio .. include:: ../../definitions/tocDefinitions.rst - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ImageProcessing_1| **Title:** :ref:`Smoothing` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Let's take a look at some basic linear filters! - + ===================== ============================================== - + .. |ImageProcessing_1| image:: images/Smoothing_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ImageProcessing_2| **Title:** :ref:`Morphology_1` - + *Compatibility:* > OpenCV 2.0 - + Author: |Author_AnaH| - + Let's *change* the shape of objects! - + ===================== ============================================== - + .. |ImageProcessing_2| image:: images/Morphology_1_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ================= ================================================== |Morphology_2| **Title:** :ref:`Morphology_2` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here we investigate different morphology operators - + ================= ================================================== - + .. |Morphology_2| image:: images/Morphology_2_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Pyramids| **Title:** :ref:`Pyramids` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + What if I need a bigger/smaller image? 
- + ===================== ============================================== - + .. |Pyramids| image:: images/Pyramids_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Threshold| **Title:** :ref:`Basic_Threshold` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + After so much processing, it is time to decide which pixels stay! - + ===================== ============================================== - + .. |Threshold| image:: images/Threshold_Tutorial_Cover.jpg :height: 90pt :width: 90pt - + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - -+ + ++ ===================== ============================================== |Filter_2D| **Title:** :ref:`filter_2d` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn to design our own filters by using OpenCV functions - + ===================== ============================================== - + .. |Filter_2D| image:: images/imgtrans/Filter_2D_Tutorial_Cover.jpg :height: 90pt :width: 90pt - + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - -+ + ++ ===================== ============================================== |CopyMakeBorder| **Title:** :ref:`copyMakeBorderTutorial` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to pad our images! - + ===================== ============================================== - + .. |CopyMakeBorder| image:: images/imgtrans/CopyMakeBorder_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |SobelDerivatives| **Title:** :ref:`sobel_derivatives` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to calculate gradients and use them to detect edges! - + ===================== ============================================== - + .. |SobelDerivatives| image:: images/imgtrans/Sobel_Derivatives_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |LaplaceOperator| **Title:** :ref:`laplace_operator` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn about the *Laplace* operator and how to detect edges with it. - + ===================== ============================================== - + .. |LaplaceOperator| image:: images/imgtrans/Laplace_Operator_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |CannyDetector| **Title:** :ref:`canny_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn a sophisticated alternative to detect edges. - + ===================== ============================================== - + .. |CannyDetector| image:: images/imgtrans/Canny_Detector_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv - + ===================== ============================================== |HoughLines| **Title:** :ref:`hough_lines` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to detect lines - + ===================== ============================================== - + .. |HoughLines| image:: images/imgtrans/Hough_Lines_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HoughCircle| **Title:** :ref:`hough_circle` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to detect circles - + ===================== ============================================== - + .. |HoughCircle| image:: images/imgtrans/Hough_Circle_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Remap| **Title:** :ref:`remap` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to manipulate pixels locations - + ===================== ============================================== - + .. |Remap| image:: images/imgtrans/Remap_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |WarpAffine| **Title:** :ref:`warp_affine` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to rotate, translate and scale our images - + ===================== ============================================== - + .. |WarpAffine| image:: images/imgtrans/Warp_Affine_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HistEqualization| **Title:** :ref:`histogram_equalization` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to improve the contrast in our images ===================== ============================================== - + .. |HistEqualization| image:: images/histograms/Histogram_Equalization_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HistCalculation| **Title:** :ref:`histogram_calculation` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to create and generate histograms ===================== ============================================== - + .. |HistCalculation| image:: images/histograms/Histogram_Calculation_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HistComparison| **Title:** :ref:`histogram_comparison` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn to calculate metrics between histograms ===================== ============================================== - + .. |HistComparison| image:: images/histograms/Histogram_Comparison_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv - + ===================== ============================================== |BackProjection| **Title:** :ref:`back_projection` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to use histograms to find similar objects in images ===================== ============================================== - + .. |BackProjection| image:: images/histograms/Back_Projection_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |TemplateMatching| **Title:** :ref:`template_matching` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to match templates in an image ===================== ============================================== - + .. |TemplateMatching| image:: images/histograms/Template_Matching_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |FindContours| **Title:** :ref:`find_contours` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to find contours of objects in our image ===================== ============================================== - + .. |FindContours| image:: images/shapedescriptors/Find_Contours_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Hull| **Title:** :ref:`hull` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to get hull contours and draw them! ===================== ============================================== - + .. |Hull| image:: images/shapedescriptors/Hull_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |BRC| **Title:** :ref:`bounding_rects_circles` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to obtain bounding boxes and circles for our contours. ===================== ============================================== - + .. |BRC| image:: images/shapedescriptors/Bounding_Rects_Circles_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - - + + ===================== ============================================== |BRE| **Title:** :ref:`bounding_rotated_ellipses` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to obtain rotated bounding boxes and ellipses for our contours. ===================== ============================================== - + .. |BRE| image:: images/shapedescriptors/Bounding_Rotated_Ellipses_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - - + + ===================== ============================================== |MU| **Title:** :ref:`moments` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn to calculate the moments of an image ===================== ============================================== - + .. |MU| image:: images/shapedescriptors/Moments_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv - - + + ===================== ============================================== |PPT| **Title:** :ref:`point_polygon_test` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to calculate distances from the image to contours ===================== ============================================== - + .. |PPT| image:: images/shapedescriptors/Point_Polygon_Test_Tutorial_Cover.jpg :height: 90pt :width: 90pt @@ -539,6 +539,3 @@ In this section you will learn about the image processing (manipulation) functio ../shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses ../shapedescriptors/moments/moments ../shapedescriptors/point_polygon_test/point_polygon_test - - - diff --git a/doc/tutorials/imgproc/threshold/threshold.rst b/doc/tutorials/imgproc/threshold/threshold.rst index 7788e6c51..889ec1154 100644 --- a/doc/tutorials/imgproc/threshold/threshold.rst +++ b/doc/tutorials/imgproc/threshold/threshold.rst @@ -174,12 +174,12 @@ The tutorial code's is shown lines below. You can also download it from `here `_ discussion group or OpenCV +If you encounter any error after thoroughly following these steps, feel free to contact us via +`OpenCV4Android `_ discussion group or OpenCV `Q&A forum `_ . We'll do our best to help you out. Using OpenCV Library Within Your Android Project ================================================ -In this section we will explain how to make some existing project to use OpenCV. -Starting with 2.4.2 release for Android, *OpenCV Manager* is used to provide apps with the best -available version of OpenCV. -You can get more information here: :ref:`Android_OpenCV_Manager` and in these +In this section we will explain how to make some existing project to use OpenCV. +Starting with 2.4.2 release for Android, *OpenCV Manager* is used to provide apps with the best +available version of OpenCV. +You can get more information here: :ref:`Android_OpenCV_Manager` and in these `slides `_. @@ -48,31 +48,31 @@ Java Application Development with Async Initialization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Using async initialization is a **recommended** way for application development. It uses the OpenCV +Using async initialization is a **recommended** way for application development. It uses the OpenCV Manager to access OpenCV libraries externally installed in the target system. -#. Add OpenCV library project to your workspace. Use menu +#. Add OpenCV library project to your workspace. Use menu :guilabel:`File -> Import -> Existing project in your workspace`. - Press :guilabel:`Browse` button and locate OpenCV4Android SDK + Press :guilabel:`Browse` button and locate OpenCV4Android SDK (:file:`OpenCV-2.4.6-android-sdk/sdk`). .. image:: images/eclipse_opencv_dependency0.png :alt: Add dependency from OpenCV library :align: center -#. In application project add a reference to the OpenCV Java SDK in +#. In application project add a reference to the OpenCV Java SDK in :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``. .. image:: images/eclipse_opencv_dependency1.png :alt: Add dependency from OpenCV library :align: center -In most cases OpenCV Manager may be installed automatically from Google Play. For the case, when -Google Play is not available, i.e. emulator, developer board, etc, you can install it manually +In most cases OpenCV Manager may be installed automatically from Google Play. For the case, when +Google Play is not available, i.e. 
emulator, developer board, etc., you can install it manually using the adb tool. See :ref:`manager_selection` for details.

-There is a very base code snippet implementing the async initialization. It shows basic principles.
+Below is a minimal code snippet implementing the async initialization; it shows the basic principles.
 See the "15-puzzle" OpenCV sample for details.

 .. code-block:: java

@@ -107,47 +107,47 @@ See the "15-puzzle" OpenCV sample for details.
         ...
     }

-It this case application works with OpenCV Manager in asynchronous fashion. ``OnManagerConnected``
-callback will be called in UI thread, when initialization finishes. Please note, that it is not
-allowed to use OpenCV calls or load OpenCV-dependent native libs before invoking this callback.
-Load your own native libraries that depend on OpenCV after the successful OpenCV initialization.
-Default ``BaseLoaderCallback`` implementation treat application context as Activity and calls
-``Activity.finish()`` method to exit in case of initialization failure. To override this behavior
-you need to override ``finish()`` method of ``BaseLoaderCallback`` class and implement your own
+In this case the application works with OpenCV Manager in an asynchronous fashion. The ``OnManagerConnected``
+callback will be called in the UI thread when initialization finishes. Please note that it is not
+allowed to use OpenCV calls or load OpenCV-dependent native libs before this callback is invoked.
+Load your own native libraries that depend on OpenCV after the successful OpenCV initialization.
+The default ``BaseLoaderCallback`` implementation treats the application context as an Activity and calls
+``Activity.finish()`` to exit in case of initialization failure. To override this behavior
+you need to override the ``finish()`` method of the ``BaseLoaderCallback`` class and implement your own
 finalization method.

 Application Development with Static Initialization
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-According to this approach all OpenCV binaries are included into your application package. It is
-designed mostly for development purposes. This approach is deprecated for the production code,
-release package is recommended to communicate with OpenCV Manager via the async initialization
+With this approach all OpenCV binaries are included in your application package. It is
+designed mostly for development purposes. This approach is deprecated for production code;
+a release package should communicate with OpenCV Manager via the async initialization
 described above.

-#. Add the OpenCV library project to your workspace the same way as for the async initialization
-   above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
-   press :guilabel:`Browse` button and select OpenCV SDK path
+#. Add the OpenCV library project to your workspace the same way as for the async initialization
+   above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
+   press the :guilabel:`Browse` button and select the OpenCV SDK path
    (:file:`OpenCV-2.4.6-android-sdk/sdk`).

    .. image:: images/eclipse_opencv_dependency0.png
       :alt: Add dependency from OpenCV library
       :align: center

-#. In the application project add a reference to the OpenCV4Android SDK in
+#. In the application project add a reference to the OpenCV4Android SDK in
    :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``;

    .. image:: images/eclipse_opencv_dependency1.png
       :alt: Add dependency from OpenCV library
       :align: center

-#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
+#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
   native libs from :file:`/sdk/native/libs/` to your project directory to folder :file:`libs/`.

-   In case of the application project **with a JNI part**, instead of manual libraries copying you
+   In case of the application project **with a JNI part**, instead of manually copying the libraries you
    need to modify your ``Android.mk`` file:

-   add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
+   add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
    ``"include path_to_OpenCV-2.4.6-android-sdk/sdk/native/jni/OpenCV.mk"``

    .. code-block:: make

@@ -168,13 +168,13 @@ described above.
          OPENCV_INSTALL_MODULES:=on
          include ../../sdk/native/jni/OpenCV.mk

-   After that the OpenCV libraries will be copied to your application :file:`libs` folder during
+   After that the OpenCV libraries will be copied to your application :file:`libs` folder during
    the JNI build.

-   Eclipse will automatically include all the libraries from the :file:`libs` folder to the
-   application package (APK).
+   Eclipse will automatically include all the libraries from the :file:`libs` folder into the
+   application package (APK).

-#. The last step of enabling OpenCV in your application is Java initialization code before calling
+#. The last step of enabling OpenCV in your application is Java initialization code before calling
    OpenCV API. It can be done, for example, in the static section of the ``Activity`` class:

    .. code-block:: java

@@ -186,7 +186,7 @@ described above.
            }
        }

-   If you application includes other OpenCV-dependent native libraries you should load them
+   If your application includes other OpenCV-dependent native libraries you should load them
    **after** OpenCV initialization:

    .. code-block:: java

@@ -205,16 +205,16 @@ described above.

 Native/C++
 ----------

-To build your own Android application, using OpenCV as native part, the following steps should be
+To build your own Android application using OpenCV as its native part, the following steps should be
 taken:

-#. You can use an environment variable to specify the location of OpenCV package or just hardcode
+#. You can use an environment variable to specify the location of the OpenCV package or just hardcode
   an absolute or relative path in the :file:`jni/Android.mk` of your projects.

-#. The file :file:`jni/Android.mk` should be written for the current application using the common
+#. The file :file:`jni/Android.mk` should be written for the current application using the common
   rules for this file.

-   For detailed information see the Android NDK documentation from the Android NDK archive, in the
+   For detailed information see the Android NDK documentation from the Android NDK archive, in the
   file :file:`/docs/ANDROID-MK.html`.

 #. The following line:

@@ -229,7 +229,7 @@ taken:

       include $(CLEAR_VARS)

-#. Several variables can be used to customize OpenCV stuff, but you **don't need** to use them when
+#. Several variables can be used to customize the OpenCV build, but you **don't need** to use them when
   your application uses the `async initialization` via the `OpenCV Manager` API.

   .. note:: These variables should be set **before** the ``"include .../OpenCV.mk"`` line:

@@ -238,7 +238,7 @@ taken:

          OPENCV_INSTALL_MODULES:=on

-   Copies necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them
+   Copies the necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them
    into the APK.

    .. code-block:: make

@@ -251,7 +251,7 @@ taken:

          OPENCV_LIB_TYPE:=STATIC

-   Perform static linking with OpenCV. By default dynamic link is used and the project JNI lib
+   Perform static linking with OpenCV. By default dynamic linking is used and the project JNI lib
    depends on ``libopencv_java.so``.

 #. The file :file:`Application.mk` should exist and should contain lines:

@@ -269,8 +269,8 @@ taken:

    Should specify the application target platforms.

-   In some cases a linkage error (like ``"In function 'cv::toUtf16(std::basic_string<...>...
-   undefined reference to 'mbstowcs'"``) happens when building an application JNI library,
+   In some cases a linkage error (like ``"In function 'cv::toUtf16(std::basic_string<...>...
+   undefined reference to 'mbstowcs'"``) happens when building an application JNI library
    depending on OpenCV. The following line in the :file:`Application.mk` usually fixes it:

    .. code-block:: make

@@ -278,19 +278,19 @@ taken:

       APP_PLATFORM := android-9

-#. Either use :ref:`manual ` ``ndk-build`` invocation or
-   :ref:`setup Eclipse CDT Builder ` to build native JNI lib before (re)building the Java
+#. Either use :ref:`manual ` ``ndk-build`` invocation or
+   :ref:`setup Eclipse CDT Builder ` to build the native JNI lib before (re)building the Java
    part and creating an APK.

 Hello OpenCV Sample
 ===================

-Here are basic steps to guide you trough the process of creating a simple OpenCV-centric
-application. It will be capable of accessing camera output, processing it and displaying the
+Here are the basic steps to guide you through the process of creating a simple OpenCV-centric
+application. It will be capable of accessing camera output, processing it and displaying the
 result.

-#. Open Eclipse IDE, create a new clean workspace, create a new Android project
+#. Open Eclipse IDE, create a new clean workspace, and create a new Android project
    :menuselection:`File --> New --> Android Project`

 #. Set name, target, package and ``minSDKVersion`` accordingly. The minimal SDK version for build
diff --git a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst
index ee89a4e9c..30620bafc 100644
--- a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst
+++ b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst
@@ -384,4 +384,4 @@ upcoming issues. During this it may take a little longer to see your work online
 however if you submitted it, be sure that eventually it will show up.

 If you have any questions or advice relating to this tutorial you can contact us at
--delete-admin@-delete-opencv.org (delete the -delete- parts of that email address).
\ No newline at end of file
+-delete-admin@-delete-opencv.org (delete the -delete- parts of that email address).
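To make the Native/C++ route described above concrete, here is what a minimal JNI function compiled by such an ``Android.mk`` can look like. This is only a sketch: the package ``org.example.app``, the class ``MainActivity`` and the method name are hypothetical placeholders, not names from the SDK.

.. code-block:: cpp

   #include <jni.h>
   #include <opencv2/core/core.hpp>
   #include <opencv2/imgproc/imgproc.hpp>

   // The Java side owns a cv::Mat (e.g. a camera frame) and passes its native
   // address, declared in Java as: private native void nativeBlur(long matAddr);
   extern "C" JNIEXPORT void JNICALL
   Java_org_example_app_MainActivity_nativeBlur(JNIEnv*, jobject, jlong matAddr)
   {
       cv::Mat& frame = *(cv::Mat*) matAddr;  // wrap the existing buffer, no copy
       cv::GaussianBlur(frame, frame, cv::Size(5, 5), 1.5);
   }

As noted above, load the resulting library with ``System.loadLibrary()`` only after the OpenCV initialization callback has reported success.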
diff --git a/doc/tutorials/introduction/ios_install/ios_install.rst b/doc/tutorials/introduction/ios_install/ios_install.rst index 8d117a0b4..2973b7ec2 100644 --- a/doc/tutorials/introduction/ios_install/ios_install.rst +++ b/doc/tutorials/introduction/ios_install/ios_install.rst @@ -20,7 +20,7 @@ In MacOS it can be done using the following command in Terminal: cd ~/ git clone https://github.com/Itseez/opencv.git - + Building OpenCV from Source, using CMake and Command Line ========================================================= @@ -28,10 +28,10 @@ Building OpenCV from Source, using CMake and Command Line #. Make symbolic link for Xcode to let OpenCV build scripts find the compiler, header files etc. .. code-block:: bash - + cd / sudo ln -s /Applications/Xcode.app/Contents/Developer Developer - + #. Build OpenCV framework: .. code-block:: bash diff --git a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst index 41c161ce3..6aaf7c087 100644 --- a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst +++ b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst @@ -11,7 +11,7 @@ Prerequisites 1. Having installed `Eclipse `_ in your workstation (only the CDT plugin for C/C++ is needed). You can follow the following steps: - * Go to the Eclipse site + * Go to the Eclipse site * Download `Eclipse IDE for C/C++ Developers `_ . Choose the link according to your workstation. @@ -20,7 +20,7 @@ Prerequisites Making a project ================= -1. Start Eclipse. Just run the executable that comes in the folder. +1. Start Eclipse. Just run the executable that comes in the folder. #. Go to **File -> New -> C/C++ Project** @@ -28,13 +28,13 @@ Making a project :alt: Eclipse Tutorial Screenshot 0 :align: center -#. Choose a name for your project (i.e. DisplayImage). An **Empty Project** should be okay for this example. +#. Choose a name for your project (i.e. DisplayImage). An **Empty Project** should be okay for this example. .. image:: images/a1.png :alt: Eclipse Tutorial Screenshot 1 :align: center -#. Leave everything else by default. Press **Finish**. +#. Leave everything else by default. Press **Finish**. #. Your project (in this case DisplayImage) should appear in the **Project Navigator** (usually at the left side of your window). @@ -45,7 +45,7 @@ Making a project #. Now, let's add a source file using OpenCV: - * Right click on **DisplayImage** (in the Navigator). **New -> Folder** . + * Right click on **DisplayImage** (in the Navigator). **New -> Folder** . .. image:: images/a4.png :alt: Eclipse Tutorial Screenshot 4 @@ -76,9 +76,9 @@ Making a project image = imread( argv[1], 1 ); if( argc != 2 || !image.data ) - { + { printf( "No image data \n" ); - return -1; + return -1; } namedWindow( "Display Image", CV_WINDOW_AUTOSIZE ); @@ -102,7 +102,7 @@ Making a project :align: center .. note:: - If you do not know where your opencv files are, open the **Terminal** and type: + If you do not know where your opencv files are, open the **Terminal** and type: .. code-block:: bash @@ -112,56 +112,56 @@ Making a project .. code-block:: bash - -I/usr/local/include/opencv -I/usr/local/include + -I/usr/local/include/opencv -I/usr/local/include b. Now go to **GCC C++ Linker**,there you have to fill two spaces: First in **Library search path (-L)** you have to write the path to where the opencv libraries reside, in my case the path is: :: - + /usr/local/lib - + Then in **Libraries(-l)** add the OpenCV libraries that you may need. 
Usually just the 3 first on the list below are enough (for simple applications) . In my case, I am putting all of them since I plan to use the whole bunch: - opencv_core - opencv_imgproc + opencv_core + opencv_imgproc opencv_highgui - opencv_ml - opencv_video + opencv_ml + opencv_video opencv_features2d - opencv_calib3d - opencv_objdetect + opencv_calib3d + opencv_objdetect opencv_contrib - opencv_legacy + opencv_legacy opencv_flann .. image:: images/a10.png :alt: Eclipse Tutorial Screenshot 10 - :align: center - + :align: center + If you don't know where your libraries are (or you are just psychotic and want to make sure the path is fine), type in **Terminal**: .. code-block:: bash - + pkg-config --libs opencv My output (in case you want to check) was: .. code-block:: bash - - -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_contrib -lopencv_legacy -lopencv_flann + + -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_contrib -lopencv_legacy -lopencv_flann Now you are done. Click **OK** - * Your project should be ready to be built. For this, go to **Project->Build all** + * Your project should be ready to be built. For this, go to **Project->Build all** - In the Console you should get something like + In the Console you should get something like .. image:: images/a12.png :alt: Eclipse Tutorial Screenshot 12 - :align: center + :align: center If you check in your folder, there should be an executable there. @@ -179,21 +179,21 @@ So, now we have an executable ready to run. If we were to use the Terminal, we w Assuming that the image to use as the argument would be located in /images/HappyLittleFish.png. We can still do this, but let's do it from Eclipse: -#. Go to **Run->Run Configurations** +#. Go to **Run->Run Configurations** -#. Under C/C++ Application you will see the name of your executable + Debug (if not, click over C/C++ Application a couple of times). Select the name (in this case **DisplayImage Debug**). +#. Under C/C++ Application you will see the name of your executable + Debug (if not, click over C/C++ Application a couple of times). Select the name (in this case **DisplayImage Debug**). #. Now, in the right side of the window, choose the **Arguments** Tab. Write the path of the image file we want to open (path relative to the workspace/DisplayImage folder). Let's use **HappyLittleFish.png**: .. image:: images/a14.png :alt: Eclipse Tutorial Screenshot 14 - :align: center + :align: center #. Click on the **Apply** button and then in Run. An OpenCV window should pop up with the fish image (or whatever you used). .. image:: images/a15.jpg :alt: Eclipse Tutorial Screenshot 15 - :align: center + :align: center #. Congratulations! You are ready to have fun with OpenCV using Eclipse. @@ -236,7 +236,7 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*: ADD_EXECUTABLE( helloworld helloworld.cxx ) TARGET_LINK_LIBRARIES( helloworld ${OpenCV_LIBS} ) -#. Run: ``cmake-gui ..`` and make sure you fill in where opencv was built. +#. Run: ``cmake-gui ..`` and make sure you fill in where opencv was built. #. Then click ``configure`` and then ``generate``. If it's OK, **quit cmake-gui** @@ -253,6 +253,3 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*: a. 
You can also optionally modify the ``Build command:`` from ``make`` to something like ``make VERBOSE=1 -j4``, which tells ``make`` to print the full command line of every compiler invocation and to build with 4 parallel jobs.

#. Done!
-
-
-
diff --git a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
index e603e5c28..f582d3208 100644
--- a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
+++ b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
@@ -11,7 +11,7 @@ Using OpenCV with gcc and CMake
 * The easiest way of using OpenCV in your code is to use `CMake `_. A few advantages (taken from the Wiki):

   #. No need to change anything when porting between Linux and Windows
-  #. Can easily be combined with other tools by CMake( i.e. Qt, ITK and VTK )
+  #. Can easily be combined with other tools supported by CMake (e.g. Qt, ITK and VTK)

 * If you are not familiar with CMake, checkout the `tutorial `_ on its website.

@@ -21,7 +21,7 @@ Steps

 Create a program using OpenCV
 -------------------------------

-Let's use a simple program such as DisplayImage.cpp shown below.
+Let's use a simple program such as DisplayImage.cpp, shown below.

 .. code-block:: cpp

@@ -36,9 +36,9 @@ Let's use a simple program such as DisplayImage.cpp shown below.
     image = imread( argv[1], 1 );

     if( argc != 2 || !image.data )
-    {
+    {
       printf( "No image data \n" );
-      return -1;
+      return -1;
     }

     namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
diff --git a/doc/tutorials/introduction/linux_install/linux_install.rst b/doc/tutorials/introduction/linux_install/linux_install.rst
index e3039ca07..1e02b64c9 100644
--- a/doc/tutorials/introduction/linux_install/linux_install.rst
+++ b/doc/tutorials/introduction/linux_install/linux_install.rst
@@ -11,8 +11,8 @@ Required Packages

   .. code-block:: bash

-     sudo apt-get install build-essential
-
+     sudo apt-get install build-essential
+
 * CMake 2.6 or higher;
 * Git;
 * GTK+2.x or higher, including headers (libgtk2.0-dev);

@@ -48,7 +48,7 @@ In Linux it can be achieved with the following command in Terminal:

     cd ~/
     git clone https://github.com/Itseez/opencv.git
-
+
 Building OpenCV from Source Using CMake, Using the Command Line
 ===============================================================

@@ -58,26 +58,25 @@ Building OpenCV from Source Using CMake, Using the Command Line

#. Enter the <cmake_binary_dir> and type

   .. code-block:: bash
-
+
      cmake [<some optional parameters>] <path to the OpenCV source directory>

   For example

   .. code-block:: bash
-
+
      cd ~/opencv
      mkdir release
      cd release
      cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
-
+
#. Enter the created temporary directory (<cmake_binary_dir>) and proceed with:

   .. code-block:: bash
-
+
      make
      sudo make install

 .. note::
-
    If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest possible size. The *stripped* version appears to be about half the size. However, we do not recommend using this unless those extra megabytes really do matter.
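As a quick sanity check that ``make install`` placed a usable library where the compiler and ``pkg-config`` can find it, you can build a tiny program against it. This is only a sketch; the file name is arbitrary and the flags assume the default ``/usr/local`` prefix used above.

.. code-block:: cpp

   // version_check.cpp -- print the version of the OpenCV library we link against.
   // Build with: g++ version_check.cpp -o version_check `pkg-config --cflags --libs opencv`
   #include <cstdio>
   #include <opencv2/core/version.hpp>

   int main()
   {
       std::printf("OpenCV version: %s\n", CV_VERSION);
       return 0;
   }

If the printed version matches the sources you just built, the installation is being picked up correctly.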
diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst
index 1a757cfab..50fb9ea37 100644
--- a/doc/tutorials/introduction/load_save_image/load_save_image.rst
+++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst
@@ -5,8 +5,8 @@ Load, Modify, and Save an Image

 .. note::

-   We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
-
+   We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
+
 Goals
 ======

@@ -35,9 +35,9 @@ Here it is:
    {
      char* imageName = argv[1];

-     Mat image;
+     Mat image;
      image = imread( imageName, 1 );
-
+
      if( argc != 2 || !image.data )
      {
        printf( " No image data \n " );

@@ -53,7 +53,7 @@ Here it is:
      namedWindow( "Gray image", CV_WINDOW_AUTOSIZE );

      imshow( imageName, image );
-     imshow( "Gray image", gray_image );
+     imshow( "Gray image", gray_image );

      waitKey(0);

@@ -67,18 +67,18 @@ Explanation

    * Creating a Mat object to store the image information
    * Load an image using :imread:`imread <>`, located in the path given by *imageName*. For this example, assume you are loading an RGB image.
-
-#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations:
+
+#. Now we are going to convert our image from BGR to grayscale format. OpenCV has a really nice function for this kind of transformation:

    .. code-block:: cpp
-
+
       cvtColor( image, gray_image, CV_BGR2GRAY );

    As you can see, :cvt_color:`cvtColor <>` takes as arguments:

    .. container:: enumeratevisibleitemswithsquare

-      * a source image (*image*)
+      * a source image (*image*)
       * a destination image (*gray_image*), in which we will save the converted image.
       * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because :imread:`imread <>` has BGR default channel order in case of color images).

@@ -86,7 +86,7 @@ Explanation

    .. code-block:: cpp

-     imwrite( "../../images/Gray_Image.jpg", gray_image );
+     imwrite( "../../images/Gray_Image.jpg", gray_image );

 Which will save our *gray_image* as *Gray_Image.jpg* in the folder *images* located two levels above my current location.
diff --git a/doc/tutorials/introduction/windows_install/windows_install.rst b/doc/tutorials/introduction/windows_install/windows_install.rst
index cbfd0f66f..c29c13aed 100644
--- a/doc/tutorials/introduction/windows_install/windows_install.rst
+++ b/doc/tutorials/introduction/windows_install/windows_install.rst
@@ -126,7 +126,7 @@ Building the library

 #. Install |TortoiseGit|_. Choose the 32 or 64 bit version according to the type of OS you work in. While installing, locate your msysgit (if it doesn't do that automatically). Follow the wizard -- the default options are OK for the most part.

-#. Choose a directory in your file system, where you will download the OpenCV libraries to. I recommend creating a new one that has short path and no special charachters in it, for example :file:`D:/OpenCV`. For this tutorial I'll suggest you do so. If you use your own path and know, what you're doing -- it's OK.
+#. Choose a directory in your file system where you will download the OpenCV libraries to. I recommend creating a new one that has a short path and no special characters in it, for example :file:`D:/OpenCV`. For this tutorial I suggest you do so. If you use your own path and know what you're doing -- that's OK too.

    a) Clone the repository to the selected directory. After clicking the *Clone* button, a window will appear where you can select from what repository you want to download source files (https://github.com/Itseez/opencv.git) and to what directory (:file:`D:/OpenCV`).

@@ -312,9 +312,13 @@ First we set an environment variable to make our work easier. This will hold the

    ::

-      setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10
+      setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10     (suggested for Visual Studio 2010 - 32 bit Windows)
+      setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc10     (suggested for Visual Studio 2010 - 64 bit Windows)

-Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You can have different platform (e.g. x64 instead of x86) or compiler type, so substitute appropriate value. Inside this you should have folders like *bin* and *include*. The -m should be added if you wish to make the settings computer wise, instead of user wise.
+      setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc11     (suggested for Visual Studio 2012 - 32 bit Windows)
+      setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc11     (suggested for Visual Studio 2012 - 64 bit Windows)
+
+Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You can have a different platform (e.g. x64 instead of x86) or compiler type, so substitute the appropriate value. Inside this directory you should have two folders called *lib* and *bin*. The -m option should be added if you wish to make the settings machine-wide, instead of user-wide.

 If you built static libraries then you are done. Otherwise, you need to add the *bin* folder's path to the system's path. This is because you will use the OpenCV library in the form of *\"Dynamic-link libraries\"* (also known as **DLL**). Inside these are stored all the algorithms and information the OpenCV library contains. The operating system will load them only on demand, during runtime. However, to do this it needs to know where they are. The system's **PATH** contains a list of folders where DLLs can be found. Add the OpenCV library path to this and the OS will know where to look if it ever needs the OpenCV binaries. Otherwise, you will need to copy the used DLLs right beside the application's executable file (*exe*) for the OS to find them, which is highly unpleasant if you work on many projects.
 To do this start up again the |PathEditor|_ and add the following new entry (right click in the application to bring up the menu):
diff --git a/doc/tutorials/introduction/windows_visual_studio_Opencv/images/PropertySheetOpenCVInclude.jpg b/doc/tutorials/introduction/windows_visual_studio_Opencv/images/PropertySheetOpenCVInclude.jpg
index 5826f4dd6..bc61d3386 100644
Binary files a/doc/tutorials/introduction/windows_visual_studio_Opencv/images/PropertySheetOpenCVInclude.jpg and b/doc/tutorials/introduction/windows_visual_studio_Opencv/images/PropertySheetOpenCVInclude.jpg differ
diff --git a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
index d5f80f2f1..f3058a74d 100644
--- a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
+++ b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
@@ -10,16 +10,16 @@ I start out from the assumption that you have read and completed with success th
    :alt: You should have a folder looking like this.
    :align: center

-The OpenCV libraries, distributed by us, on the Microsoft Windows operating system are in a **D**\ ynamic **L**\ inked **L**\ ibraries (*DLL*). These have the advantage that all the content of the library are loaded only at runtime, on demand, and that countless programs may use the same library file. This means that if you have ten applications using the OpenCV library, no need to have around a version for each one of them. Of course you need to have the *dll* of the OpenCV on all systems where you want to run your application.
+On the Microsoft Windows operating system the OpenCV libraries we distribute come as **D**\ ynamic **L**\ inked **L**\ ibraries (*DLL*). These have the advantage that all the content of the library is loaded only at runtime, on demand, and that countless programs may use the same library file. This means that if you have ten applications using the OpenCV library, there is no need to keep a copy around for each one of them. Of course you need to have the OpenCV *dll*\ s on all systems where you want to run your application.

 Another approach is to use static libraries that have *lib* extensions. You may build these by using our source files as described in the :ref:`Windows_Installation` tutorial. When you use this, the library will be built into your *exe* file, so there is no chance that the user deletes them for some reason. As a drawback, your application will be larger and it will take more time to load during startup.

-To build an application with OpenCV you need to do two things:
+To build an application with OpenCV you need to do two things:

 .. container:: enumeratevisibleitemswithsquare

-   + *Tell* to the compiler how the OpenCV library *looks*. You do this by *showing* it the header files.
-   + *Tell* to the linker from where to get the functions or data structures of OpenCV, when they are needed.
+   + *Tell* the compiler what the OpenCV library *looks* like. You do this by *showing* it the header files.
+   + *Tell* the linker where to get the functions or data structures of OpenCV when they are needed.

   If you use the *lib* system you must set the path where the library files are and specify in which one of them to look.
During the build the linker will look into these libraries and add the definitions and implementation of all *used* functions and data structures to the executable file. @@ -27,7 +27,7 @@ To build an application with OpenCV you need to do two things: To pass on all this information to the Visual Studio IDE you can either do it globally (so all your future projects will get these information) or locally (so only for you current project). The advantage of the global one is that you only need to do it once; however, it may be undesirable to clump all your projects all the time with all these information. In case of the global one how you do it depends on the Microsoft Visual Studio you use. There is a **2008 and previous versions** and a **2010 way** of doing it. Inside the global section of this tutorial I'll show what the main differences are. -The base item of a project in Visual Studio is a solution. A solution may contain multiple projects. Projects are the building blocks of an application. Every project will realize something and you will have a main project in which you can put together this project puzzle. In case of the many simple applications (like many of the tutorials will be) you do not need to break down the application into modules. In these cases your main project will be the only existing one. Now go create a new solution inside Visual studio by going through the :menuselection:`File --> New --> Project` menu selection. Choose *Win32 Console Application* as type. Enter its name and select the path where to create it. Then in the upcoming dialog make sure you create an empty project. +The base item of a project in Visual Studio is a solution. A solution may contain multiple projects. Projects are the building blocks of an application. Every project will realize something and you will have a main project in which you can put together this project puzzle. In case of the many simple applications (like many of the tutorials will be) you do not need to break down the application into modules. In these cases your main project will be the only existing one. Now go create a new solution inside Visual studio by going through the :menuselection:`File --> New --> Project` menu selection. Choose *Win32 Console Application* as type. Enter its name and select the path where to create it. Then in the upcoming dialog make sure you create an empty project. .. image:: images/NewProjectVisualStudio.jpg :alt: Which options to select @@ -36,7 +36,7 @@ The base item of a project in Visual Studio is a solution. A solution may contai The *local* method ================== -Every project is built separately from the others. Due to this every project has its own rule package. Inside this rule packages are stored all the information the *IDE* needs to know to build your project. For any application there are at least two build modes: a *Release* and a *Debug* one. The *Debug* has many features that exist so you can find and resolve easier bugs inside your application. In contrast the *Release* is an optimized version, where the goal is to make the application run as fast as possible or to be as small as possible. You may figure that these modes also require different rules to use during build. Therefore, there exist different rule packages for each of your build modes. These rule packages are called inside the IDE as *project properties* and you can view and modify them by using the *Property Manger*. You can bring up this with :menuselection:`View --> Property Pages`. 
Expand it and you can see the existing rule packages (called *Proporty Sheets*).
+Every project is built separately from the others. Due to this every project has its own rule package. Inside these rule packages are stored all the information the *IDE* needs to know to build your project. For any application there are at least two build modes: a *Release* and a *Debug* one. The *Debug* mode has many features that exist so you can find and resolve bugs inside your application more easily. In contrast, the *Release* mode is an optimized version, where the goal is to make the application run as fast as possible or to be as small as possible. You may figure that these modes also require different rules to use during build. Therefore, there exist different rule packages for each of your build modes. These rule packages are called *project properties* inside the IDE and you can view and modify them by using the *Property Manager*. You can bring this up with :menuselection:`View --> Property Pages`. Expand it and you can see the existing rule packages (called *Property Sheets*).

 .. image:: images/PropertyPageExample.jpg
    :alt: An example of Property Sheet
    :align: center

@@ -52,22 +52,22 @@ Use for example the *OpenCV_Debug* name. Then by selecting the sheet :menuselect

 .. code-block:: bash

-      $(OPENCV_DIR)\include
+      $(OPENCV_DIR)\..\..\include

 .. image:: images/PropertySheetOpenCVInclude.jpg
-   :alt: Add the include dir like this.
+   :alt: Add the include dir like this.
    :align: center

-When adding third party libraries settings it is generally a good idea to use the power behind the environment variables. The full location of the OpenCV library may change on each system. Moreover, you may even end up yourself with moving the install directory for some reason. If you would give explicit paths inside your property sheet your project will end up not working when you pass it further to someone else who has a different OpenCV install path. Moreover, fixing this would require to manually modifying every explicit path. A more elegant solution is to use the environment variables. Anything that you put inside a parenthesis started with a dollar sign will be replaced at runtime with the current environment variables value. Here comes in play the environment variable setting we already made in our :ref:`previous tutorial `.
+When adding third-party library settings it is generally a good idea to use the power of environment variables. The full location of the OpenCV library may change from system to system. Moreover, you may even end up moving the install directory yourself for some reason. If you gave explicit paths inside your property sheet, your project would end up not working when you pass it on to someone else who has a different OpenCV install path, and fixing this would require manually modifying every explicit path. A more elegant solution is to use environment variables. Anything that you put inside parentheses preceded by a dollar sign will be replaced during the build with the current environment variable's value. Here comes into play the environment variable setting we already made in our :ref:`previous tutorial `.

 Next go to the :menuselection:`Linker --> General` and under the *"Additional Library Directories"* add the libs directory:

 .. code-block:: bash

-      $(OPENCV_DIR)\libs
+      $(OPENCV_DIR)\lib

 .. image:: images/PropertySheetOpenCVLib.jpg
-   :alt: Add the library folder like this.
+   :alt: Add the library folder like this.
    :align: center

 Then you need to specify the libraries the linker should look into. To do this go to the :menuselection:`Linker --> Input` and under the *"Additional Dependencies"* entry add the names of all modules which you want to use:

@@ -77,7 +77,7 @@ Then you need to specify the libraries in which the linker should look into. To
    :align: center

 .. image:: images/PropertySheetOpenCVLibrariesDebug.jpg
-   :alt: Like this.
+   :alt: Like this.
    :align: center

 The names of the libraries are as follows:

@@ -105,33 +105,33 @@ A full list, for the latest version would contain:

 The letter *d* at the end just indicates that these are the libraries required for debugging. Now click OK to save and do the same with a new property inside the Release rule section. Make sure to omit the *d* letters from the library names and to save the property sheets with the save icon above them.

 .. image:: images/PropertySheetOpenCVLibrariesRelease.jpg
-   :alt: And the release ones.
+   :alt: And the release ones.
    :align: center

-You can find your property sheets inside your projects directory. At this point it is a wise decision to back them up into some special directory, to always have them at hand in the future, whenever you create an OpenCV project. Note that for Visual Studio 2010 the file extension is *props*, while for 2008 this is *vsprops*.
+You can find your property sheets inside your project's directory. At this point it is a wise decision to back them up into some special directory, to always have them at hand in the future, whenever you create an OpenCV project. Note that for Visual Studio 2010 the file extension is *props*, while for 2008 it is *vsprops*.

 .. image:: images/PropertySheetInsideFolder.jpg
-   :alt: And the release ones.
+   :alt: The property sheets inside the project folder.
    :align: center

-Next time when you make a new OpenCV project just use the "Add Existing Property Sheet..." menu entry inside the Property Manager to easily add the OpenCV build rules.
+Next time you make a new OpenCV project, just use the "Add Existing Property Sheet..." menu entry inside the Property Manager to easily add the OpenCV build rules.

 .. image:: images/PropertyPageAddExisting.jpg
-   :alt: Use this option.
+   :alt: Use this option.
    :align: center

 The *global* method
 ===================

-In case you find to troublesome to add the property pages to each and every one of your projects you can also add this rules to a *"global property page"*. However, this applies only to the additional include and library directories. The name of the libraries to use you still need to specify manually by using for instance: a Property page.
+In case you find it troublesome to add the property pages to each and every one of your projects, you can also add these rules to a *"global property page"*. However, this applies only to the additional include and library directories. You still need to specify the names of the libraries to use manually, for instance via a property page.

-In Visual Studio 2008 you can find this under the: :menuselection:`Tools --> Options --> Projects and Solutions --> VC++ Directories`.
+In Visual Studio 2008 you can find this under :menuselection:`Tools --> Options --> Projects and Solutions --> VC++ Directories`.

 .. image:: images/VCDirectories2008.jpg
    :alt: VC++ Directories in VS 2008.
    :align: center

-In Visual Studio 2010 this has been moved to a global property sheet which is automatically added to every project you create:
+In Visual Studio 2010 this has been moved to a global property sheet which is automatically added to every project you create:

 .. image:: images/VCDirectories2010.jpg
    :alt: VC++ Directories in VS 2010.

@@ -153,10 +153,10 @@ You can start a Visual Studio build from two places. Either inside from the *IDE

 .. |voila| unicode:: voil U+00E1

-This is important to remember when you code inside the code open and save commands. You're resources will be saved ( and queried for at opening!!!) relatively to your working directory. This is unless you give a full, explicit path as parameter for the I/O functions. In the code above we open :download:`this OpenCV logo<../../../../samples/cpp/tutorial_code/images/opencv-logo.png>`. Before starting up the application make sure you place the image file in your current working directory. Modify the image file name inside the code to try it out on other images too. Run it and |voila|:
+This is important to remember when you use open and save commands in your code: your resources will be saved (and queried for at opening!) relative to your working directory, unless you give a full, explicit path as a parameter to the I/O functions. In the code above we open :download:`this OpenCV logo<../../../../samples/cpp/tutorial_code/images/opencv-logo.png>`. Before starting up the application, make sure you place the image file in your current working directory. Modify the image file name inside the code to try it out on other images too. Run it and |voila|:

 .. image:: images/SuccessVisualStudioWindows.jpg
-   :alt: You should have this.
+   :alt: You should have this.
    :align: center

 Command line arguments with Visual Studio

@@ -167,11 +167,11 @@ Throughout some of our future tutorials you'll see that the programs main input

 .. code-block:: bash
    :linenos:

-   D:
+   D:
    CD OpenCV\MySolutionName\Release
    MySolutionName.exe exampleImage.jpg

-Here I first changed my drive (if your project isn't on the OS local drive), navigated to my project and start it with an example image argument. While under Linux system it is common to fiddle around with the console window on the Microsoft Windows many people come to use it almost never. Besides, adding the same argument again and again while you are testing your application is, somewhat, a cumbersome task. Luckily, in the Visual Studio there is a menu to automate all this:
+Here I first changed my drive (if your project isn't on the OS local drive), navigated to my project and started it with an example image argument. While under Linux it is common to fiddle around with the console window, on Microsoft Windows many people almost never use it. Besides, adding the same argument again and again while you are testing your application is a somewhat cumbersome task. Luckily, in Visual Studio there is a menu to automate all this:

 ..
image:: images/VisualStudioCommandLineArguments.jpg :alt: Visual Studio Command Line Arguments diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst index 7b201b977..1337ff3a1 100644 --- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst +++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst @@ -97,14 +97,14 @@ Now you can inspect the state of you program. For example, you can bring up the Note that the built-in *Locals* window will display text only. This is where the Image Watch plug-in comes in. Image Watch is like another *Locals* window, but with an image viewer built into it. To bring up Image Watch, select :menuselection:`View --> Other Windows --> Image Watch`. Like Visual Studio's *Locals* window, Image Watch can dock to the Visual Studio IDE. Also, Visual Studio will remember whether you had Image Watch open, and where it was located between debugging sessions. This means you only have to do this once--the next time you start debugging, Image Watch will be back where you left it. Here's what the docked Image Watch window looks like at our breakpoint: .. image:: images/toolwindow.jpg - :height: 320pt + :height: 320pt The radio button at the top left (*Locals/Watch*) selects what is shown in the *Image List* below: *Locals* lists all OpenCV image objects in the current scope (this list is automatically populated). *Watch* shows image expressions that have been pinned for continuous inspection (not described here, see `Image Watch documentation `_ for details). The image list shows basic information such as width, height, number of channels, and, if available, a thumbnail. In our example, the image list contains our two local image variables, *input* and *edges*. If an image has a thumbnail, left-clicking on that image will select it for detailed viewing in the *Image Viewer* on the right. The viewer lets you pan (drag mouse) and zoom (mouse wheel). It also displays the pixel coordinate and value at the current mouse position. .. image:: images/viewer.jpg - :height: 160pt + :height: 160pt Note that the second image in the list, *edges*, is shown as "invalid". This indicates that some data members of this image object have corrupt or invalid values (for example, a negative image width). This is expected at this point in the program, since the C++ constructor for *edges* has not run yet, and so its members have undefined values (in debug mode they are usually filled with "0xCD" bytes). @@ -113,18 +113,18 @@ From here you can single-step through your code (:menuselection:`Debug->Step Ove Now assume you want to do a visual sanity check of the *cv::Canny()* implementation. Bring the *edges* image into the viewer by selecting it in the *Image List* and zoom into a region with a clearly defined edge: .. image:: images/edges_zoom.png - :height: 160pt - + :height: 160pt + Right-click on the *Image Viewer* to bring up the view context menu and enable :menuselection:`Link Views` (a check box next to the menu item indicates whether the option is enabled). .. image:: images/viewer_context_menu.png - :height: 120pt + :height: 120pt The :menuselection:`Link Views` feature keeps the view region fixed when flipping between images of the same size. 
To see how this works, select the input image from the image list--you should now see the corresponding zoomed-in region in the input image: .. image:: images/input_zoom.png - :height: 160pt - + :height: 160pt + You may also switch back and forth between viewing input and edges with your up/down cursor keys. That way you can easily verify that the detected edges line up nicely with the data in the input image. More ... @@ -141,4 +141,4 @@ Image watch has a number of more advanced features, such as Please refer to the online `Image Watch Documentation `_ for details--you also can get to the documentation page by clicking on the *Help* link in the Image Watch window: .. image:: images/help_button.jpg - :height: 80pt + :height: 80pt diff --git a/doc/tutorials/ios/hello/hello.rst b/doc/tutorials/ios/hello/hello.rst index 8e6ddb88a..a7ca1f6f0 100644 --- a/doc/tutorials/ios/hello/hello.rst +++ b/doc/tutorials/ios/hello/hello.rst @@ -19,7 +19,7 @@ Follow this step by step guide to link OpenCV to iOS. 1. Create a new XCode project. -2. Now we need to link *opencv2.framework* with Xcode. Select the project Navigator in the left hand panel and click on project name. +2. Now we need to link *opencv2.framework* with Xcode. Select the project Navigator in the left hand panel and click on project name. 3. Under the TARGETS click on Build Phases. Expand Link Binary With Libraries option. @@ -29,10 +29,10 @@ Follow this step by step guide to link OpenCV to iOS. .. image:: images/linking_opencv_ios.png :alt: OpenCV iOS in Xcode - :align: center + :align: center *Hello OpenCV iOS Application* -=============================== +=============================== Now we will learn how to write a simple Hello World Application in Xcode using OpenCV. @@ -43,13 +43,13 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. code-block:: cpp - #ifdef __cplusplus - #import - #endif + #ifdef __cplusplus + #import + #endif .. image:: images/header_directive.png :alt: header - :align: center + :align: center .. container:: enumeratevisibleitemswithsquare @@ -61,7 +61,7 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. image:: images/view_did_load.png :alt: view did load - :align: center + :align: center .. container:: enumeratevisibleitemswithsquare @@ -73,4 +73,3 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. image:: images/output.png :alt: output :align: center - diff --git a/doc/tutorials/ios/image_manipulation/image_manipulation.rst b/doc/tutorials/ios/image_manipulation/image_manipulation.rst index e8d4aad5b..c4cde1990 100644 --- a/doc/tutorials/ios/image_manipulation/image_manipulation.rst +++ b/doc/tutorials/ios/image_manipulation/image_manipulation.rst @@ -21,9 +21,9 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); CGFloat cols = image.size.width; CGFloat rows = image.size.height; - + cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels - + CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data cols, // Width of bitmap rows, // Height of bitmap @@ -32,11 +32,11 @@ In *OpenCV* all the image processing operations are done on *Mat*. 
iOS uses UIIm colorSpace, // Colorspace kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault); // Bitmap info flags - + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage); CGContextRelease(contextRef); CGColorSpaceRelease(colorSpace); - + return cvMat; } @@ -47,9 +47,9 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); CGFloat cols = image.size.width; CGFloat rows = image.size.height; - + cv::Mat cvMat(rows, cols, CV_8UC1); // 8 bits per component, 1 channels - + CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data cols, // Width of bitmap rows, // Height of bitmap @@ -58,11 +58,11 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm colorSpace, // Colorspace kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault); // Bitmap info flags - + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage); CGContextRelease(contextRef); CGColorSpaceRelease(colorSpace); - + return cvMat; } @@ -81,15 +81,15 @@ After the processing we need to convert it back to UIImage. { NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()]; CGColorSpaceRef colorSpace; - + if (cvMat.elemSize() == 1) { colorSpace = CGColorSpaceCreateDeviceGray(); } else { colorSpace = CGColorSpaceCreateDeviceRGB(); } - + CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data); - + // Creating CGImage from cv::Mat CGImageRef imageRef = CGImageCreate(cvMat.cols, //width cvMat.rows, //height @@ -103,15 +103,15 @@ After the processing we need to convert it back to UIImage. false, //should interpolate kCGRenderingIntentDefault //intent ); - - + + // Getting UIImage from CGImage UIImage *finalImage = [UIImage imageWithCGImage:imageRef]; CGImageRelease(imageRef); CGDataProviderRelease(provider); CGColorSpaceRelease(colorSpace); - - return finalImage; + + return finalImage; } *Output* @@ -119,12 +119,12 @@ After the processing we need to convert it back to UIImage. .. image:: images/output.jpg :alt: header - :align: center + :align: center -Check out an instance of running code with more Image Effects on `YouTube `_ . +Check out an instance of running code with more Image Effects on `YouTube `_ . .. raw:: html
-
\ No newline at end of file + diff --git a/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst b/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst index 5ecda41c2..377446dee 100644 --- a/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst +++ b/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst @@ -69,7 +69,7 @@ .. toctree:: :hidden: - + ../hello/hello ../image_manipulation/image_manipulation ../video_processing/video_processing diff --git a/doc/tutorials/ios/video_processing/video_processing.rst b/doc/tutorials/ios/video_processing/video_processing.rst index 6143f7717..bd83ace75 100644 --- a/doc/tutorials/ios/video_processing/video_processing.rst +++ b/doc/tutorials/ios/video_processing/video_processing.rst @@ -17,35 +17,35 @@ Including OpenCV library in your iOS project The OpenCV library comes as a so-called framework, which you can directly drag-and-drop into your XCode project. Download the latest binary from . Alternatively follow this guide :ref:`iOS-Installation` to compile the framework manually. Once you have the framework, just drag-and-drop into XCode: - .. image:: images/xcode_hello_ios_framework_drag_and_drop.png - - + .. image:: images/xcode_hello_ios_framework_drag_and_drop.png + + Also you have to locate the prefix header that is used for all header files in the project. The file is typically located at "ProjectName/Supporting Files/ProjectName-Prefix.pch". There, you have add an include statement to import the opencv library. However, make sure you include opencv before you include UIKit and Foundation, because else you will get some weird compile errors that some macros like min and max are defined multiple times. For example the prefix header could look like the following: .. code-block:: objc - :linenos: - - // - // Prefix header for all source files of the 'VideoFilters' target in the 'VideoFilters' project - // - - #import - - #ifndef __IPHONE_4_0 - #warning "This project uses features only available in iOS SDK 4.0 and later." - #endif - - #ifdef __cplusplus - #import - #endif - - #ifdef __OBJC__ - #import - #import - #endif - - - + :linenos: + + // + // Prefix header for all source files of the 'VideoFilters' target in the 'VideoFilters' project + // + + #import + + #ifndef __IPHONE_4_0 + #warning "This project uses features only available in iOS SDK 4.0 and later." + #endif + + #ifdef __cplusplus + #import + #endif + + #ifdef __OBJC__ + #import + #import + #endif + + + Example video frame processing project -------------------------------------- User Interface @@ -53,63 +53,63 @@ User Interface First, we create a simple iOS project, for example Single View Application. Then, we create and add an UIImageView and UIButton to start the camera and display the video frames. The storyboard could look like that: - .. image:: images/xcode_hello_ios_viewcontroller_layout.png + .. image:: images/xcode_hello_ios_viewcontroller_layout.png Make sure to add and connect the IBOutlets and IBActions to the corresponding ViewController: .. 
code-block:: objc - :linenos: - - @interface ViewController : UIViewController - { - IBOutlet UIImageView* imageView; - IBOutlet UIButton* button; - } - - - (IBAction)actionStart:(id)sender; - - @end - - + :linenos: + + @interface ViewController : UIViewController + { + IBOutlet UIImageView* imageView; + IBOutlet UIButton* button; + } + + - (IBAction)actionStart:(id)sender; + + @end + + Adding the Camera ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We add a camera controller to the view controller and initialize it when the view has loaded: .. code-block:: objc - :linenos: - - #import - using namespace cv; - - - @interface ViewController : UIViewController - { - ... - CvVideoCamera* videoCamera; - } - ... - @property (nonatomic, retain) CvVideoCamera* videoCamera; - - @end - -.. code-block:: objc - :linenos: + :linenos: + + #import + using namespace cv; + + + @interface ViewController : UIViewController + { + ... + CvVideoCamera* videoCamera; + } + ... + @property (nonatomic, retain) CvVideoCamera* videoCamera; + + @end + +.. code-block:: objc + :linenos: + + - (void)viewDidLoad + { + [super viewDidLoad]; + // Do any additional setup after loading the view, typically from a nib. + + self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView]; + self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront; + self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288; + self.videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait; + self.videoCamera.defaultFPS = 30; + self.videoCamera.grayscale = NO; + } - - (void)viewDidLoad - { - [super viewDidLoad]; - // Do any additional setup after loading the view, typically from a nib. - - self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView]; - self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront; - self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288; - self.videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait; - self.videoCamera.defaultFPS = 30; - self.videoCamera.grayscale = NO; - } - In this case, we initialize the camera and provide the imageView as a target for rendering each frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provie as properties some of the AVFoundation camera options. For example we want to use the front camera, set the video size to 352x288 and a video orientation (the video camera normally outputs in landscape mode, which results in transposed data when you design a portrait application). The property defaultFPS sets the FPS of the camera. If the processing is less fast than the desired FPS, frames are automatically dropped. @@ -143,7 +143,7 @@ Additionally, we have to manually add framework dependencies of the opencv frame * Foundation - .. image:: images/xcode_hello_ios_frameworks_add_dependencies.png + .. image:: images/xcode_hello_ios_frameworks_add_dependencies.png Processing frames @@ -152,35 +152,35 @@ Processing frames We follow the delegation pattern, which is very common in iOS, to provide access to each camera frame. Basically, the View Controller has to implement the CvVideoCameraDelegate protocol and has to be set as delegate to the video camera: .. code-block:: objc - :linenos: - - @interface ViewController : UIViewController - + :linenos: + + @interface ViewController : UIViewController + .. code-block:: objc - :linenos: - - - (void)viewDidLoad - { - ... 
- self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView]; - self.videoCamera.delegate = self; - ... - } + :linenos: + + - (void)viewDidLoad + { + ... + self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView]; + self.videoCamera.delegate = self; + ... + } .. code-block:: objc - :linenos: + :linenos: - #pragma mark - Protocol CvVideoCameraDelegate + #pragma mark - Protocol CvVideoCameraDelegate - #ifdef __cplusplus - - (void)processImage:(Mat&)image; - { - // Do some OpenCV stuff with the image - } - #endif + #ifdef __cplusplus + - (void)processImage:(Mat&)image; + { + // Do some OpenCV stuff with the image + } + #endif Note that we are using C++ here (cv::Mat). Important: You have to rename the view controller's extension .m into .mm, so that the compiler compiles it under the assumption of Objective-C++ (Objective-C and C++ mixed). Then, __cplusplus is defined when the compiler is processing the file for C++ code. Therefore, we put our code within a block where __cplusplus is defined. @@ -193,18 +193,18 @@ From here you can start processing video frames. For example the following snipp .. code-block:: objc - :linenos: - - - (void)processImage:(Mat&)image; - { - // Do some OpenCV stuff with the image - Mat image_copy; - cvtColor(image, image_copy, CV_BGRA2BGR); - - // invert image - bitwise_not(image_copy, image_copy); - cvtColor(image_copy, image, CV_BGR2BGRA); - } + :linenos: + + - (void)processImage:(Mat&)image; + { + // Do some OpenCV stuff with the image + Mat image_copy; + cvtColor(image, image_copy, CV_BGRA2BGR); + + // invert image + bitwise_not(image_copy, image_copy); + cvtColor(image_copy, image, CV_BGR2BGRA); + } Start! @@ -213,14 +213,14 @@ Start! Finally, we have to tell the camera to actually start/stop working. The following code will start the camera when you press the button, assuming you connected the UI properly: .. code-block:: objc - :linenos: - - #pragma mark - UI Actions - - - (IBAction)actionStart:(id)sender; - { - [self.videoCamera start]; - } + :linenos: + + #pragma mark - UI Actions + + - (IBAction)actionStart:(id)sender; + { + [self.videoCamera start]; + } diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst index 6cd66e52b..50f734803 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst @@ -10,7 +10,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare - + Use the OpenCV functions :svms:`CvSVM::train ` to build a classifier based on SVMs and :svms:`CvSVM::predict ` to test its performance. + + Use the OpenCV functions :svms:`CvSVM::train ` to build a classifier based on SVMs and :svms:`CvSVM::predict ` to test its performance. What is a SVM? ============== @@ -36,14 +36,14 @@ Then, the operation of the SVM algorithm is based on finding the hyperplane that .. image:: images/optimal-hyperplane.png :alt: The Optimal hyperplane - :align: center + :align: center How is the optimal hyperplane computed? ======================================= Let's introduce the notation used to define formally a hyperplane: -.. math:: +.. math:: f(x) = \beta_{0} + \beta^{T} x, where :math:`\beta` is known as the *weight vector* and :math:`\beta_{0}` as the *bias*. @@ -106,7 +106,7 @@ Explanation .. 
code-block:: cpp Mat trainingDataMat(3, 2, CV_32FC1, trainingData); - Mat labelsMat (3, 1, CV_32FC1, labels); + Mat labelsMat (3, 1, CV_32FC1, labels); 2. **Set up SVM's parameters** @@ -143,7 +143,7 @@ Explanation .. code-block:: cpp Vec3b green(0,255,0), blue (255,0,0); - + for (int i = 0; i < image.rows; ++i) for (int j = 0; j < image.cols; ++j) { @@ -152,8 +152,8 @@ Explanation if (response == 1) image.at<Vec3b>(j, i) = green; - else - if (response == -1) + else + if (response == -1) image.at<Vec3b>(j, i) = blue; } @@ -184,5 +184,4 @@ Results .. image:: images/result.png :alt: The seperated planes - :align: center - + :align: center diff --git a/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst index c09a8e3f8..5e46bb003 100644 --- a/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst +++ b/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst @@ -216,4 +216,4 @@ You may observe a runtime instance of this on the `YouTube here - \ No newline at end of file + diff --git a/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst index 452b89636..4691756a9 100644 --- a/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst +++ b/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst @@ -5,9 +5,9 @@ Use the powerfull machine learning classes for statistical classification, regression and clustering of data. -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -18,7 +18,7 @@ Use the powerfull machine learning classes for statistical classification, regre *Author:* |Author_FernandoI| - Learn what a Suport Vector Machine is. + Learn what a Support Vector Machine is. ============ ============================================== @@ -26,7 +26,7 @@ Use the powerfull machine learning classes for statistical classification, regre :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -51,6 +51,6 @@ Use the powerfull machine learning classes for statistical classification, regre .. toctree:: :hidden: - + ../introduction_to_svm/introduction_to_svm ../non_linear_svms/non_linear_svms diff --git a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst index cabb81c01..03080fec5 100644 --- a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst +++ b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst @@ -131,4 +131,3 @@ Result .. image:: images/Cascade_Classifier_Tutorial_Result_LBP.jpg :align: center :height: 300pt - diff --git a/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst index 64ed10945..c9df3eb1d 100644 --- a/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst +++ b/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst @@ -5,23 +5,23 @@ Ever wondered how your digital camera detects peoples and faces? Look here to find out! -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv ===================== ============================================== |CascadeClassif| **Title:** :ref:`cascade_classifier` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here we learn how to use *objdetect* to find objects in our images or videos - + ===================== ============================================== - + .. |CascadeClassif| image:: images/Cascade_Classifier_Tutorial_Cover.jpg :height: 90pt :width: 90pt diff --git a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst index 778bc5cdb..a2521d695 100644 --- a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst +++ b/doc/tutorials/video/table_of_content_video/table_of_content_video.rst @@ -3,7 +3,7 @@ *video* module. Video analysis ----------------------------------------------------------- -Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions. +Look here in order to find algorithms to use on your video stream, like: motion extraction, feature tracking and foreground extraction. .. include:: ../../definitions/noContent.rst diff --git a/doc/user_guide/ug_features2d.rst index ac5633671..e3ef302dc 100644 --- a/doc/user_guide/ug_features2d.rst +++ b/doc/user_guide/ug_features2d.rst @@ -78,7 +78,7 @@ First, we create an instance of a keypoint detector. All detectors inherit the a extractor.compute(img1, keypoints1, descriptors1); extractor.compute(img2, keypoints2, descriptors2); -We create an instance of descriptor extractor. The most of OpenCV descriptors inherit ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in a row *i* for each *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the ouptut keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). :: +We create an instance of a descriptor extractor. Most OpenCV descriptors inherit the ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in row *i* for the *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints for which a descriptor is not defined (usually these are the keypoints near the image border). The method makes sure that the output keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). :: // matching descriptors BruteForceMatcher<L2<float> > matcher; diff --git a/doc/user_guide/ug_mat.rst index 5fd163053..d4cef8f23 100644 --- a/doc/user_guide/ug_mat.rst +++ b/doc/user_guide/ug_mat.rst @@ -13,7 +13,7 @@ Images Load an image from a file: :: Mat img = imread(filename) - + If you read a jpg file, a 3 channel image is created by default. If you need a grayscale image, use: :: Mat img = imread(filename, 0); @@ -23,14 +23,14 @@ If you read a jpg file, a 3 channel image is created by default. If you need a g Save an image to a file: :: imwrite(filename, img); - + .. 
note:: format of the file is determined by its extension. .. note:: use ``imdecode`` and ``imencode`` to read and write image from/to memory rather than a file. XML/YAML -------- - + TBD Basic operations with images @@ -71,7 +71,7 @@ There are functions in OpenCV, especially from calib3d module, such as ``project //... fill the array Mat pointsMat = Mat(points); -One can access a point in this matrix using the same method ``Mat::at`` : +One can access a point in this matrix using the same method ``Mat::at`` : :: @@ -87,7 +87,7 @@ Memory management and reference counting // .. fill the array Mat pointsMat = Mat(points).reshape(1); -As a result we get a 32FC1 matrix with 3 columns instead of 32FC3 matrix with 1 column. ``pointsMat`` uses data from ``points`` and will not deallocate the memory when destroyed. In this particular instance, however, developer has to make sure that lifetime of ``points`` is longer than of ``pointsMat``. +As a result we get a 32FC1 matrix with 3 columns instead of a 32FC3 matrix with 1 column. ``pointsMat`` uses data from ``points`` and will not deallocate the memory when destroyed. In this particular instance, however, the developer has to make sure that the lifetime of ``points`` is longer than that of ``pointsMat``. If we need to copy the data, this is done using, for example, ``Mat::copyTo`` or ``Mat::clone``: :: Mat img = imread("image.jpg"); @@ -117,7 +117,7 @@ A convertion from ``Mat`` to C API data structures: :: IplImage img1 = img; CvMat m = img; -Note that there is no data copying here. +Note that there is no data copying here. Conversion from color to grey scale: :: diff --git a/doc/user_guide/ug_traincascade.rst index cb5190bc4..601f50438 100644 --- a/doc/user_guide/ug_traincascade.rst +++ b/doc/user_guide/ug_traincascade.rst @@ -6,7 +6,7 @@ Cascade Classifier Training Introduction ============ -The work with a cascade classifier inlcudes two major stages: training and detection. +The work with a cascade classifier includes two major stages: training and detection. Detection stage is described in a documentation of ``objdetect`` module of general OpenCV documentation. Documentation gives some basic information about cascade classifier. Current guide is describing how to train a cascade classifier: preparation of a training data and running the training application. @@ -18,10 +18,10 @@ There are two applications in OpenCV to train cascade classifier: ``opencv_haart Note that ``opencv_traincascade`` application can use TBB for multi-threading. To use it in multicore mode OpenCV must be built with TBB. -Also there are some auxilary utilities related to the training. +Also there are some auxiliary utilities related to the training. * ``opencv_createsamples`` is used to prepare a training dataset of positive and test samples. ``opencv_createsamples`` produces dataset of positive samples in a format that is supported by both ``opencv_haartraining`` and ``opencv_traincascade`` applications. The output is a file with \*.vec extension, it is a binary format which contains images. - + * ``opencv_performance`` may be used to evaluate the quality of classifiers, but for trained by ``opencv_haartraining`` only. It takes a collection of marked up images, runs the classifier and reports the performance, i.e. number of found objects, number of missed objects, number of false alarms and other information. Since ``opencv_haartraining`` is an obsolete application, only ``opencv_traincascade`` will be described futher. 
``opencv_createsamples`` utility is needed to prepare a training data for ``opencv_traincascade``, so it will be described too. @@ -36,7 +36,7 @@ Negative Samples Negative samples are taken from arbitrary images. These images must not contain detected objects. Negative samples are enumerated in a special file. It is a text file in which each line contains an image filename (relative to the directory of the description file) of negative sample image. This file must be created manually. Note that negative samples and sample images are also called background samples or background samples images, and are used interchangeably in this document. Described images may be of different sizes. But each image should be (but not nessesarily) larger then a training window size, because these images are used to subsample negative image to the training size. An example of description file: - + Directory structure: .. code-block:: text @@ -45,14 +45,14 @@ Directory structure: img1.jpg img2.jpg bg.txt - + File bg.txt: .. code-block:: text img/img1.jpg img/img2.jpg - + Positive Samples ---------------- Positive samples are created by ``opencv_createsamples`` utility. They may be created from a single image with object or from a collection of previously marked up images. @@ -66,37 +66,37 @@ Command line arguments: * ``-vec `` Name of the output file containing the positive samples for training. - + * ``-img `` Source object image (e.g., a company logo). - + * ``-bg `` Background description file; contains a list of images which are used as a background for randomly distorted versions of the object. * ``-num `` - + Number of positive samples to generate. - + * ``-bgcolor `` Background color (currently grayscale images are assumed); the background color denotes the transparent color. Since there might be compression artifacts, the amount of color tolerance can be specified by ``-bgthresh``. All pixels withing ``bgcolor-bgthresh`` and ``bgcolor+bgthresh`` range are interpreted as transparent. - + * ``-bgthresh `` * ``-inv`` - + If specified, colors will be inverted. - + * ``-randinv`` If specified, colors will be inverted randomly. - + * ``-maxidev `` - + Maximal intensity deviation of pixels in foreground samples. - + * ``-maxxangle `` * ``-maxyangle `` @@ -104,15 +104,15 @@ Command line arguments: * ``-maxzangle `` Maximum rotation angles must be given in radians. - + * ``-show`` Useful debugging option. If specified, each sample will be shown. Pressing ``Esc`` will continue the samples creation process without. - + * ``-w `` Width (in pixels) of the output samples. - + * ``-h `` Height (in pixels) of the output samples. @@ -123,7 +123,7 @@ The source image is rotated randomly around all three axes. The chosen angle is Positive samples also may be obtained from a collection of previously marked up images. This collection is described by a text file similar to background description file. Each line of this file corresponds to an image. The first element of the line is the filename. It is followed by the number of object instances. The following numbers are the coordinates of objects bounding rectangles (x, y, width, height). An example of description file: - + Directory structure: .. code-block:: text @@ -132,27 +132,27 @@ Directory structure: img1.jpg img2.jpg info.dat - + File info.dat: .. code-block:: text - + img/img1.jpg 1 140 100 45 45 img/img2.jpg 2 100 200 50 50 50 30 25 25 - + Image img1.jpg contains single object instance with the following coordinates of bounding rectangle: (140, 100, 45, 45). 
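Each such annotation maps directly to a ``cv::Rect``. As a quick illustration, here is a minimal sketch (assuming OpenCV 2.x include paths; the output filename is hypothetical) that crops the object annotated in img1.jpg above:

.. code-block:: cpp

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        // "img/img1.jpg 1 140 100 45 45" -> one object at x=140, y=100, w=45, h=45
        cv::Mat img = cv::imread("img/img1.jpg");
        if (img.empty())
            return 1;                           // image could not be loaded
        cv::Rect object(140, 100, 45, 45);      // bounding rectangle from info.dat
        cv::Mat crop = img(object).clone();     // deep copy of the annotated region
        cv::imwrite("object0.png", crop);       // hypothetical output name
        return 0;
    }
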
Image img2.jpg contains two object instances. - + In order to create positive samples from such collection, ``-info`` argument should be specified instead of ``-img``: * ``-info `` Description file of marked up images collection. - + The scheme of samples creation in this case is as follows. The object instances are taken from images. Then they are resized to target samples size and stored in output vec-file. No distortion is applied, so the only affecting arguments are ``-w``, ``-h``, ``-show`` and ``-num``. - + ``opencv_createsamples`` utility may be used for examining samples stored in positive samples file. In order to do this only ``-vec``, ``-w`` and ``-h`` parameters should be specified. - -Note that for training, it does not matter how vec-files with positive samples are generated. But ``opencv_createsamples`` utility is the only one way to collect/create a vector file of positive samples, provided by OpenCV. + +Note that for training, it does not matter how vec-files with positive samples are generated. But ``opencv_createsamples`` utility is the only one way to collect/create a vector file of positive samples, provided by OpenCV. Example of vec-file is available here ``opencv/data/vec_files/trainingfaces_24-24.vec``. It can be used to train a face detector with the following window size: ``-w 24 -h 24``. @@ -165,99 +165,99 @@ Command line arguments of ``opencv_traincascade`` application grouped by purpose #. Common arguments: - + * ``-data `` - + Where the trained classifier should be stored. - + * ``-vec `` - + vec-file with positive samples (created by ``opencv_createsamples`` utility). - + * ``-bg `` - + Background description file. - + * ``-numPos `` - + * ``-numNeg `` - + Number of positive/negative samples used in training for every classifier stage. - + * ``-numStages `` - + Number of cascade stages to be trained. - + * ``-precalcValBufSize `` - + Size of buffer for precalculated feature values (in Mb). - + * ``-precalcIdxBufSize `` - + Size of buffer for precalculated feature indices (in Mb). The more memory you have the faster the training process. - + * ``-baseFormatSave`` - + This argument is actual in case of Haar-like features. If it is specified, the cascade will be saved in the old format. - + #. Cascade parameters: * ``-stageType `` - + Type of stages. Only boosted classifier are supported as a stage type at the moment. - + * ``-featureType<{HAAR(default), LBP}>`` - + Type of features: ``HAAR`` - Haar-like features, ``LBP`` - local binary patterns. - + * ``-w `` - + * ``-h `` - + Size of training samples (in pixels). Must have exactly the same values as used during training samples creation (``opencv_createsamples`` utility). - + #. Boosted classifer parameters: - + * ``-bt <{DAB, RAB, LB, GAB(default)}>`` - + Type of boosted classifiers: ``DAB`` - Discrete AdaBoost, ``RAB`` - Real AdaBoost, ``LB`` - LogitBoost, ``GAB`` - Gentle AdaBoost. - + * ``-minHitRate `` - + Minimal desired hit rate for each stage of the classifier. Overall hit rate may be estimated as (min_hit_rate^number_of_stages). - + * ``-maxFalseAlarmRate `` - + Maximal desired false alarm rate for each stage of the classifier. Overall false alarm rate may be estimated as (max_false_alarm_rate^number_of_stages). - + * ``-weightTrimRate `` - + Specifies whether trimming should be used and its weight. A decent choice is 0.95. - + * ``-maxDepth `` - + Maximal depth of a weak tree. A decent choice is 1, that is case of stumps. 
- + * ``-maxWeakCount `` - + Maximal count of weak trees for every cascade stage. The boosted classifier (stage) will have so many weak trees (``<=maxWeakCount``), as needed to achieve the given ``-maxFalseAlarmRate``. - + #. Haar-like feature parameters: - + * ``-mode `` - + Selects the type of Haar features set used in training. ``BASIC`` use only upright features, while ``ALL`` uses the full set of upright and 45 degree rotated feature set. See [Rainer2002]_ for more details. - -#. + +#. Local Binary Patterns parameters: - + Local Binary Patterns don't have parameters. After the ``opencv_traincascade`` application has finished its work, the trained cascade will be saved in cascade.xml file in the folder, which was passed as ``-data`` parameter. Other files in this folder are created for the case of interrupted training, so you may delete them after completion of training. diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt index 1d5909616..ed3b85a8f 100644 --- a/include/CMakeLists.txt +++ b/include/CMakeLists.txt @@ -5,5 +5,3 @@ install(FILES ${old_hdrs} install(FILES "opencv2/opencv.hpp" DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2 COMPONENT main) - - diff --git a/include/opencv/cv.h b/include/opencv/cv.h index f9831cf5c..77d0971a2 100644 --- a/include/opencv/cv.h +++ b/include/opencv/cv.h @@ -80,4 +80,3 @@ #endif //__cplusplus #endif // __OPENCV_OLD_CV_H_ - diff --git a/include/opencv/cxeigen.hpp b/include/opencv/cxeigen.hpp index c503f713d..1f04d1a3a 100644 --- a/include/opencv/cxeigen.hpp +++ b/include/opencv/cxeigen.hpp @@ -46,4 +46,3 @@ #include "opencv2/core/eigen.hpp" #endif - diff --git a/modules/androidcamera/camera_wrapper/camera_wrapper.cpp b/modules/androidcamera/camera_wrapper/camera_wrapper.cpp index 2d0ebc7a1..ca631fc21 100644 --- a/modules/androidcamera/camera_wrapper/camera_wrapper.cpp +++ b/modules/androidcamera/camera_wrapper/camera_wrapper.cpp @@ -1,5 +1,8 @@ -#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1) && !defined(ANDROID_r4_2_0) -# error Building camera wrapper for your version of Android is not supported by OpenCV. You need to modify OpenCV sources in order to compile camera wrapper for your version of Android. +#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && \ + !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1) && \ + !defined(ANDROID_r4_2_0) && !defined(ANDROID_r4_3_0) +# error Building camera wrapper for your version of Android is not supported by OpenCV.\ + You need to modify OpenCV sources in order to compile camera wrapper for your version of Android. #endif #include @@ -16,17 +19,18 @@ //Include SurfaceTexture.h file with the SurfaceTexture class # include # define MAGIC_OPENCV_TEXTURE_ID (0x10) -#else // defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) -//TODO: This is either 2.2 or 2.3. 
Include the headers for ISurface.h access -#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) -#include -#include +#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) +# include +# include +#elif defined(ANDROID_r4_3_0) +# include +# include #else # include -#endif // defined(ANDROID_r4_1_1) -#endif // defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) +#endif #include +#include //undef logging macro from /system/core/libcutils/loghack.h #ifdef LOGD @@ -45,7 +49,6 @@ # undef LOGE #endif - // LOGGING #include #define CAMERA_LOG_TAG "OpenCV_NativeCamera" @@ -60,7 +63,7 @@ using namespace android; void debugShowFPS(); -#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) +#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) class ConsumerListenerStub: public BufferQueue::ConsumerListener { public: @@ -73,6 +76,29 @@ public: }; #endif +std::string getProcessName() +{ + std::string result; + std::ifstream f; + + f.open("/proc/self/cmdline"); + if (f.is_open()) + { + std::string fullPath; + std::getline(f, fullPath, '\0'); + if (!fullPath.empty()) + { + int i = fullPath.size()-1; + while ((i >= 0) && (fullPath[i] != '/')) i--; + result = fullPath.substr(i+1, std::string::npos); + } + } + + f.close(); + + return result; +} + void debugShowFPS() { static int mFrameCount = 0; @@ -280,7 +306,7 @@ public: } virtual void postData(int32_t msgType, const sp& dataPtr - #if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) + #if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) ,camera_frame_metadata_t* #endif ) @@ -361,7 +387,9 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, typedef sp (*Android22ConnectFuncType)(); typedef sp (*Android23ConnectFuncType)(int); typedef sp (*Android3DConnectFuncType)(int, int); + typedef sp (*Android43ConnectFuncType)(int, const String16&, int); + const int ANY_CAMERA_INDEX = -1; const int BACK_CAMERA_INDEX = 99; const int FRONT_CAMERA_INDEX = 98; @@ -372,14 +400,24 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, CAMERA_SUPPORT_MODE_ZSL = 0x08 /* Camera Sensor supports ZSL mode. 
*/ }; + // used for Android 4.3 + enum { + USE_CALLING_UID = -1 + }; + const char Android22ConnectName[] = "_ZN7android6Camera7connectEv"; const char Android23ConnectName[] = "_ZN7android6Camera7connectEi"; const char Android3DConnectName[] = "_ZN7android6Camera7connectEii"; + const char Android43ConnectName[] = "_ZN7android6Camera7connectEiRKNS_8String16Ei"; int localCameraIndex = cameraId; + if (cameraId == ANY_CAMERA_INDEX) + { + localCameraIndex = 0; + } #if !defined(ANDROID_r2_2_0) - if (cameraId == BACK_CAMERA_INDEX) + else if (cameraId == BACK_CAMERA_INDEX) { LOGD("Back camera selected"); for (int i = 0; i < Camera::getNumberOfCameras(); i++) @@ -450,6 +488,12 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, LOGD("Connecting to CameraService v 3D"); camera = Android3DConnect(localCameraIndex, CAMERA_SUPPORT_MODE_2D); } + else if (Android43ConnectFuncType Android43Connect = (Android43ConnectFuncType)dlsym(CameraHALHandle, Android43ConnectName)) + { + std::string currentProcName = getProcessName(); + LOGD("Current process name for camera init: %s", currentProcName.c_str()); + camera = Android43Connect(localCameraIndex, String16(currentProcName.c_str()), USE_CALLING_UID); + } else { dlclose(CameraHALHandle); @@ -471,7 +515,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, handler->camera = camera; handler->cameraId = localCameraIndex; - if (prevCameraParameters != 0) + if (prevCameraParameters != NULL) { LOGI("initCameraConnect: Setting paramers from previous camera handler"); camera->setParameters(prevCameraParameters->flatten()); @@ -503,11 +547,11 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, const char* available_focus_modes = handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES); if (available_focus_modes != 0) { - if (strstr(available_focus_modes, "continuous-video") != NULL) - { - handler->params.set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO); + if (strstr(available_focus_modes, "continuous-video") != NULL) + { + handler->params.set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO); - status_t resParams = handler->camera->setParameters(handler->params.flatten()); + status_t resParams = handler->camera->setParameters(handler->params.flatten()); if (resParams != 0) { @@ -517,8 +561,8 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, { LOGD("initCameraConnect: autofocus is set to mode \"continuous-video\""); } + } } - } #endif //check if yuv420sp format available. Set this format as preview format. 
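// Illustrative sketch, not part of the patch: the connection logic above resolves
// the proper android::Camera::connect overload at run time by probing mangled C++
// symbol names with dlsym(), newest ABI first. The mangled names below are the
// ones listed above; the library name is an assumption, and a real caller would
// cast the resolved pointer to the matching function type rather than just report it.

#include <dlfcn.h>
#include <cstdio>

int main()
{
    // Assumed library name; on a device this would be the Android camera client library.
    void* hal = dlopen("libcamera_client.so", RTLD_LAZY);
    if (!hal)
    {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    // Probe the newest Camera::connect signature first, then fall back to older ones.
    const char* candidates[] = {
        "_ZN7android6Camera7connectEiRKNS_8String16Ei", // 4.3: connect(int, const String16&, int)
        "_ZN7android6Camera7connectEii",                // 3.x/4.x: connect(int, int)
        "_ZN7android6Camera7connectEi",                 // 2.3: connect(int)
        "_ZN7android6Camera7connectEv"                  // 2.2: connect()
    };
    for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); ++i)
    {
        if (dlsym(hal, candidates[i]))
        {
            std::printf("usable connect variant: %s\n", candidates[i]);
            break;
        }
    }

    dlclose(hal);
    return 0;
}
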
@@ -560,26 +604,25 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, } } - status_t pdstatus; + status_t bufferStatus; #if defined(ANDROID_r2_2_0) - pdstatus = camera->setPreviewDisplay(sp(0 /*new DummySurface*/)); - if (pdstatus != 0) - LOGE("initCameraConnect: failed setPreviewDisplay(0) call; camera migth not work correctly on some devices"); + bufferStatus = camera->setPreviewDisplay(sp(0 /*new DummySurface*/)); + if (bufferStatus != 0) + LOGE("initCameraConnect: failed setPreviewDisplay(0) call (status %d); camera might not work correctly on some devices", bufferStatus); #elif defined(ANDROID_r2_3_3) /* Do nothing in case of 2.3 for now */ - #elif defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) sp surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID); - pdstatus = camera->setPreviewTexture(surfaceTexture); - if (pdstatus != 0) - LOGE("initCameraConnect: failed setPreviewTexture call; camera migth not work correctly"); -#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) + bufferStatus = camera->setPreviewTexture(surfaceTexture); + if (bufferStatus != 0) + LOGE("initCameraConnect: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus); +#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) sp bufferQueue = new BufferQueue(); sp queueListener = new ConsumerListenerStub(); bufferQueue->consumerConnect(queueListener); - pdstatus = camera->setPreviewTexture(bufferQueue); - if (pdstatus != 0) - LOGE("initCameraConnect: failed setPreviewTexture call; camera migth not work correctly"); + bufferStatus = camera->setPreviewTexture(bufferQueue); + if (bufferStatus != 0) + LOGE("initCameraConnect: failed setPreviewTexture call; camera might not work correctly"); #endif #if (defined(ANDROID_r2_2_0) || defined(ANDROID_r2_3_3) || defined(ANDROID_r3_0_1)) @@ -595,9 +638,9 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, #endif //!(defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)) LOGD("Starting preview"); - status_t resStart = camera->startPreview(); + status_t previewStatus = camera->startPreview(); - if (resStart != 0) + if (previewStatus != 0) { LOGE("initCameraConnect: startPreview() fails. Closing camera connection..."); handler->closeCameraConnect(); @@ -605,7 +648,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, } else { - LOGD("Preview started successfully"); + LOGD("Preview started successfully"); } return handler; @@ -620,9 +663,11 @@ void CameraHandler::closeCameraConnect() } camera->stopPreview(); +#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) + camera->setPreviewCallbackFlags(CAMERA_FRAME_CALLBACK_FLAG_NOOP); +#endif camera->disconnect(); camera.clear(); - camera=NULL; // ATTENTION!!!!!!!!!!!!!!!!!!!!!!!!!! 
// When we set @@ -863,14 +908,60 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler) if (*ppcameraHandler == 0) { - LOGE("applyProperties: Passed null *ppcameraHandler"); + LOGE("applyProperties: Passed NULL *ppcameraHandler"); return; } - LOGD("CameraHandler::applyProperties()"); - CameraHandler* previousCameraHandler=*ppcameraHandler; - CameraParameters curCameraParameters(previousCameraHandler->params.flatten()); + CameraParameters curCameraParameters((*ppcameraHandler)->params.flatten()); +#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) + CameraHandler* handler=*ppcameraHandler; + + handler->camera->stopPreview(); + handler->camera->setPreviewCallbackFlags(CAMERA_FRAME_CALLBACK_FLAG_NOOP); + + status_t reconnectStatus = handler->camera->reconnect(); + if (reconnectStatus != 0) + { + LOGE("applyProperties: failed to reconnect camera (status %d)", reconnectStatus); + return; + } + + handler->camera->setParameters(curCameraParameters.flatten()); + handler->params.unflatten(curCameraParameters.flatten()); + + status_t bufferStatus; +# if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) + sp surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID); + bufferStatus = handler->camera->setPreviewTexture(surfaceTexture); + if (bufferStatus != 0) + LOGE("applyProperties: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus); +# elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) + sp bufferQueue = new BufferQueue(); + sp queueListener = new ConsumerListenerStub(); + bufferQueue->consumerConnect(queueListener); + bufferStatus = handler->camera->setPreviewTexture(bufferQueue); + if (bufferStatus != 0) + LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly"); +# endif + + handler->camera->setPreviewCallbackFlags( CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK | CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK);//with copy + + LOGD("Starting preview"); + status_t previewStatus = handler->camera->startPreview(); + + if (previewStatus != 0) + { + LOGE("initCameraConnect: startPreview() fails. 
Closing camera connection..."); + handler->closeCameraConnect(); + handler = NULL; + } + else + { + LOGD("Preview started successfully"); + } +#else + CameraHandler* previousCameraHandler=*ppcameraHandler; CameraCallback cameraCallback=previousCameraHandler->cameraCallback; void* userData=previousCameraHandler->userData; int cameraId=previousCameraHandler->cameraId; @@ -879,7 +970,6 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler) previousCameraHandler->closeCameraConnect(); LOGD("CameraHandler::applyProperties(): after previousCameraHandler->closeCameraConnect"); - LOGD("CameraHandler::applyProperties(): before initCameraConnect"); CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, &curCameraParameters); LOGD("CameraHandler::applyProperties(): after initCameraConnect, handler=0x%x", (int)handler); @@ -892,6 +982,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler) } } (*ppcameraHandler)=handler; +#endif } diff --git a/modules/androidcamera/camera_wrapper/camera_wrapper.h b/modules/androidcamera/camera_wrapper/camera_wrapper.h index 88c9c4100..430dea2dd 100644 --- a/modules/androidcamera/camera_wrapper/camera_wrapper.h +++ b/modules/androidcamera/camera_wrapper/camera_wrapper.h @@ -14,4 +14,3 @@ double getCameraPropertyC(void* camera, int propIdx); void setCameraPropertyC(void* camera, int propIdx, double value); void applyCameraPropertiesC(void** camera); } - diff --git a/modules/androidcamera/include/camera_activity.hpp b/modules/androidcamera/include/camera_activity.hpp index 76a63b06e..a6a62e767 100644 --- a/modules/androidcamera/include/camera_activity.hpp +++ b/modules/androidcamera/include/camera_activity.hpp @@ -45,4 +45,4 @@ private: int frameHeight; }; -#endif \ No newline at end of file +#endif diff --git a/modules/androidcamera/src/camera_activity.cpp b/modules/androidcamera/src/camera_activity.cpp index 508159b17..84db3e1f3 100644 --- a/modules/androidcamera/src/camera_activity.cpp +++ b/modules/androidcamera/src/camera_activity.cpp @@ -431,14 +431,14 @@ void CameraActivity::applyProperties() int CameraActivity::getFrameWidth() { if (frameWidth <= 0) - frameWidth = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH); + frameWidth = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH); return frameWidth; } int CameraActivity::getFrameHeight() { if (frameHeight <= 0) - frameHeight = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT); + frameHeight = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT); return frameHeight; } diff --git a/modules/calib3d/doc/calib3d.rst b/modules/calib3d/doc/calib3d.rst index 0ac8d5196..0c056843d 100644 --- a/modules/calib3d/doc/calib3d.rst +++ b/modules/calib3d/doc/calib3d.rst @@ -6,4 +6,3 @@ calib3d. Camera Calibration and 3D Reconstruction :maxdepth: 2 camera_calibration_and_3d_reconstruction - diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index b90fba7e1..37159b016 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -105,7 +105,16 @@ The functions below use the above model to do the following: * Estimate the relative position and orientation of the stereo camera "heads" and compute the *rectification* transformation that makes the camera optical axes parallel. +.. 
note:: + * A calibration sample for 3 cameras in horizontal position can be found at opencv_source_code/samples/cpp/3calibration.cpp + * A calibration sample based on a sequence of images can be found at opencv_source_code/samples/cpp/calibration.cpp + * A calibration sample in order to do 3D reconstruction can be found at opencv_source_code/samples/cpp/build3dmodel.cpp + * A calibration sample of an artificially generated camera and chessboard patterns can be found at opencv_source_code/samples/cpp/calibration_artificial.cpp + * A calibration example on stereo calibration can be found at opencv_source_code/samples/cpp/stereo_calib.cpp + * A calibration example on stereo matching can be found at opencv_source_code/samples/cpp/stereo_match.cpp + + * (Python) A camera calibration sample can be found at opencv_source_code/samples/python2/calibrate.py calibrateCamera --------------- @@ -579,7 +588,9 @@ Finds an object pose from 3D-2D point correspondences. The function estimates the object pose given a set of object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients. +.. note:: + + * An example of how to use solvePnP for planar augmented reality can be found at opencv_source_code/samples/python2/plane_ar.py solvePnPRansac ------------------ @@ -766,6 +777,9 @@ Homography matrix is determined up to a scale. Thus, it is normalized so that :ocv:func:`warpPerspective`, :ocv:func:`perspectiveTransform` +.. note:: + + * An example on calculating a homography for image matching can be found at opencv_source_code/samples/cpp/video_homography.cpp estimateAffine3D -------------------- @@ -1072,6 +1086,9 @@ Class for computing stereo correspondence using the block matching algorithm. :: The class is a C++ wrapper for the associated functions. In particular, :ocv:funcx:`StereoBM::operator()` is the wrapper for :ocv:cfunc:`cvFindStereoCorrespondenceBM`. +.. Sample code: + + (Ocl) An example for using the stereoBM matching algorithm can be found at opencv_source_code/samples/ocl/stereo_match.cpp StereoBM::StereoBM ------------------ @@ -1171,7 +1188,9 @@ The class implements the modified H. Hirschmuller algorithm [HH08]_ that differs * Some pre- and post- processing steps from K. Konolige algorithm :ocv:funcx:`StereoBM::operator()` are included, for example: pre-filtering (``CV_STEREO_BM_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering). +.. note:: + + * (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found at opencv_source_code/samples/python2/stereo_match.py StereoSGBM::StereoSGBM -------------------------- @@ -1462,7 +1481,7 @@ Reconstructs points by triangulation. :param points4D: 4xN array of reconstructed points in homogeneous coordinates. -The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`. +The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projection matrices can be obtained from :ocv:func:`stereoRectify`. .. 
seealso:: diff --git a/modules/calib3d/perf/perf_precomp.cpp b/modules/calib3d/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d4..000000000 --- a/modules/calib3d/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/calib3d/src/_modelest.h b/modules/calib3d/src/_modelest.h index 2488a934b..4aef46edf 100644 --- a/modules/calib3d/src/_modelest.h +++ b/modules/calib3d/src/_modelest.h @@ -78,4 +78,3 @@ protected: }; #endif // _CV_MODEL_EST_H_ - diff --git a/modules/calib3d/src/epnp.cpp b/modules/calib3d/src/epnp.cpp index 7f4782ce0..30786f946 100644 --- a/modules/calib3d/src/epnp.cpp +++ b/modules/calib3d/src/epnp.cpp @@ -622,4 +622,3 @@ void epnp::qr_solve(CvMat * A, CvMat * b, CvMat * X) pX[i] = (pb[i] - sum) / A2[i]; } } - diff --git a/modules/calib3d/src/p3p.cpp b/modules/calib3d/src/p3p.cpp index a02da3ecb..674df2c8a 100644 --- a/modules/calib3d/src/p3p.cpp +++ b/modules/calib3d/src/p3p.cpp @@ -413,4 +413,3 @@ bool p3p::jacobi_4x4(double * A, double * D, double * U) return false; } - diff --git a/modules/calib3d/src/p3p.h b/modules/calib3d/src/p3p.h index a58b0fa14..57f8d7df8 100644 --- a/modules/calib3d/src/p3p.h +++ b/modules/calib3d/src/p3p.h @@ -59,4 +59,3 @@ class p3p }; #endif // P3P_H - diff --git a/modules/calib3d/src/precomp.cpp b/modules/calib3d/src/precomp.cpp deleted file mode 100644 index 3e0ec42de..000000000 --- a/modules/calib3d/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/calib3d/src/precomp.hpp b/modules/calib3d/src/precomp.hpp index 9b1f433ad..e96ba533f 100644 --- a/modules/calib3d/src/precomp.hpp +++ b/modules/calib3d/src/precomp.hpp @@ -42,9 +42,7 @@ #ifndef __OPENCV_PRECOMP_H__ #define __OPENCV_PRECOMP_H__ -#ifdef HAVE_CVCONFIG_H #include "cvconfig.h" -#endif #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp index 3d2c0c2c4..b0ef1d983 100644 --- a/modules/calib3d/src/solvepnp.cpp +++ b/modules/calib3d/src/solvepnp.cpp @@ -346,4 +346,3 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints, } return; } - diff --git a/modules/calib3d/test/test_affine3d_estimator.cpp b/modules/calib3d/test/test_affine3d_estimator.cpp index eedfa687c..732400b91 100644 --- a/modules/calib3d/test/test_affine3d_estimator.cpp +++ b/modules/calib3d/test/test_affine3d_estimator.cpp @@ -193,4 +193,3 @@ void CV_Affine3D_EstTest::run( int /* start_from */) } TEST(Calib3d_EstimateAffineTransform, accuracy) { CV_Affine3D_EstTest test; test.safe_run(); } - diff --git a/modules/calib3d/test/test_cameracalibration_badarg.cpp b/modules/calib3d/test/test_cameracalibration_badarg.cpp index b805e71a4..b5ce3fca6 100644 --- a/modules/calib3d/test/test_cameracalibration_badarg.cpp +++ b/modules/calib3d/test/test_cameracalibration_badarg.cpp @@ -734,5 +734,3 @@ protected: TEST(Calib3d_CalibrateCamera_C, badarg) { CV_CameraCalibrationBadArgTest test; test.safe_run(); } TEST(Calib3d_Rodrigues_C, badarg) { CV_Rodrigues2BadArgTest test; test.safe_run(); } TEST(Calib3d_ProjectPoints_C, badarg) { CV_ProjectPoints2BadArgTest test; test.safe_run(); } - - diff --git a/modules/calib3d/test/test_chessboardgenerator.cpp b/modules/calib3d/test/test_chessboardgenerator.cpp index d8ec943a4..01a4c019c 100644 --- a/modules/calib3d/test/test_chessboardgenerator.cpp +++ b/modules/calib3d/test/test_chessboardgenerator.cpp @@ -329,4 +329,3 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const return generateChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, squareSize.width, squareSize.height, pts3d, corners); } - diff --git a/modules/calib3d/test/test_compose_rt.cpp b/modules/calib3d/test/test_compose_rt.cpp index b71288e6d..577cc0627 100644 --- a/modules/calib3d/test/test_compose_rt.cpp +++ b/modules/calib3d/test/test_compose_rt.cpp @@ -212,4 +212,3 @@ protected: }; TEST(Calib3d_ComposeRT, accuracy) { CV_composeRT_Test test; test.safe_run(); } - diff --git a/modules/calib3d/test/test_precomp.cpp b/modules/calib3d/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/calib3d/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/calib3d/test/test_precomp.hpp b/modules/calib3d/test/test_precomp.hpp index e0fcd486d..76381b825 100644 --- a/modules/calib3d/test/test_precomp.hpp +++ b/modules/calib3d/test/test_precomp.hpp @@ -22,4 +22,3 @@ namespace cvtest } #endif - diff --git a/modules/calib3d/test/test_reproject_image_to_3d.cpp b/modules/calib3d/test/test_reproject_image_to_3d.cpp index a93804f74..0c03831c9 100644 --- a/modules/calib3d/test/test_reproject_image_to_3d.cpp +++ b/modules/calib3d/test/test_reproject_image_to_3d.cpp @@ -172,4 +172,3 @@ protected: }; TEST(Calib3d_ReprojectImageTo3D, accuracy) { CV_ReprojectImageTo3DTest test; test.safe_run(); } - diff --git 
a/modules/calib3d/test/test_solvepnp_ransac.cpp b/modules/calib3d/test/test_solvepnp_ransac.cpp index 3377a57c6..6c924a580 100644 --- a/modules/calib3d/test/test_solvepnp_ransac.cpp +++ b/modules/calib3d/test/test_solvepnp_ransac.cpp @@ -303,4 +303,4 @@ TEST(DISABLED_Calib3d_SolvePnPRansac, concurrency) EXPECT_LT(tnorm, 1e-6); } -#endif \ No newline at end of file +#endif diff --git a/modules/calib3d/test/test_undistort_points.cpp b/modules/calib3d/test/test_undistort_points.cpp index c1ace3d4d..5dabd213d 100644 --- a/modules/calib3d/test/test_undistort_points.cpp +++ b/modules/calib3d/test/test_undistort_points.cpp @@ -94,4 +94,4 @@ void CV_UndistortTest::run(int /* start_from */) } } -TEST(Calib3d_Undistort, accuracy) { CV_UndistortTest test; test.safe_run(); } \ No newline at end of file +TEST(Calib3d_Undistort, accuracy) { CV_UndistortTest test; test.safe_run(); } diff --git a/modules/contrib/doc/facerec/facerec_api.rst b/modules/contrib/doc/facerec/facerec_api.rst index 8bea7070a..9e8170d48 100644 --- a/modules/contrib/doc/facerec/facerec_api.rst +++ b/modules/contrib/doc/facerec/facerec_api.rst @@ -3,6 +3,12 @@ FaceRecognizer .. highlight:: cpp +.. Sample code:: + + * An example using the FaceRecognizer class can be found at opencv_source_code/samples/cpp/facerec_demo.cpp + + * (Python) An example using the FaceRecognizer class can be found at opencv_source_code/samples/python2/facerec_demo.py + FaceRecognizer -------------- diff --git a/modules/contrib/doc/facerec/facerec_changelog.rst b/modules/contrib/doc/facerec/facerec_changelog.rst index fc8b1aded..107135818 100644 --- a/modules/contrib/doc/facerec/facerec_changelog.rst +++ b/modules/contrib/doc/facerec/facerec_changelog.rst @@ -4,19 +4,19 @@ Changelog Release 0.05 ------------ -This library is now included in the official OpenCV distribution (from 2.4 on). +This library is now included in the official OpenCV distribution (from 2.4 on). The :ocv:class`FaceRecognizer` is now an :ocv:class:`Algorithm`, which better fits into the overall -OpenCV API. +OpenCV API. -To reduce the confusion on user side and minimize my work, libfacerec and OpenCV -have been synchronized and are now based on the same interfaces and implementation. +To reduce the confusion on user side and minimize my work, libfacerec and OpenCV +have been synchronized and are now based on the same interfaces and implementation. The library now has an extensive documentation: * The API is explained in detail and with a lot of code examples. -* The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to the new OpenCV C++ ``cv::FaceRecognizer``. +* The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to the new OpenCV C++ ``cv::FaceRecognizer``. * A tutorial for gender classification with Fisherfaces. -* A tutorial for face recognition in videos (e.g. webcam). +* A tutorial for face recognition in videos (e.g. webcam). Release highlights @@ -27,8 +27,8 @@ Release highlights Release 0.04 ------------ -This version is fully Windows-compatible and works with OpenCV 2.3.1. Several -bugfixes, but none influenced the recognition rate. +This version is fully Windows-compatible and works with OpenCV 2.3.1. Several +bugfixes, but none influenced the recognition rate. Release highlights ++++++++++++++++++ @@ -40,9 +40,9 @@ Release highlights Release 0.03 ------------ -Reworked the library to provide separate implementations in cpp files, because -it's the preferred way of contributing OpenCV libraries. 
This means the library -is not header-only anymore. Slight API changes were done, please see the +Reworked the library to provide separate implementations in cpp files, because +it's the preferred way of contributing OpenCV libraries. This means the library +is not header-only anymore. Slight API changes were done, please see the documentation for details. Release highlights @@ -55,9 +55,9 @@ Release highlights Release 0.02 ------------ -Reworked the library to provide separate implementations in cpp files, because -it's the preferred way of contributing OpenCV libraries. This means the library -is not header-only anymore. Slight API changes were done, please see the +Reworked the library to provide separate implementations in cpp files, because +it's the preferred way of contributing OpenCV libraries. This means the library +is not header-only anymore. Slight API changes were done, please see the documentation for details. Release highlights @@ -80,7 +80,7 @@ Release highlights * Eigenfaces [TP91]_ * Fisherfaces [BHK97]_ * Local Binary Patterns Histograms [AHP04]_ - + * Added persistence facilities to store the models with a common API. * Unit Tests (using `gtest `_). * Providing a CMakeLists.txt to enable easy cross-platform building. diff --git a/modules/contrib/doc/facerec/facerec_tutorial.rst b/modules/contrib/doc/facerec/facerec_tutorial.rst index 61cd882da..cbfb41797 100644 --- a/modules/contrib/doc/facerec/facerec_tutorial.rst +++ b/modules/contrib/doc/facerec/facerec_tutorial.rst @@ -201,7 +201,7 @@ For the first source code example, I'll go through it with you. I am first givin .. literalinclude:: src/facerec_eigenfaces.cpp :language: cpp :linenos: - + The source code for this demo application is also available in the ``src`` folder coming with this documentation: * :download:`src/facerec_eigenfaces.cpp ` @@ -626,5 +626,3 @@ CSV for the AT&T Facedatabase .. literalinclude:: etc/at.txt :language: none :linenos: - - diff --git a/modules/contrib/doc/facerec/index.rst b/modules/contrib/doc/facerec/index.rst index ce8a15e67..b871448c5 100644 --- a/modules/contrib/doc/facerec/index.rst +++ b/modules/contrib/doc/facerec/index.rst @@ -30,4 +30,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/modules/contrib/doc/facerec/src/CMakeLists.txt b/modules/contrib/doc/facerec/src/CMakeLists.txt index e56762ea4..94aa36fbe 100644 --- a/modules/contrib/doc/facerec/src/CMakeLists.txt +++ b/modules/contrib/doc/facerec/src/CMakeLists.txt @@ -23,4 +23,3 @@ target_link_libraries(facerec_fisherfaces opencv_contrib opencv_core opencv_imgp add_executable(facerec_lbph facerec_lbph.cpp) target_link_libraries(facerec_lbph opencv_contrib opencv_core opencv_imgproc opencv_highgui) - diff --git a/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst b/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst index 0c7398d2f..770083170 100644 --- a/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst +++ b/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst @@ -231,5 +231,3 @@ Here are some examples: +---------------------------------+----------------------------------------------------------------------------+ | 0.2 (20%), 0.2 (20%), (70,70) | .. 
image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg | +---------------------------------+----------------------------------------------------------------------------+ - - diff --git a/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst b/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst index faa77df7c..2d0b65dff 100644 --- a/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst +++ b/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst @@ -6,7 +6,7 @@ Introduction Saving and loading a :ocv:class:`FaceRecognizer` is very important. Training a FaceRecognizer can be a very time-intense task, plus it's often impossible to ship the whole face database to the user of your product. The task of saving and loading a FaceRecognizer is easy with :ocv:class:`FaceRecognizer`. You only have to call :ocv:func:`FaceRecognizer::load` for loading and :ocv:func:`FaceRecognizer::save` for saving a :ocv:class:`FaceRecognizer`. -I'll adapt the Eigenfaces example from the :doc:`../facerec_tutorial`: Imagine we want to learn the Eigenfaces of the `AT&T Facedatabase `_, store the model to a YAML file and then load it again. +I'll adapt the Eigenfaces example from the :doc:`../facerec_tutorial`: Imagine we want to learn the Eigenfaces of the `AT&T Facedatabase `_, store the model to a YAML file and then load it again. From the loaded model, we'll get a prediction, show the mean, Eigenfaces and the image reconstruction. @@ -44,4 +44,3 @@ And here is the Reconstruction, which is the same as the original: .. image:: ../img/eigenface_reconstruction_opencv.png :align: center - diff --git a/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst b/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst index ecb979d1a..76e76eebe 100644 --- a/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst +++ b/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst @@ -111,7 +111,7 @@ An example. If the haar-cascade is at ``C:/opencv/data/haarcascades/haarcascade_ facerec_video.exe C:/opencv/data/haarcascades/haarcascade_frontalface_default.xml C:/facerec/data/celebrities.txt 1 -That's it. +That's it. Results ------- @@ -205,5 +205,3 @@ Here are some examples: +---------------------------------+----------------------------------------------------------------------------+ | 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg | +---------------------------------+----------------------------------------------------------------------------+ - - diff --git a/modules/contrib/doc/openfabmap.rst b/modules/contrib/doc/openfabmap.rst index 2f2ad4074..e2f157a07 100644 --- a/modules/contrib/doc/openfabmap.rst +++ b/modules/contrib/doc/openfabmap.rst @@ -9,6 +9,10 @@ FAB-MAP is an approach to appearance-based place recognition. FAB-MAP compares i openFABMAP requires training data (e.g. a collection of images from a similar but not identical environment) to construct a visual vocabulary for the visual bag-of-words model, along with a Chow-Liu tree representation of feature likelihood and for use in the Sampled new place method (see below). +.. 
note:: + + * An example using the openFABMAP package can be found at opencv_source_code/samples/cpp/fabmap_sample.cpp + of2::FabMap -------------------- diff --git a/modules/contrib/doc/retina/index.rst b/modules/contrib/doc/retina/index.rst index a3a5bc82f..3d1f59c8e 100644 --- a/modules/contrib/doc/retina/index.rst +++ b/modules/contrib/doc/retina/index.rst @@ -63,6 +63,11 @@ The retina can be settled up with various parameters, by default, the retina can void activateContoursProcessing (const bool activate); }; +.. Sample code:: + + * An example on retina tone mapping can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp + * An example on retina tone mapping on video input can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp + * A complete example illustrating the retina interface can be found at opencv_source_code/samples/cpp/retinaDemo.cpp Description +++++++++++ diff --git a/modules/contrib/doc/stereo.rst b/modules/contrib/doc/stereo.rst index d5f2d0080..103bd0f3f 100644 --- a/modules/contrib/doc/stereo.rst +++ b/modules/contrib/doc/stereo.rst @@ -113,5 +113,3 @@ The method executes the variational algorithm on a rectified stereo pair. See `` **Note**: The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously. - - diff --git a/modules/contrib/include/opencv2/contrib/contrib.hpp b/modules/contrib/include/opencv2/contrib/contrib.hpp index f18a5f93c..7d881c359 100644 --- a/modules/contrib/include/opencv2/contrib/contrib.hpp +++ b/modules/contrib/include/opencv2/contrib/contrib.hpp @@ -983,4 +983,3 @@ namespace cv #endif #endif - diff --git a/modules/contrib/include/opencv2/contrib/retina.hpp b/modules/contrib/include/opencv2/contrib/retina.hpp index 6f660efca..3d7c847be 100644 --- a/modules/contrib/include/opencv2/contrib/retina.hpp +++ b/modules/contrib/include/opencv2/contrib/retina.hpp @@ -353,4 +353,3 @@ protected: } #endif /* __OPENCV_CONTRIB_RETINA_HPP__ */ - diff --git a/modules/contrib/src/adaptiveskindetector.cpp b/modules/contrib/src/adaptiveskindetector.cpp index a44c42077..0865ad70b 100644 --- a/modules/contrib/src/adaptiveskindetector.cpp +++ b/modules/contrib/src/adaptiveskindetector.cpp @@ -284,5 +284,3 @@ void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histog } } }; - - diff --git a/modules/contrib/src/ba.cpp b/modules/contrib/src/ba.cpp index 80047877b..0e2afd95b 100644 --- a/modules/contrib/src/ba.cpp +++ b/modules/contrib/src/ba.cpp @@ -744,7 +744,7 @@ static void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, C CvMat* _mp = cvCreateMat(1, 1, CV_64FC2 ); //projection of the point //split camera params into different matrices - CvMat _ri, _ti, _k; + CvMat _ri, _ti, _k = cvMat(0, 0, CV_64F, NULL); // dummy initialization to fix warning of cl.exe cvGetRows( cam_params, &_ri, 0, 3 ); cvGetRows( cam_params, &_ti, 3, 6 ); diff --git a/modules/contrib/src/basicretinafilter.hpp b/modules/contrib/src/basicretinafilter.hpp index 439f0d15f..8bd136d68 100644 --- a/modules/contrib/src/basicretinafilter.hpp +++ b/modules/contrib/src/basicretinafilter.hpp @@ -653,5 +653,3 @@ namespace cv } #endif - - diff --git a/modules/contrib/src/bowmsctrainer.cpp b/modules/contrib/src/bowmsctrainer.cpp index 448505c5b..12d9781b2 100644 --- a/modules/contrib/src/bowmsctrainer.cpp +++ b/modules/contrib/src/bowmsctrainer.cpp @@ -136,4 +136,3 @@ Mat BOWMSCTrainer::cluster(const Mat& _descriptors) 
const { } } - diff --git a/modules/contrib/src/chowliutree.cpp b/modules/contrib/src/chowliutree.cpp index ba1ef6561..64417d1d9 100644 --- a/modules/contrib/src/chowliutree.cpp +++ b/modules/contrib/src/chowliutree.cpp @@ -287,4 +287,3 @@ bool ChowLiuTree::reduceEdgesToMinSpan(std::list& edges) { } } - diff --git a/modules/contrib/src/colortracker.cpp b/modules/contrib/src/colortracker.cpp index 03cdf07ae..dfa3ed5d0 100644 --- a/modules/contrib/src/colortracker.cpp +++ b/modules/contrib/src/colortracker.cpp @@ -133,5 +133,3 @@ Point2f CvMeanShiftTracker::getTrackingCenter() { return prev_center; } - - diff --git a/modules/contrib/src/contrib_init.cpp b/modules/contrib/src/contrib_init.cpp index a80f6f5e1..317867a59 100644 --- a/modules/contrib/src/contrib_init.cpp +++ b/modules/contrib/src/contrib_init.cpp @@ -41,4 +41,3 @@ //M*/ #include "precomp.hpp" - diff --git a/modules/contrib/src/detection_based_tracker.cpp b/modules/contrib/src/detection_based_tracker.cpp index a1e5dea4d..0b3fcce65 100644 --- a/modules/contrib/src/detection_based_tracker.cpp +++ b/modules/contrib/src/detection_based_tracker.cpp @@ -853,4 +853,3 @@ const DetectionBasedTracker::Parameters& DetectionBasedTracker::getParameters() } #endif - diff --git a/modules/contrib/src/facerec.cpp b/modules/contrib/src/facerec.cpp index fbf124a48..bc41a86a0 100644 --- a/modules/contrib/src/facerec.cpp +++ b/modules/contrib/src/facerec.cpp @@ -306,7 +306,7 @@ void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) { dynamic_cast(this)->update( src, labels ); return; } - + string error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str()); CV_Error(CV_StsNotImplemented, error_msg); } diff --git a/modules/contrib/src/featuretracker.cpp b/modules/contrib/src/featuretracker.cpp index 44d561052..b8d40deb0 100644 --- a/modules/contrib/src/featuretracker.cpp +++ b/modules/contrib/src/featuretracker.cpp @@ -59,6 +59,7 @@ CvFeatureTracker::CvFeatureTracker(CvFeatureTrackerParams _params) : dd->set("nOctaveLayers", 5); dd->set("contrastThreshold", 0.04); dd->set("edgeThreshold", 10.7); + break; case CvFeatureTrackerParams::SURF: dd = Algorithm::create("Feature2D.SURF"); if( dd.empty() ) @@ -66,8 +67,10 @@ CvFeatureTracker::CvFeatureTracker(CvFeatureTrackerParams _params) : dd->set("hessianThreshold", 400); dd->set("nOctaves", 3); dd->set("nOctaveLayers", 4); + break; default: CV_Error(CV_StsBadArg, "Unknown feature type"); + break; } matcher = new BFMatcher(NORM_L2); @@ -218,4 +221,3 @@ Point2f CvFeatureTracker::getTrackingCenter() center.y = (float)(prev_center.y + prev_trackwindow.height/2.0); return center; } - diff --git a/modules/contrib/src/fuzzymeanshifttracker.cpp b/modules/contrib/src/fuzzymeanshifttracker.cpp index c83f915b0..d221d49c0 100644 --- a/modules/contrib/src/fuzzymeanshifttracker.cpp +++ b/modules/contrib/src/fuzzymeanshifttracker.cpp @@ -720,4 +720,3 @@ void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int searchMode = tsTracking; } }; - diff --git a/modules/contrib/src/hybridtracker.cpp b/modules/contrib/src/hybridtracker.cpp index 362de7c04..0fd31f8f7 100644 --- a/modules/contrib/src/hybridtracker.cpp +++ b/modules/contrib/src/hybridtracker.cpp @@ -234,4 +234,3 @@ void CvHybridTracker::updateTrackerWithLowPassFilter(Mat) { Rect CvHybridTracker::getTrackingWindow() { return prev_window; } - diff --git a/modules/contrib/src/inputoutput.cpp b/modules/contrib/src/inputoutput.cpp index 
a711f242a..37510c666 100644 --- a/modules/contrib/src/inputoutput.cpp +++ b/modules/contrib/src/inputoutput.cpp @@ -1,5 +1,6 @@ #include "opencv2/contrib/contrib.hpp" +#include #if defined(WIN32) || defined(_WIN32) #include @@ -10,16 +11,27 @@ namespace cv { - std::vector Directory::GetListFiles( const std::string& path, const std::string & exten, bool addPath ) + std::vector Directory::GetListFiles( const std::string& path, const std::string & exten, bool addPath ) { std::vector list; list.clear(); std::string path_f = path + "/" + exten; #ifdef WIN32 - WIN32_FIND_DATA FindFileData; - HANDLE hFind; + #ifdef HAVE_WINRT + WIN32_FIND_DATAW FindFileData; + #else + WIN32_FIND_DATAA FindFileData; + #endif + HANDLE hFind; - hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData); + #ifdef HAVE_WINRT + wchar_t wpath[MAX_PATH]; + size_t copied = mbstowcs(wpath, path_f.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0); + #else + hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData); + #endif if (hFind == INVALID_HANDLE_VALUE) { return list; @@ -34,13 +46,26 @@ namespace cv FindFileData.dwFileAttributes == FILE_ATTRIBUTE_SYSTEM || FindFileData.dwFileAttributes == FILE_ATTRIBUTE_READONLY) { + char* fname; + #ifdef HAVE_WINRT + char fname_tmp[MAX_PATH] = {0}; + size_t copied = wcstombs(fname_tmp, FindFileData.cFileName, MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + fname = fname_tmp; + #else + fname = FindFileData.cFileName; + #endif if (addPath) - list.push_back(path + "/" + FindFileData.cFileName); + list.push_back(path + "/" + std::string(fname)); else - list.push_back(FindFileData.cFileName); + list.push_back(std::string(fname)); } } - while(FindNextFile(hFind, &FindFileData)); + #ifdef HAVE_WINRT + while(FindNextFileW(hFind, &FindFileData)); + #else + while(FindNextFileA(hFind, &FindFileData)); + #endif FindClose(hFind); } #else @@ -75,10 +100,22 @@ namespace cv std::string path_f = path + "/" + exten; list.clear(); #ifdef WIN32 - WIN32_FIND_DATA FindFileData; + #ifdef HAVE_WINRT + WIN32_FIND_DATAW FindFileData; + #else + WIN32_FIND_DATAA FindFileData; + #endif HANDLE hFind; - hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData); + #ifdef HAVE_WINRT + wchar_t wpath [MAX_PATH]; + size_t copied = mbstowcs(wpath, path_f.c_str(), path_f.size()); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + + hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0); + #else + hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData); + #endif if (hFind == INVALID_HANDLE_VALUE) { return list; @@ -87,17 +124,37 @@ namespace cv { do { +#ifdef HAVE_WINRT + if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY && + wcscmp(FindFileData.cFileName, L".") != 0 && + wcscmp(FindFileData.cFileName, L"..") != 0) +#else if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY && strcmp(FindFileData.cFileName, ".") != 0 && strcmp(FindFileData.cFileName, "..") != 0) +#endif { + char* fname; + #ifdef HAVE_WINRT + char fname_tmp[MAX_PATH]; + size_t copied = wcstombs(fname_tmp, FindFileData.cFileName, MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + fname = fname_tmp; + #else + fname = FindFileData.cFileName; + #endif + if (addPath) - list.push_back(path + "/" + FindFileData.cFileName); + list.push_back(path + "/" + std::string(fname)); else - 
list.push_back(FindFileData.cFileName); + list.push_back(std::string(fname)); } } - while(FindNextFile(hFind, &FindFileData)); + #ifdef HAVE_WINRT + while(FindNextFileW(hFind, &FindFileData)); + #else + while(FindNextFileA(hFind, &FindFileData)); + #endif FindClose(hFind); } diff --git a/modules/contrib/src/lda.cpp b/modules/contrib/src/lda.cpp index 5ff94ce6e..98c696612 100644 --- a/modules/contrib/src/lda.cpp +++ b/modules/contrib/src/lda.cpp @@ -1111,4 +1111,3 @@ Mat LDA::reconstruct(InputArray src) { } } - diff --git a/modules/contrib/src/logpolar_bsm.cpp b/modules/contrib/src/logpolar_bsm.cpp index 3de6a6182..b96eea803 100644 --- a/modules/contrib/src/logpolar_bsm.cpp +++ b/modules/contrib/src/logpolar_bsm.cpp @@ -649,4 +649,3 @@ LogPolar_Adjacent::~LogPolar_Adjacent() } } - diff --git a/modules/contrib/src/magnoretinafilter.cpp b/modules/contrib/src/magnoretinafilter.cpp index 6f72c5bbe..57a4466a3 100644 --- a/modules/contrib/src/magnoretinafilter.cpp +++ b/modules/contrib/src/magnoretinafilter.cpp @@ -207,5 +207,3 @@ const std::valarray &MagnoRetinaFilter::runFilter(const std::valarrayactivateContoursProcessing(activate);} } // end of namespace cv - diff --git a/modules/contrib/src/retinacolor.hpp b/modules/contrib/src/retinacolor.hpp index 056d006e4..7b7294442 100644 --- a/modules/contrib/src/retinacolor.hpp +++ b/modules/contrib/src/retinacolor.hpp @@ -338,5 +338,3 @@ namespace cv } #endif /*RETINACOLOR_HPP_*/ - - diff --git a/modules/contrib/src/retinafilter.hpp b/modules/contrib/src/retinafilter.hpp index 7fa2a078c..572ae4c25 100644 --- a/modules/contrib/src/retinafilter.hpp +++ b/modules/contrib/src/retinafilter.hpp @@ -543,7 +543,3 @@ private: } #endif /*RETINACLASSES_H_*/ - - - - diff --git a/modules/contrib/src/stereovar.cpp b/modules/contrib/src/stereovar.cpp index 1b542bbf5..e08d6328b 100644 --- a/modules/contrib/src/stereovar.cpp +++ b/modules/contrib/src/stereovar.cpp @@ -408,4 +408,4 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp ) u.release(); } -} // namespace \ No newline at end of file +} // namespace diff --git a/modules/contrib/src/templatebuffer.hpp b/modules/contrib/src/templatebuffer.hpp index c49e49358..e50cd442c 100644 --- a/modules/contrib/src/templatebuffer.hpp +++ b/modules/contrib/src/templatebuffer.hpp @@ -550,6 +550,3 @@ namespace cv } #endif - - - diff --git a/modules/contrib/test/test_precomp.cpp b/modules/contrib/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/contrib/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/contrib/test/test_precomp.hpp b/modules/contrib/test/test_precomp.hpp index d477eddbb..ea73ce932 100644 --- a/modules/contrib/test/test_precomp.hpp +++ b/modules/contrib/test/test_precomp.hpp @@ -14,4 +14,3 @@ #include #endif - diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt index dc62a884f..fe13daabb 100644 --- a/modules/core/CMakeLists.txt +++ b/modules/core/CMakeLists.txt @@ -2,6 +2,10 @@ set(the_description "The Core Functionality") ocv_add_module(core ${ZLIB_LIBRARIES}) ocv_module_include_directories(${ZLIB_INCLUDE_DIR}) +if (HAVE_WINRT) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"") +endif() + if(HAVE_CUDA) ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include") ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) diff --git a/modules/core/doc/basic_structures.rst 
b/modules/core/doc/basic_structures.rst index 370587922..16d7e500a 100644
--- a/modules/core/doc/basic_structures.rst
+++ b/modules/core/doc/basic_structures.rst
@@ -49,6 +49,43 @@ Point\_
 -------
 .. ocv:class:: Point_
 
+::
+
+    template<typename _Tp> class CV_EXPORTS Point_
+    {
+    public:
+        typedef _Tp value_type;
+
+        // various constructors
+        Point_();
+        Point_(_Tp _x, _Tp _y);
+        Point_(const Point_& pt);
+        Point_(const CvPoint& pt);
+        Point_(const CvPoint2D32f& pt);
+        Point_(const Size_<_Tp>& sz);
+        Point_(const Vec<_Tp, 2>& v);
+
+        Point_& operator = (const Point_& pt);
+        //! conversion to another data type
+        template<typename _Tp2> operator Point_<_Tp2>() const;
+
+        //! conversion to the old-style C structures
+        operator CvPoint() const;
+        operator CvPoint2D32f() const;
+        operator Vec<_Tp, 2>() const;
+
+        //! dot product
+        _Tp dot(const Point_& pt) const;
+        //! dot product computed in double-precision arithmetics
+        double ddot(const Point_& pt) const;
+        //! cross-product
+        double cross(const Point_& pt) const;
+        //! checks whether the point is inside the specified rectangle
+        bool inside(const Rect_<_Tp>& r) const;
+
+        _Tp x, y; //< the point coordinates
+    };
+
 Template class for 2D points specified by its coordinates
 :math:`x` and
 :math:`y` .
@@ -84,6 +121,39 @@ Point3\_
 --------
 .. ocv:class:: Point3_
 
+::
+
+    template<typename _Tp> class CV_EXPORTS Point3_
+    {
+    public:
+        typedef _Tp value_type;
+
+        // various constructors
+        Point3_();
+        Point3_(_Tp _x, _Tp _y, _Tp _z);
+        Point3_(const Point3_& pt);
+        explicit Point3_(const Point_<_Tp>& pt);
+        Point3_(const CvPoint3D32f& pt);
+        Point3_(const Vec<_Tp, 3>& v);
+
+        Point3_& operator = (const Point3_& pt);
+        //! conversion to another data type
+        template<typename _Tp2> operator Point3_<_Tp2>() const;
+        //! conversion to the old-style CvPoint...
+        operator CvPoint3D32f() const;
+        //! conversion to cv::Vec<>
+        operator Vec<_Tp, 3>() const;
+
+        //! dot product
+        _Tp dot(const Point3_& pt) const;
+        //! dot product computed in double-precision arithmetics
+        double ddot(const Point3_& pt) const;
+        //! cross product of the 2 3D points
+        Point3_ cross(const Point3_& pt) const;
+
+        _Tp x, y, z; //< the point coordinates
+    };
+
 Template class for 3D points specified by its coordinates
 :math:`x`,
 :math:`y` and
@@ -100,6 +170,35 @@ Size\_
 ------
 .. ocv:class:: Size_
 
+::
+
+    template<typename _Tp> class CV_EXPORTS Size_
+    {
+    public:
+        typedef _Tp value_type;
+
+        //! various constructors
+        Size_();
+        Size_(_Tp _width, _Tp _height);
+        Size_(const Size_& sz);
+        Size_(const CvSize& sz);
+        Size_(const CvSize2D32f& sz);
+        Size_(const Point_<_Tp>& pt);
+
+        Size_& operator = (const Size_& sz);
+        //! the area (width*height)
+        _Tp area() const;
+
+        //! conversion to another data type
+        template<typename _Tp2> operator Size_<_Tp2>() const;
+
+        //! conversion to the old-style OpenCV types
+        operator CvSize() const;
+        operator CvSize2D32f() const;
+
+        _Tp width, height; // the width and the height
+    };
+
 Template class for specifying the size of an image or rectangle. The class includes two members called ``width`` and ``height``. The structure can be converted to and from the old OpenCV structures ``CvSize`` and ``CvSize2D32f`` . The same set of arithmetic and comparison operations as for ``Point_`` is available.
@@ -113,6 +212,43 @@ Rect\_
 ------
 .. ocv:class:: Rect_
 
+::
+
+    template<typename _Tp> class CV_EXPORTS Rect_
+    {
+    public:
+        typedef _Tp value_type;
+
+        //! various constructors
+        Rect_();
+        Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
+        Rect_(const Rect_& r);
+        Rect_(const CvRect& r);
+        Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);
+        Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);
+
+        Rect_& operator = ( const Rect_& r );
+        //! the top-left corner
+        Point_<_Tp> tl() const;
+        //! the bottom-right corner
+        Point_<_Tp> br() const;
+
+        //! size (width, height) of the rectangle
+        Size_<_Tp> size() const;
+        //! area (width*height) of the rectangle
+        _Tp area() const;
+
+        //! conversion to another data type
+        template<typename _Tp2> operator Rect_<_Tp2>() const;
+        //! conversion to the old-style CvRect
+        operator CvRect() const;
+
+        //! checks whether the rectangle contains the point
+        bool contains(const Point_<_Tp>& pt) const;
+
+        _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle
+    };
+
 Template class for 2D rectangles, described by the following parameters:
 
 * Coordinates of the top-left corner. This is a default interpretation of ``Rect_::x`` and ``Rect_::y`` in OpenCV. Though, in your algorithms you may count ``x`` and ``y`` from the bottom-left corner.
@@ -171,6 +307,28 @@ RotatedRect
 -----------
 .. ocv:class:: RotatedRect
 
+::
+
+    class CV_EXPORTS RotatedRect
+    {
+    public:
+        //! various constructors
+        RotatedRect();
+        RotatedRect(const Point2f& center, const Size2f& size, float angle);
+        RotatedRect(const CvBox2D& box);
+
+        //! returns 4 vertices of the rectangle
+        void points(Point2f pts[]) const;
+        //! returns the minimal up-right rectangle containing the rotated rectangle
+        Rect boundingRect() const;
+        //! conversion to the old-style CvBox2D structure
+        operator CvBox2D() const;
+
+        Point2f center; //< the rectangle mass center
+        Size2f size;    //< width and height of the rectangle
+        float angle;    //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+    };
+
 The class represents rotated (i.e. not up-right) rectangles on a plane. Each rectangle is specified by the center point (mass center), length of each side (represented by cv::Size2f structure) and the rotation angle in degrees.
 
 .. ocv:function:: RotatedRect::RotatedRect()
@@ -219,7 +377,33 @@ TermCriteria
 ------------
 .. ocv:class:: TermCriteria
 
-The class defining termination criteria for iterative algorithms. You can initialize it by default constructor and then override any parameters, or the structure may be fully initialized using the advanced variant of the constructor.
+::
+
+    class CV_EXPORTS TermCriteria
+    {
+    public:
+        enum
+        {
+            COUNT=1,        //!< the maximum number of iterations or elements to compute
+            MAX_ITER=COUNT, //!< ditto
+            EPS=2           //!< the desired accuracy or change in parameters at which the iterative algorithm stops
+        };
+
+        //! default constructor
+        TermCriteria();
+        //! full constructor
+        TermCriteria(int type, int maxCount, double epsilon);
+        //! conversion from CvTermCriteria
+        TermCriteria(const CvTermCriteria& criteria);
+        //! conversion to CvTermCriteria
+        operator CvTermCriteria() const;
+
+        int type;     //!< the type of termination criteria: COUNT, EPS or COUNT + EPS
+        int maxCount; // the maximum number of iterations/elements
+        double epsilon; // the desired accuracy
+    };
+
+The class defining termination criteria for iterative algorithms. You can initialize it by the default constructor and then override any parameters, or the structure may be fully initialized using the advanced variant of the constructor.
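For concreteness, a small illustrative sketch (the iteration counts and accuracy values are invented) of the two initialization styles just described::

    // full constructor: stop after 30 iterations or once the change drops below 1e-3
    cv::TermCriteria crit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 1e-3);

    // default constructor, then override individual fields
    cv::TermCriteria tc;
    tc.type = cv::TermCriteria::MAX_ITER;
    tc.maxCount = 100;

Either form can then be passed to iterative routines such as :ocv:func:`kmeans`.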
TermCriteria::TermCriteria -------------------------- @@ -330,9 +514,36 @@ Scalar\_ -------- .. ocv:class:: Scalar_ -Template class for a 4-element vector derived from Vec. :: +Template class for a 4-element vector derived from Vec. - template class Scalar_ : public Vec<_Tp, 4> { ... }; +:: + + template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> + { + public: + //! various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; + }; typedef Scalar_ Scalar; @@ -342,12 +553,21 @@ Range ----- .. ocv:class:: Range -Template class specifying a continuous subsequence (slice) of a sequence. :: +Template class specifying a continuous subsequence (slice) of a sequence. - class Range +:: + + class CV_EXPORTS Range { public: - ... + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + int start, end; }; @@ -536,8 +756,8 @@ Ptr::operator -> ---------------- Provide access to the object fields and methods. - .. ocv:function:: template _Tp* Ptr::operator -> () - .. ocv:function:: template const _Tp* Ptr::operator -> () const +.. ocv:function:: template _Tp* Ptr::operator -> () +.. ocv:function:: template const _Tp* Ptr::operator -> () const Ptr::operator _Tp* @@ -545,15 +765,16 @@ Ptr::operator _Tp* Returns the underlying object pointer. Thanks to the methods, the ``Ptr<_Tp>`` can be used instead of ``_Tp*``. - .. ocv:function:: template Ptr::operator _Tp* () - .. ocv:function:: template Ptr::operator const _Tp*() const +.. ocv:function:: template Ptr::operator _Tp* () +.. ocv:function:: template Ptr::operator const _Tp*() const Mat --- .. ocv:class:: Mat -OpenCV C++ n-dimensional dense array class :: +OpenCV C++ n-dimensional dense array class +:: class CV_EXPORTS Mat { @@ -583,7 +804,6 @@ OpenCV C++ n-dimensional dense array class :: ... }; - The class ``Mat`` represents an n-dimensional dense numerical single-channel or multi-channel array. It can be used to store real or complex-valued vectors and matrices, grayscale or color images, voxel volumes, vector fields, point clouds, tensors, histograms (though, very high-dimensional histograms may be better stored in a ``SparseMat`` ). The data layout of the array :math:`M` is defined by the array ``M.step[]``, so that the address of element :math:`(i_0,...,i_{M.dims-1})`, where @@ -803,6 +1023,9 @@ Finally, there are STL-style iterators that are smart enough to skip gaps betwee The matrix iterators are random-access iterators, so they can be passed to any STL algorithm, including ``std::sort()`` . +.. note:: + + * An example demonstrating the serial out capabilities of cv::Mat can be found at opencv_source_code/samples/cpp/cout_mat.cpp .. _MatrixExpressions: @@ -1069,7 +1292,7 @@ The method makes a new header for the specified row span of the matrix. Similarl Mat::colRange ------------- -Creates a matrix header for the specified row span. +Creates a matrix header for the specified column span. .. 
ocv:function:: Mat Mat::colRange(int startcol, int endcol) const @@ -2439,6 +2662,82 @@ Algorithm --------- .. ocv:class:: Algorithm +:: + + class CV_EXPORTS_W Algorithm + { + public: + Algorithm(); + virtual ~Algorithm(); + string name() const; + + template typename ParamType<_Tp>::member_type get(const string& name) const; + template typename ParamType<_Tp>::member_type get(const char* name) const; + + CV_WRAP int getInt(const string& name) const; + CV_WRAP double getDouble(const string& name) const; + CV_WRAP bool getBool(const string& name) const; + CV_WRAP string getString(const string& name) const; + CV_WRAP Mat getMat(const string& name) const; + CV_WRAP vector getMatVector(const string& name) const; + CV_WRAP Ptr getAlgorithm(const string& name) const; + + void set(const string& name, int value); + void set(const string& name, double value); + void set(const string& name, bool value); + void set(const string& name, const string& value); + void set(const string& name, const Mat& value); + void set(const string& name, const vector& value); + void set(const string& name, const Ptr& value); + template void set(const string& name, const Ptr<_Tp>& value); + + CV_WRAP void setInt(const string& name, int value); + CV_WRAP void setDouble(const string& name, double value); + CV_WRAP void setBool(const string& name, bool value); + CV_WRAP void setString(const string& name, const string& value); + CV_WRAP void setMat(const string& name, const Mat& value); + CV_WRAP void setMatVector(const string& name, const vector& value); + CV_WRAP void setAlgorithm(const string& name, const Ptr& value); + template void setAlgorithm(const string& name, const Ptr<_Tp>& value); + + void set(const char* name, int value); + void set(const char* name, double value); + void set(const char* name, bool value); + void set(const char* name, const string& value); + void set(const char* name, const Mat& value); + void set(const char* name, const vector& value); + void set(const char* name, const Ptr& value); + template void set(const char* name, const Ptr<_Tp>& value); + + void setInt(const char* name, int value); + void setDouble(const char* name, double value); + void setBool(const char* name, bool value); + void setString(const char* name, const string& value); + void setMat(const char* name, const Mat& value); + void setMatVector(const char* name, const vector& value); + void setAlgorithm(const char* name, const Ptr& value); + template void setAlgorithm(const char* name, const Ptr<_Tp>& value); + + CV_WRAP string paramHelp(const string& name) const; + int paramType(const char* name) const; + CV_WRAP int paramType(const string& name) const; + CV_WRAP void getParams(CV_OUT vector& names) const; + + + virtual void write(FileStorage& fs) const; + virtual void read(const FileNode& fn); + + typedef Algorithm* (*Constructor)(void); + typedef int (Algorithm::*Getter)() const; + typedef void (Algorithm::*Setter)(int); + + CV_WRAP static void getList(CV_OUT vector& algorithms); + CV_WRAP static Ptr _create(const string& name); + template static Ptr<_Tp> create(const string& name); + + virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; } + }; + This is a base class for all more or less complex algorithms in OpenCV, especially for classes of algorithms, for which there can be multiple implementations. 
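As a rough sketch of the uniform parameter access this base class provides, reusing the ``Feature2D.SURF`` wrapper that appears elsewhere in this patch (the algorithm is only registered when the nonfree module is present, and the threshold value here is illustrative)::

    cv::Ptr<cv::Algorithm> surf = cv::Algorithm::_create("Feature2D.SURF");
    if( !surf.empty() )
    {
        surf->set("hessianThreshold", 400.);            // generic typed setter
        double h = surf->getDouble("hessianThreshold"); // generic typed getter
    }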
The examples are stereo correspondence (for which there are algorithms like block matching, semi-global block matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck etc.). The class provides the following features for all derived classes: @@ -2593,4 +2892,3 @@ The above methods are usually enough for users. If you want to make your own alg * Add public virtual method ``AlgorithmInfo* info() const;`` to your class. * Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/projects/opencv/repository/revisions/master/entry/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters. * Add some public function (e.g. ``initModule_()``) that calls info() of your algorithm and put it into the same source file as ``info()`` implementation. This is to force C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details. - diff --git a/modules/core/doc/clustering.rst b/modules/core/doc/clustering.rst index f58e99ce2..0f9fa6cf8 100644 --- a/modules/core/doc/clustering.rst +++ b/modules/core/doc/clustering.rst @@ -66,6 +66,12 @@ Basically, you can use only the core of the function, set the number of attempts to 1, initialize labels each time using a custom algorithm, pass them with the ( ``flags`` = ``KMEANS_USE_INITIAL_LABELS`` ) flag, and then choose the best (most-compact) clustering. +.. note:: + + * An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp + + * (Python) An example on K-means clustering can be found at opencv_source_code/samples/python2/kmeans.py + partition ------------- Splits an element set into equivalency classes. @@ -74,8 +80,8 @@ Splits an element set into equivalency classes. :param vec: Set of elements stored as a vector. - :param labels: Output vector of labels. It contains as many elements as ``vec``. Each label ``labels[i]`` is a 0-based cluster index of ``vec[i]`` . - + :param labels: Output vector of labels. It contains as many elements as ``vec``. Each label ``labels[i]`` is a 0-based cluster index of ``vec[i]`` . + :param predicate: Equivalence predicate (pointer to a boolean function of two arguments or an instance of the class that has the method ``bool operator()(const _Tp& a, const _Tp& b)`` ). The predicate returns ``true`` when the elements are certainly in the same class, and returns ``false`` if they may or may not be in the same class. The generic function ``partition`` implements an diff --git a/modules/core/doc/core.rst b/modules/core/doc/core.rst index 7eb4e3e63..c4fb195b0 100644 --- a/modules/core/doc/core.rst +++ b/modules/core/doc/core.rst @@ -14,4 +14,4 @@ core. The Core Functionality old_xml_yaml_persistence clustering utility_and_system_functions_and_macros - + opengl_interop diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 342301db9..b277e01d2 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -26,6 +26,10 @@ If a drawn figure is partially or completely outside the image, the drawing func .. note:: The functions do not support alpha-transparency when the target image is 4-channel. In this case, the ``color[3]`` is simply copied to the repainted pixels. 
Thus, if you want to paint semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main image. +.. note:: + + * An example on using variate drawing functions like line, rectangle, ... can be found at opencv_source_code/samples/cpp/drawing.cpp + circle ---------- Draws a circle. @@ -412,8 +416,8 @@ The number of pixels along the line is stored in ``LineIterator::count`` . The m for(int i = 0; i < it.count; i++, ++it) buf[i] = *(const Vec3b)*it; - - // alternative way of iterating through the line + + // alternative way of iterating through the line for(int i = 0; i < it2.count; i++, ++it2) { Vec3b val = img.at(it2.pos()); @@ -527,4 +531,3 @@ The function ``putText`` renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by question marks. See :ocv:func:`getTextSize` for a text rendering code example. - diff --git a/modules/core/doc/dynamic_structures.rst b/modules/core/doc/dynamic_structures.rst index e333971c0..d56ee494e 100644 --- a/modules/core/doc/dynamic_structures.rst +++ b/modules/core/doc/dynamic_structures.rst @@ -1584,4 +1584,3 @@ Gathers all node pointers to a single sequence. :param storage: Container for the sequence The function puts pointers of all nodes reachable from ``first`` into a single sequence. The pointers are written sequentially in the depth-first order. - diff --git a/modules/core/doc/intro.rst b/modules/core/doc/intro.rst index 0f8a3b0d5..ae95b57ad 100644 --- a/modules/core/doc/intro.rst +++ b/modules/core/doc/intro.rst @@ -91,8 +91,8 @@ you can use:: Ptr ptr = new T(...); -That is, ``Ptr ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the -:ocv:class:`Ptr` +That is, ``Ptr ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the +:ocv:class:`Ptr` description for details. .. _AutomaticAllocation: diff --git a/modules/core/doc/old_basic_structures.rst b/modules/core/doc/old_basic_structures.rst index 0596e04bf..c8de17ada 100644 --- a/modules/core/doc/old_basic_structures.rst +++ b/modules/core/doc/old_basic_structures.rst @@ -1751,4 +1751,3 @@ For example, `NumPy `_ arrays support the array interfa (480, 640, 3) 1 .. note:: In the new Python wrappers (**cv2** module) the function is not needed, since cv2 can process Numpy arrays (and this is the only supported array type). - diff --git a/modules/core/doc/opengl_interop.rst b/modules/core/doc/opengl_interop.rst new file mode 100644 index 000000000..e0db36aec --- /dev/null +++ b/modules/core/doc/opengl_interop.rst @@ -0,0 +1,543 @@ +OpenGL interoperability +======================= + +.. highlight:: cpp + + + +General Information +------------------- +This section describes OpenGL interoperability. + +To enable OpenGL support, configure OpenCV using ``CMake`` with ``WITH_OPENGL=ON`` . +Currently OpenGL is supported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not supported). +For GTK backend ``gtkglext-1.0`` library is required. + +To use OpenGL functionality you should first create OpenGL context (window or frame buffer). +You can do this with :ocv:func:`namedWindow` function or with other OpenGL toolkit (GLUT, for example). + + + +ogl::Buffer +----------- +Smart pointer for OpenGL buffer object with reference counting. + +.. ocv:class:: ogl::Buffer + +Buffer Objects are OpenGL objects that store an array of unformatted memory allocated by the OpenGL context. 
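A minimal upload/download sketch may help orient the reader before the detailed reference below (it assumes OpenCV was configured with ``WITH_OPENGL=ON``, that the context is created beforehand via ``namedWindow`` with the ``CV_WINDOW_OPENGL`` flag, and the header path of this branch)::

    #include "opencv2/core/opengl_interop.hpp"
    #include "opencv2/highgui/highgui.hpp"

    cv::namedWindow("gl", CV_WINDOW_OPENGL);              // creates the OpenGL context
    cv::Mat vertices(1, 3, CV_32FC3, cv::Scalar::all(0)); // three 3D vertices on the host
    cv::ogl::Buffer buf(vertices, cv::ogl::Buffer::ARRAY_BUFFER); // upload to a buffer object
    cv::Mat readback;
    buf.copyTo(readback);                                 // transfer back to host memory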
+Buffer objects can be used to store vertex data, pixel data retrieved from images or the framebuffer, and a variety of other things.
+
+``ogl::Buffer`` has an interface similar to the :ocv:class:`Mat` interface and represents 2D array memory.
+
+``ogl::Buffer`` supports memory transfers between host and device and can also be mapped to CUDA memory.
+
+
+
+ogl::Buffer::Target
+-------------------
+The target defines how you intend to use the buffer object.
+
+.. ocv:enum:: ogl::Buffer::Target
+
+    .. ocv:emember:: ARRAY_BUFFER
+
+        The buffer will be used as a source for vertex data.
+
+    .. ocv:emember:: ELEMENT_ARRAY_BUFFER
+
+        The buffer will be used for indices (in ``glDrawElements`` or :ocv:func:`ogl::render`, for example).
+
+    .. ocv:emember:: PIXEL_PACK_BUFFER
+
+        The buffer will be used for reading from OpenGL textures.
+
+    .. ocv:emember:: PIXEL_UNPACK_BUFFER
+
+        The buffer will be used for writing to OpenGL textures.
+
+
+
+ogl::Buffer::Buffer
+-------------------
+The constructors.
+
+.. ocv:function:: ogl::Buffer::Buffer()
+
+.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false)
+
+.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false)
+
+.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+.. ocv:function:: ogl::Buffer::Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+    :param arows: Number of rows in a 2D array.
+
+    :param acols: Number of columns in a 2D array.
+
+    :param asize: 2D array size.
+
+    :param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
+
+    :param abufId: Buffer object name.
+
+    :param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
+
+    :param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+Creates an empty ``ogl::Buffer`` object, creates an ``ogl::Buffer`` object from an existing buffer (the ``abufId`` parameter),
+allocates memory for the ``ogl::Buffer`` object, or copies from host/device memory.
+
+
+
+ogl::Buffer::create
+-------------------
+Allocates memory for ``ogl::Buffer`` object.
+
+.. ocv:function:: void ogl::Buffer::create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+.. ocv:function:: void ogl::Buffer::create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+    :param arows: Number of rows in a 2D array.
+
+    :param acols: Number of columns in a 2D array.
+
+    :param asize: 2D array size.
+
+    :param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
+
+    :param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+
+
+ogl::Buffer::release
+--------------------
+Decrements the reference counter and destroys the buffer object if needed.
+
+.. ocv:function:: void ogl::Buffer::release()
+
+
+
+ogl::Buffer::setAutoRelease
+---------------------------
+Sets auto release mode.
+
+.. ocv:function:: void ogl::Buffer::setAutoRelease(bool flag)
+
+    :param flag: Auto release mode (if true, release will be called in object's destructor).
+
+The lifetime of the OpenGL object is tied to the lifetime of the context.
+If the OpenGL context was bound to a window, it can be released at any time (the user can close the window).
+If the object's destructor is called after destruction of the context, it will cause an error.
+Thus ``ogl::Buffer`` doesn't destroy the OpenGL object in its destructor by default (all OpenGL resources are released together with the OpenGL context).
+This function can force the ``ogl::Buffer`` destructor to destroy the OpenGL object.
+
+
+
+ogl::Buffer::copyFrom
+---------------------
+Copies from host/device memory to OpenGL buffer.
+
+.. ocv:function:: void ogl::Buffer::copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
+
+    :param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
+
+    :param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+
+
+ogl::Buffer::copyTo
+-------------------
+Copies from OpenGL buffer to host/device memory or another OpenGL buffer object.
+
+.. ocv:function:: void ogl::Buffer::copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const
+
+    :param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , ``std::vector`` or ``ogl::Buffer`` ).
+
+    :param target: Buffer usage for destination buffer (if ``arr`` is OpenGL buffer).
+
+    :param autoRelease: Auto release mode for destination buffer (if ``arr`` is OpenGL buffer).
+
+
+
+ogl::Buffer::clone
+------------------
+Creates a full copy of the buffer object and the underlying data.
+
+.. ocv:function:: Buffer ogl::Buffer::clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const
+
+    :param target: Buffer usage for destination buffer.
+
+    :param autoRelease: Auto release mode for destination buffer.
+
+
+
+ogl::Buffer::bind
+-----------------
+Binds OpenGL buffer to the specified buffer binding point.
+
+.. ocv:function:: void ogl::Buffer::bind(Target target) const
+
+    :param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
+
+
+
+ogl::Buffer::unbind
+-------------------
+Unbinds any buffer from the specified binding point.
+
+.. ocv:function:: static void ogl::Buffer::unbind(Target target)
+
+    :param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
+
+
+
+ogl::Buffer::mapHost
+--------------------
+Maps OpenGL buffer to host memory.
+
+.. ocv:function:: Mat ogl::Buffer::mapHost(Access access)
+
+    :param access: Access policy, indicating whether it will be possible to read from, write to, or both read from and write to the buffer object's mapped data store. The symbolic constant must be ``ogl::Buffer::READ_ONLY`` , ``ogl::Buffer::WRITE_ONLY`` or ``ogl::Buffer::READ_WRITE`` .
+
+``mapHost`` maps the entire data store of the buffer object into the client's address space.
+The data can then be directly read and/or written relative to the returned pointer, depending on the specified ``access`` policy.
+
+A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapHost` before its buffer object is used.
+
+This operation can lead to memory transfers between host and device.
+
+Only one buffer object can be mapped at a time.
+
+
+
+ogl::Buffer::unmapHost
+----------------------
+Unmaps OpenGL buffer.
+
+.. ocv:function:: void ogl::Buffer::unmapHost()
+
+
+
+ogl::Buffer::mapDevice
+----------------------
+Maps OpenGL buffer to CUDA device memory.
+
+.. ocv:function:: gpu::GpuMat ogl::Buffer::mapDevice()
+
+This operation doesn't copy data.
+Several buffer objects can be mapped to CUDA memory at a time.
+
+A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapDevice` before its buffer object is used.
+
+
+
+ogl::Buffer::unmapDevice
+------------------------
+Unmaps OpenGL buffer.
+
+.. ocv:function:: void ogl::Buffer::unmapDevice()
+
+
+
+ogl::Texture2D
+--------------
+Smart pointer for OpenGL 2D texture memory with reference counting.
+
+.. ocv:class:: ogl::Texture2D
+
+
+
+ogl::Texture2D::Format
+----------------------
+An Image Format describes the way that the images in Textures store their data.
+
+.. ocv:enum:: ogl::Texture2D::Format
+
+    .. ocv:emember:: NONE
+    .. ocv:emember:: DEPTH_COMPONENT
+    .. ocv:emember:: RGB
+    .. ocv:emember:: RGBA
+
+
+
+ogl::Texture2D::Texture2D
+-------------------------
+The constructors.
+
+.. ocv:function:: ogl::Texture2D::Texture2D()
+
+.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false)
+
+.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false)
+
+.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease = false)
+
+.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease = false)
+
+.. ocv:function:: ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease = false)
+
+    :param arows: Number of rows.
+
+    :param acols: Number of columns.
+
+    :param asize: 2D array size.
+
+    :param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
+
+    :param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+Creates an empty ``ogl::Texture2D`` object, allocates memory for the ``ogl::Texture2D`` object, or copies from host/device memory.
+
+
+
+ogl::Texture2D::create
+----------------------
+Allocates memory for ``ogl::Texture2D`` object.
+
+.. ocv:function:: void ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease = false)
+
+.. ocv:function:: void ogl::Texture2D::create(Size asize, Format aformat, bool autoRelease = false)
+
+    :param arows: Number of rows.
+
+    :param acols: Number of columns.
+
+    :param asize: 2D array size.
+
+    :param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+
+
+ogl::Texture2D::release
+-----------------------
+Decrements the reference counter and destroys the texture object if needed.
+
+.. ocv:function:: void ogl::Texture2D::release()
+
+
+
+ogl::Texture2D::setAutoRelease
+------------------------------
+Sets auto release mode.
+
+.. ocv:function:: void ogl::Texture2D::setAutoRelease(bool flag)
+
+    :param flag: Auto release mode (if true, release will be called in object's destructor).
+
+The lifetime of the OpenGL object is tied to the lifetime of the context.
+If the OpenGL context was bound to a window, it can be released at any time (the user can close the window).
+If the object's destructor is called after destruction of the context, it will cause an error.
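A hypothetical sketch of that release-order hazard (the window and variable names are invented)::

    cv::namedWindow("win", CV_WINDOW_OPENGL);          // the window owns the GL context
    cv::Mat img(64, 64, CV_8UC3, cv::Scalar::all(0));
    cv::ogl::Texture2D tex(img);                       // GL texture lives in that context
    cv::destroyWindow("win");                          // context destroyed; tex still in scope
    // if tex's destructor now called into OpenGL, it would touch a dead context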
+Thus ``ogl::Texture2D`` doesn't destroy the OpenGL object in its destructor by default (all OpenGL resources are released together with the OpenGL context).
+This function can force the ``ogl::Texture2D`` destructor to destroy the OpenGL object.
+
+
+
+ogl::Texture2D::copyFrom
+------------------------
+Copies from host/device memory to OpenGL texture.
+
+.. ocv:function:: void ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease = false)
+
+    :param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
+
+    :param autoRelease: Auto release mode (if true, release will be called in object's destructor).
+
+
+
+ogl::Texture2D::copyTo
+----------------------
+Copies from OpenGL texture to host/device memory or another OpenGL texture object.
+
+.. ocv:function:: void ogl::Texture2D::copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const
+
+    :param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , :ocv:class:`ogl::Buffer` or ``ogl::Texture2D`` ).
+
+    :param ddepth: Destination depth.
+
+    :param autoRelease: Auto release mode for destination buffer (if ``arr`` is OpenGL buffer or texture).
+
+
+
+ogl::Texture2D::bind
+--------------------
+Binds texture to current active texture unit for ``GL_TEXTURE_2D`` target.
+
+.. ocv:function:: void ogl::Texture2D::bind() const
+
+
+
+ogl::Arrays
+-----------
+Wrapper for OpenGL Client-Side Vertex arrays.
+
+.. ocv:class:: ogl::Arrays
+
+``ogl::Arrays`` stores vertex data in :ocv:class:`ogl::Buffer` objects.
+
+
+
+ogl::Arrays::setVertexArray
+---------------------------
+Sets an array of vertex coordinates.
+
+.. ocv:function:: void ogl::Arrays::setVertexArray(InputArray vertex)
+
+    :param vertex: array with vertex coordinates, can be both host and device memory.
+
+
+
+ogl::Arrays::resetVertexArray
+-----------------------------
+Resets vertex coordinates.
+
+.. ocv:function:: void ogl::Arrays::resetVertexArray()
+
+
+
+ogl::Arrays::setColorArray
+--------------------------
+Sets an array of vertex colors.
+
+.. ocv:function:: void ogl::Arrays::setColorArray(InputArray color)
+
+    :param color: array with vertex colors, can be both host and device memory.
+
+
+
+ogl::Arrays::resetColorArray
+----------------------------
+Resets vertex colors.
+
+.. ocv:function:: void ogl::Arrays::resetColorArray()
+
+
+
+ogl::Arrays::setNormalArray
+---------------------------
+Sets an array of vertex normals.
+
+.. ocv:function:: void ogl::Arrays::setNormalArray(InputArray normal)
+
+    :param normal: array with vertex normals, can be both host and device memory.
+
+
+
+ogl::Arrays::resetNormalArray
+-----------------------------
+Resets vertex normals.
+
+.. ocv:function:: void ogl::Arrays::resetNormalArray()
+
+
+
+ogl::Arrays::setTexCoordArray
+-----------------------------
+Sets an array of vertex texture coordinates.
+
+.. ocv:function:: void ogl::Arrays::setTexCoordArray(InputArray texCoord)
+
+    :param texCoord: array with vertex texture coordinates, can be both host and device memory.
+
+
+
+ogl::Arrays::resetTexCoordArray
+-------------------------------
+Resets vertex texture coordinates.
+
+.. ocv:function:: void ogl::Arrays::resetTexCoordArray()
+
+
+
+ogl::Arrays::release
+--------------------
+Releases all inner buffers.
+
+.. ocv:function:: void ogl::Arrays::release()
+
+
+
+ogl::Arrays::setAutoRelease
+---------------------------
+Sets auto release mode for all inner buffers.
+
+.. ocv:function:: void ogl::Arrays::setAutoRelease(bool flag)
+
+    :param flag: Auto release mode.
+
+
+
+ogl::Arrays::bind
+-----------------
+Binds all vertex arrays.
+
+.. ocv:function:: void ogl::Arrays::bind() const
+
+
+
+ogl::Arrays::size
+-----------------
+Returns the vertex count.
+
+.. ocv:function:: int ogl::Arrays::size() const
+
+
+
+ogl::render
+-----------
+Renders OpenGL texture or primitives.
+
+.. ocv:function:: void ogl::render(const Texture2D& tex, Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0), Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0))
+
+.. ocv:function:: void ogl::render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255))
+
+.. ocv:function:: void ogl::render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255))
+
+    :param tex: Texture to draw.
+
+    :param wndRect: Region of the window where to draw the texture (normalized coordinates).
+
+    :param texRect: Region of the texture to draw (normalized coordinates).
+
+    :param arr: Array of primitive vertices.
+
+    :param indices: Array of vertex indices (host or device memory).
+
+    :param mode: Render mode. Available options:
+
+        * **POINTS**
+        * **LINES**
+        * **LINE_LOOP**
+        * **LINE_STRIP**
+        * **TRIANGLES**
+        * **TRIANGLE_STRIP**
+        * **TRIANGLE_FAN**
+        * **QUADS**
+        * **QUAD_STRIP**
+        * **POLYGON**
+
+    :param color: Color for all vertices. Will be used if ``arr`` doesn't contain a color array.
+
+
+
+gpu::setGlDevice
+----------------
+Sets a CUDA device and initializes it for the current thread with OpenGL interoperability.
+
+.. ocv:function:: void gpu::setGlDevice( int device = 0 )
+
+    :param device: System index of a GPU device starting with 0.
+
+This function should be explicitly called after OpenGL context creation and before any CUDA calls.
diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst
index bd55993af..a312818da 100644
--- a/modules/core/doc/operations_on_arrays.rst
+++ b/modules/core/doc/operations_on_arrays.rst
@@ -532,7 +532,7 @@ Performs the per-element comparison of two arrays or an array and scalar value.
 
     :param value: scalar value.
 
-    :param dst: output array that has the same size as the input arrays and type= ``CV_8UC1`` .
+    :param dst: output array that has the same size and type as the input arrays.
 
     :param cmpop: a flag, that specifies correspondence between the arrays:
@@ -997,6 +997,12 @@ All of the above improvements have been implemented in :ocv:func:`matchTemplate`
 
 .. seealso:: :ocv:func:`dct` , :ocv:func:`getOptimalDFTSize` , :ocv:func:`mulSpectrums`, :ocv:func:`filter2D` , :ocv:func:`matchTemplate` , :ocv:func:`flip` , :ocv:func:`cartToPolar` , :ocv:func:`magnitude` , :ocv:func:`phase`
 
+.. note::
+
+   * An example using the discrete Fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp
+
+   * (Python) An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source_code/samples/python2/deconvolution.py
+   * (Python) An example rearranging the quadrants of a Fourier image can be found at opencv_source_code/samples/python2/dft.py
 
 divide
@@ -2262,7 +2268,9 @@ The sample below is the function that takes two matrices. The first function sto
 
     :ocv:func:`dft`,
     :ocv:func:`dct`
 
+..
diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst index bd55993af..a312818da 100644 --- a/modules/core/doc/operations_on_arrays.rst +++ b/modules/core/doc/operations_on_arrays.rst @@ -532,7 +532,7 @@ Performs the per-element comparison of two arrays or an array and scalar value. :param value: scalar value. - :param dst: output array that has the same size as the input arrays and type= ``CV_8UC1`` . + :param dst: output array that has the same size and type as the input arrays. :param cmpop: a flag, that specifies correspondence between the arrays: @@ -997,6 +997,12 @@ All of the above improvements have been implemented in :ocv:func:`matchTemplate` .. seealso:: :ocv:func:`dct` , :ocv:func:`getOptimalDFTSize` , :ocv:func:`mulSpectrums`, :ocv:func:`filter2D` , :ocv:func:`matchTemplate` , :ocv:func:`flip` , :ocv:func:`cartToPolar` , :ocv:func:`magnitude` , :ocv:func:`phase` +.. note:: + + * An example using the discrete Fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp + + * (Python) An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source_code/samples/python2/deconvolution.py + * (Python) An example rearranging the quadrants of a Fourier image can be found at opencv_source_code/samples/python2/dft.py divide @@ -2262,7 +2268,9 @@ The sample below is the function that takes two matrices. The first function sto :ocv:func:`dft`, :ocv:func:`dct` +.. note:: + + * An example using PCA for dimensionality reduction while retaining a specified amount of variance can be found at opencv_source_code/samples/cpp/pca.cpp PCA::PCA -------- diff --git a/modules/core/doc/xml_yaml_persistence.rst b/modules/core/doc/xml_yaml_persistence.rst index 28bae2450..01a118829 100644 --- a/modules/core/doc/xml_yaml_persistence.rst +++ b/modules/core/doc/xml_yaml_persistence.rst @@ -11,7 +11,7 @@ You can store and then restore various OpenCV data structures to/from XML (http: Use the following procedure to write something to XML or YAML: #. Create new :ocv:class:`FileStorage` and open it for writing. It can be done with a single call to :ocv:func:`FileStorage::FileStorage` constructor that takes a filename, or you can use the default constructor and then call :ocv:func:`FileStorage::open`. Format of the file (XML or YAML) is determined from the filename extension (".xml" and ".yml"/".yaml", respectively) - #. Write all the data you want using the streaming operator ``>>``, just like in the case of STL streams. + #. Write all the data you want using the streaming operator ``<<``, just like in the case of STL streams. #. Close the file using :ocv:func:`FileStorage::release`. ``FileStorage`` destructor also closes the file. Here is an example: :: @@ -91,6 +91,10 @@ Several things can be noted by looking at the sample code and the output: * In YAML (but not XML), mappings and sequences can be written in a compact Python-like inline form. In the sample above matrix elements, as well as each feature, including its lbp value, is stored in such inline form. To store a mapping/sequence in a compact form, put ":" after the opening character, e.g. use **"{:"** instead of **"{"** and **"[:"** instead of **"["**. When the data is written to XML, those extra ":" are ignored. +.. note:: + + * A complete example using the FileStorage interface can be found at opencv_source_code/samples/cpp/filestorage.cpp + Reading data from a file storage. ---------------------------------
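For reference, the corrected write procedure reads naturally once the operator is right; a minimal sketch (not from the patch, using only the documented FileStorage API)::

    #include <opencv2/core/core.hpp>

    int main()
    {
        cv::FileStorage fs("test.yml", cv::FileStorage::WRITE);
        fs << "frameCount" << 5;                             // streaming operator <<, as corrected above
        fs << "cameraMatrix" << cv::Mat::eye(3, 3, CV_64F);
        fs.release();                                        // optional: the destructor also closes the file
        return 0;
    }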
diff --git a/modules/core/include/opencv2/core/eigen.hpp b/modules/core/include/opencv2/core/eigen.hpp index 751734eb5..a7b237f96 100644 --- a/modules/core/include/opencv2/core/eigen.hpp +++ b/modules/core/include/opencv2/core/eigen.hpp @@ -278,4 +278,3 @@ void cv2eigen( const Matx<_Tp, 1, _cols>& src, #endif #endif - diff --git a/modules/core/include/opencv2/core/internal.hpp b/modules/core/include/opencv2/core/internal.hpp index 606c62f8f..2f26e7cb6 100644 --- a/modules/core/include/opencv2/core/internal.hpp +++ b/modules/core/include/opencv2/core/internal.hpp @@ -136,7 +136,13 @@ CV_INLINE IppiSize ippiSize(int width, int height) # endif #endif -#ifdef __ARM_NEON__ + +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include <Intrin.h> +# include "arm_neon.h" +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#elif defined(__ARM_NEON__) # include <arm_neon.h> # define CV_NEON 1 # define CPU_HAS_NEON_FEATURE (true) @@ -340,25 +346,6 @@ namespace cv * Common declarations * \****************************************************************************************/ -/* get alloca declaration */ -#ifdef __GNUC__ -# undef alloca -# define alloca __builtin_alloca -# define CV_HAVE_ALLOCA 1 -#elif defined WIN32 || defined _WIN32 || \ - defined WINCE || defined _MSC_VER || defined __BORLANDC__ -# include <malloc.h> -# define CV_HAVE_ALLOCA 1 -#elif defined HAVE_ALLOCA_H -# include <alloca.h> -# define CV_HAVE_ALLOCA 1 -#elif defined HAVE_ALLOCA -# include <alloca.h> -# define CV_HAVE_ALLOCA 1 -#else -# undef CV_HAVE_ALLOCA -#endif - #ifdef __GNUC__ # define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) #elif defined _MSC_VER @@ -367,11 +354,6 @@ namespace cv # define CV_DECL_ALIGNED(x) #endif -#if CV_HAVE_ALLOCA -/* ! DO NOT make it an inline function */ -# define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN ) -#endif - #ifndef CV_IMPL # define CV_IMPL CV_EXTERN_C #endif diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index 9d8696a05..d3b80a004 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -3001,6 +3001,58 @@ static inline void read(const FileNode& node, string& value, const string& defau value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? string(node.node->data.str.ptr) : string(""); } +template<typename _Tp> static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template<typename _Tp> static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 3 ? default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2])); +} + +template<typename _Tp> static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? 
default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template<typename _Tp> static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template<typename _Tp> static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +template<typename _Tp, int cn> static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]); +} + +template<typename _Tp> static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +static inline void read(const FileNode& node, Range& value, const Range& default_value) +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); + read(node, temp, default_temp); + value.start = temp.x; value.end = temp.y; +} + CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() );
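With these overloads in place, compound types read like the existing scalar overloads, falling back to the supplied default when the node is missing or malformed; a minimal usage sketch (hypothetical file and node names)::

    cv::FileStorage fs("params.yml", cv::FileStorage::READ);

    cv::Point2f center;
    cv::Rect roi;

    // Each node is expected to be a sequence, e.g. "center: [320.0, 240.0]";
    // if it is absent or has the wrong length, the default is used instead.
    cv::read(fs["center"], center, cv::Point2f(0.f, 0.f));
    cv::read(fs["roi"], roi, cv::Rect(0, 0, 0, 0));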
diff --git a/modules/core/include/opencv2/core/types_c.h b/modules/core/include/opencv2/core/types_c.h index 8db2fe766..27e53cd00 100644 --- a/modules/core/include/opencv2/core/types_c.h +++ b/modules/core/include/opencv2/core/types_c.h @@ -137,7 +137,7 @@ #ifndef CV_INLINE # if defined __cplusplus # define CV_INLINE inline -# elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__ +# elif defined _MSC_VER # define CV_INLINE __inline # else # define CV_INLINE static @@ -317,7 +317,7 @@ CV_INLINE int cvRound( double value ) return t; #elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION TEGRA_ROUND(value); -#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__ +#elif defined CV_ICC || defined __GNUC__ # ifdef HAVE_TEGRA_OPTIMIZATION TEGRA_ROUND(value); # else diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp index 0a7760873..bd95e6359 100644 --- a/modules/core/include/opencv2/core/version.hpp +++ b/modules/core/include/opencv2/core/version.hpp @@ -50,11 +50,14 @@ #define CV_VERSION_EPOCH 2 #define CV_VERSION_MAJOR 4 #define CV_VERSION_MINOR 6 -#define CV_VERSION_REVISION 0 +#define CV_VERSION_REVISION 1 #define CVAUX_STR_EXP(__A) #__A #define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CVAUX_STRW_EXP(__A) L#__A +#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) + #if CV_VERSION_REVISION # define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) #else diff --git a/modules/core/perf/perf_abs.cpp b/modules/core/perf/perf_abs.cpp index 691c6f7c4..63cb06b63 100644 --- a/modules/core/perf/perf_abs.cpp +++ b/modules/core/perf/perf_abs.cpp @@ -24,4 +24,3 @@ PERF_TEST_P(Size_MatType, abs, TYPICAL_MATS_ABS) SANITY_CHECK(c); } - diff --git a/modules/core/perf/perf_bitwise.cpp b/modules/core/perf/perf_bitwise.cpp index 64a8dd8bd..1308b7bf3 100644 --- a/modules/core/perf/perf_bitwise.cpp +++ b/modules/core/perf/perf_bitwise.cpp @@ -73,4 +73,3 @@ PERF_TEST_P(Size_MatType, bitwise_xor, TYPICAL_MATS_BITW_ARITHM) SANITY_CHECK(c); } - diff --git a/modules/core/perf/perf_main.cpp b/modules/core/perf/perf_main.cpp index 79c28a645..7c899c244 100644 --- a/modules/core/perf/perf_main.cpp +++ b/modules/core/perf/perf_main.cpp @@ -1,3 +1,8 @@ #include "perf_precomp.hpp" +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif +#endif CV_PERF_TEST_MAIN(core) diff --git a/modules/core/perf/perf_merge.cpp b/modules/core/perf/perf_merge.cpp index d82941a92..e7e8d2fe3 100644 --- a/modules/core/perf/perf_merge.cpp +++ b/modules/core/perf/perf_merge.cpp @@ -34,4 +34,4 @@ PERF_TEST_P( Size_SrcDepth_DstChannels, merge, TEST_CYCLE_MULTIRUN(runs) merge( (vector<Mat>&)mv, dst ); SANITY_CHECK(dst, 1e-12); -} \ No newline at end of file +} diff --git a/modules/core/perf/perf_precomp.cpp b/modules/core/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d4..000000000 --- a/modules/core/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/core/perf/perf_stat.cpp b/modules/core/perf/perf_stat.cpp index b7fc43d12..9698076ad 100644 --- a/modules/core/perf/perf_stat.cpp +++ b/modules/core/perf/perf_stat.cpp @@ -33,7 +33,7 @@ PERF_TEST_P(Size_MatType, mean, TYPICAL_MATS) TEST_CYCLE() s = mean(src); - SANITY_CHECK(s, 1e-6); + SANITY_CHECK(s, 1e-5); } PERF_TEST_P(Size_MatType, mean_mask, TYPICAL_MATS) @@ -49,7 +49,7 @@ PERF_TEST_P(Size_MatType, mean_mask, TYPICAL_MATS) TEST_CYCLE() s = mean(src, mask); - SANITY_CHECK(s, 1e-6); + SANITY_CHECK(s, 5e-5); } PERF_TEST_P(Size_MatType, meanStdDev, TYPICAL_MATS) diff --git a/modules/core/src/alloc.cpp b/modules/core/src/alloc.cpp index 1944ed17d..37b1e0db9 100644 --- a/modules/core/src/alloc.cpp +++ b/modules/core/src/alloc.cpp @@ -94,9 +94,20 @@ void fastFree(void* ptr) #define STAT(stmt) #ifdef WIN32 +#if (_WIN32_WINNT >= 0x0602) +#include <synchapi.h> +#endif + struct CriticalSection { - CriticalSection() { InitializeCriticalSection(&cs); } + CriticalSection() + { +#if (_WIN32_WINNT >= 0x0600) + InitializeCriticalSectionEx(&cs, 1000, 0); +#else + InitializeCriticalSection(&cs); +#endif + } ~CriticalSection() { DeleteCriticalSection(&cs); } void lock() { EnterCriticalSection(&cs); } void unlock() { LeaveCriticalSection(&cs); } diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 5fda1415c..0db4c62bf 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -1131,23 +1131,33 @@ static void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst, } } -static BinaryFunc maxTab[] = +static BinaryFunc* getMaxTab() { - (BinaryFunc)GET_OPTIMIZED(max8u), (BinaryFunc)GET_OPTIMIZED(max8s), - (BinaryFunc)GET_OPTIMIZED(max16u), (BinaryFunc)GET_OPTIMIZED(max16s), - (BinaryFunc)GET_OPTIMIZED(max32s), - (BinaryFunc)GET_OPTIMIZED(max32f), (BinaryFunc)max64f, - 0 -}; + static BinaryFunc maxTab[] = + { + (BinaryFunc)GET_OPTIMIZED(max8u), 
(BinaryFunc)GET_OPTIMIZED(max8s), + (BinaryFunc)GET_OPTIMIZED(max16u), (BinaryFunc)GET_OPTIMIZED(max16s), + (BinaryFunc)GET_OPTIMIZED(max32s), + (BinaryFunc)GET_OPTIMIZED(max32f), (BinaryFunc)max64f, + 0 + }; -static BinaryFunc minTab[] = + return maxTab; +} + +static BinaryFunc* getMinTab() { - (BinaryFunc)GET_OPTIMIZED(min8u), (BinaryFunc)GET_OPTIMIZED(min8s), - (BinaryFunc)GET_OPTIMIZED(min16u), (BinaryFunc)GET_OPTIMIZED(min16s), - (BinaryFunc)GET_OPTIMIZED(min32s), - (BinaryFunc)GET_OPTIMIZED(min32f), (BinaryFunc)min64f, - 0 -}; + static BinaryFunc minTab[] = + { + (BinaryFunc)GET_OPTIMIZED(min8u), (BinaryFunc)GET_OPTIMIZED(min8s), + (BinaryFunc)GET_OPTIMIZED(min16u), (BinaryFunc)GET_OPTIMIZED(min16s), + (BinaryFunc)GET_OPTIMIZED(min32s), + (BinaryFunc)GET_OPTIMIZED(min32f), (BinaryFunc)min64f, + 0 + }; + + return minTab; +} } @@ -1177,36 +1187,36 @@ void cv::bitwise_not(InputArray a, OutputArray c, InputArray mask) void cv::max( InputArray src1, InputArray src2, OutputArray dst ) { - binary_op(src1, src2, dst, noArray(), maxTab, false ); + binary_op(src1, src2, dst, noArray(), getMaxTab(), false ); } void cv::min( InputArray src1, InputArray src2, OutputArray dst ) { - binary_op(src1, src2, dst, noArray(), minTab, false ); + binary_op(src1, src2, dst, noArray(), getMinTab(), false ); } void cv::max(const Mat& src1, const Mat& src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), maxTab, false ); + binary_op(src1, src2, _dst, noArray(), getMaxTab(), false ); } void cv::min(const Mat& src1, const Mat& src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), minTab, false ); + binary_op(src1, src2, _dst, noArray(), getMinTab(), false ); } void cv::max(const Mat& src1, double src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), maxTab, false ); + binary_op(src1, src2, _dst, noArray(), getMaxTab(), false ); } void cv::min(const Mat& src1, double src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), minTab, false ); + binary_op(src1, src2, _dst, noArray(), getMinTab(), false ); } /****************************************************************************************\ @@ -1242,14 +1252,14 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst, Mat src1 = _src1.getMat(), src2 = _src2.getMat(); bool haveMask = !_mask.empty(); bool reallocate = false; - - bool src1Scalar = checkScalar(src1, src2.type(), kind1, kind2); - bool src2Scalar = checkScalar(src2, src1.type(), kind2, kind1); + + bool src1Scalar = checkScalar(src1, src2.type(), kind1, kind2); + bool src2Scalar = checkScalar(src2, src1.type(), kind2, kind1); if( (kind1 == kind2 || src1.channels() == 1) && src1.dims <= 2 && src2.dims <= 2 && src1.size() == src2.size() && src1.type() == src2.type() && !haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == src1.depth())) || - (_dst.fixedType() && _dst.type() == _src1.type())) && + (_dst.fixedType() && _dst.type() == _src1.type())) && ((src1Scalar && src2Scalar) || (!src1Scalar && !src2Scalar)) ) { _dst.create(src1.size(), src1.type()); @@ -1493,39 +1503,54 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst, } } -static BinaryFunc addTab[] = +static BinaryFunc* getAddTab() { - (BinaryFunc)GET_OPTIMIZED(add8u), (BinaryFunc)GET_OPTIMIZED(add8s), - (BinaryFunc)GET_OPTIMIZED(add16u), (BinaryFunc)GET_OPTIMIZED(add16s), - (BinaryFunc)GET_OPTIMIZED(add32s), - (BinaryFunc)GET_OPTIMIZED(add32f), (BinaryFunc)add64f, - 0 -}; + 
static BinaryFunc addTab[] = + { + (BinaryFunc)GET_OPTIMIZED(add8u), (BinaryFunc)GET_OPTIMIZED(add8s), + (BinaryFunc)GET_OPTIMIZED(add16u), (BinaryFunc)GET_OPTIMIZED(add16s), + (BinaryFunc)GET_OPTIMIZED(add32s), + (BinaryFunc)GET_OPTIMIZED(add32f), (BinaryFunc)add64f, + 0 + }; -static BinaryFunc subTab[] = -{ - (BinaryFunc)GET_OPTIMIZED(sub8u), (BinaryFunc)GET_OPTIMIZED(sub8s), - (BinaryFunc)GET_OPTIMIZED(sub16u), (BinaryFunc)GET_OPTIMIZED(sub16s), - (BinaryFunc)GET_OPTIMIZED(sub32s), - (BinaryFunc)GET_OPTIMIZED(sub32f), (BinaryFunc)sub64f, - 0 -}; + return addTab; +} -static BinaryFunc absdiffTab[] = +static BinaryFunc* getSubTab() { - (BinaryFunc)GET_OPTIMIZED(absdiff8u), (BinaryFunc)GET_OPTIMIZED(absdiff8s), - (BinaryFunc)GET_OPTIMIZED(absdiff16u), (BinaryFunc)GET_OPTIMIZED(absdiff16s), - (BinaryFunc)GET_OPTIMIZED(absdiff32s), - (BinaryFunc)GET_OPTIMIZED(absdiff32f), (BinaryFunc)absdiff64f, - 0 -}; + static BinaryFunc subTab[] = + { + (BinaryFunc)GET_OPTIMIZED(sub8u), (BinaryFunc)GET_OPTIMIZED(sub8s), + (BinaryFunc)GET_OPTIMIZED(sub16u), (BinaryFunc)GET_OPTIMIZED(sub16s), + (BinaryFunc)GET_OPTIMIZED(sub32s), + (BinaryFunc)GET_OPTIMIZED(sub32f), (BinaryFunc)sub64f, + 0 + }; + + return subTab; +} + +static BinaryFunc* getAbsDiffTab() +{ + static BinaryFunc absDiffTab[] = + { + (BinaryFunc)GET_OPTIMIZED(absdiff8u), (BinaryFunc)GET_OPTIMIZED(absdiff8s), + (BinaryFunc)GET_OPTIMIZED(absdiff16u), (BinaryFunc)GET_OPTIMIZED(absdiff16s), + (BinaryFunc)GET_OPTIMIZED(absdiff32s), + (BinaryFunc)GET_OPTIMIZED(absdiff32f), (BinaryFunc)absdiff64f, + 0 + }; + + return absDiffTab; +} } void cv::add( InputArray src1, InputArray src2, OutputArray dst, InputArray mask, int dtype ) { - arithm_op(src1, src2, dst, mask, dtype, addTab ); + arithm_op(src1, src2, dst, mask, dtype, getAddTab() ); } void cv::subtract( InputArray src1, InputArray src2, OutputArray dst, @@ -1560,12 +1585,12 @@ void cv::subtract( InputArray src1, InputArray src2, OutputArray dst, } } #endif - arithm_op(src1, src2, dst, mask, dtype, subTab ); + arithm_op(src1, src2, dst, mask, dtype, getSubTab() ); } void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst ) { - arithm_op(src1, src2, dst, noArray(), -1, absdiffTab); + arithm_op(src1, src2, dst, noArray(), -1, getAbsDiffTab()); } /****************************************************************************************\ @@ -1855,46 +1880,60 @@ static void recip64f( const double* src1, size_t step1, const double* src2, size } -static BinaryFunc mulTab[] = +static BinaryFunc* getMulTab() { - (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u, - (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f, - (BinaryFunc)mul64f, 0 -}; + static BinaryFunc mulTab[] = + { + (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u, + (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f, + (BinaryFunc)mul64f, 0 + }; -static BinaryFunc divTab[] = + return mulTab; +} + +static BinaryFunc* getDivTab() { - (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u, - (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f, - (BinaryFunc)div64f, 0 -}; + static BinaryFunc divTab[] = + { + (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u, + (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f, + (BinaryFunc)div64f, 0 + }; -static BinaryFunc recipTab[] = + return divTab; +} + +static BinaryFunc* getRecipTab() { - (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u, - (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f, - (BinaryFunc)recip64f, 0 
-}; + static BinaryFunc recipTab[] = + { + (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u, + (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f, + (BinaryFunc)recip64f, 0 + }; + return recipTab; +} } void cv::multiply(InputArray src1, InputArray src2, OutputArray dst, double scale, int dtype) { - arithm_op(src1, src2, dst, noArray(), dtype, mulTab, true, &scale); + arithm_op(src1, src2, dst, noArray(), dtype, getMulTab(), true, &scale); } void cv::divide(InputArray src1, InputArray src2, OutputArray dst, double scale, int dtype) { - arithm_op(src1, src2, dst, noArray(), dtype, divTab, true, &scale); + arithm_op(src1, src2, dst, noArray(), dtype, getDivTab(), true, &scale); } void cv::divide(double scale, InputArray src2, OutputArray dst, int dtype) { - arithm_op(src2, src2, dst, noArray(), dtype, recipTab, true, &scale); + arithm_op(src2, src2, dst, noArray(), dtype, getRecipTab(), true, &scale); } /****************************************************************************************\ @@ -2037,12 +2076,17 @@ static void addWeighted64f( const double* src1, size_t step1, const double* src2 addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); } -static BinaryFunc addWeightedTab[] = +static BinaryFunc* getAddWeightedTab() { - (BinaryFunc)GET_OPTIMIZED(addWeighted8u), (BinaryFunc)GET_OPTIMIZED(addWeighted8s), (BinaryFunc)GET_OPTIMIZED(addWeighted16u), - (BinaryFunc)GET_OPTIMIZED(addWeighted16s), (BinaryFunc)GET_OPTIMIZED(addWeighted32s), (BinaryFunc)addWeighted32f, - (BinaryFunc)addWeighted64f, 0 -}; + static BinaryFunc addWeightedTab[] = + { + (BinaryFunc)GET_OPTIMIZED(addWeighted8u), (BinaryFunc)GET_OPTIMIZED(addWeighted8s), (BinaryFunc)GET_OPTIMIZED(addWeighted16u), + (BinaryFunc)GET_OPTIMIZED(addWeighted16s), (BinaryFunc)GET_OPTIMIZED(addWeighted32s), (BinaryFunc)addWeighted32f, + (BinaryFunc)addWeighted64f, 0 + }; + + return addWeightedTab; +} } @@ -2050,7 +2094,7 @@ void cv::addWeighted( InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst, int dtype ) { double scalars[] = {alpha, beta, gamma}; - arithm_op(src1, src2, dst, noArray(), dtype, addWeightedTab, true, scalars); + arithm_op(src1, src2, dst, noArray(), dtype, getAddWeightedTab(), true, scalars); } @@ -2310,15 +2354,19 @@ static void cmp64f(const double* src1, size_t step1, const double* src2, size_t cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); } -static BinaryFunc cmpTab[] = +static BinaryFunc getCmpFunc(int depth) { - (BinaryFunc)GET_OPTIMIZED(cmp8u), (BinaryFunc)GET_OPTIMIZED(cmp8s), - (BinaryFunc)GET_OPTIMIZED(cmp16u), (BinaryFunc)GET_OPTIMIZED(cmp16s), - (BinaryFunc)GET_OPTIMIZED(cmp32s), - (BinaryFunc)GET_OPTIMIZED(cmp32f), (BinaryFunc)cmp64f, - 0 -}; + static BinaryFunc cmpTab[] = + { + (BinaryFunc)GET_OPTIMIZED(cmp8u), (BinaryFunc)GET_OPTIMIZED(cmp8s), + (BinaryFunc)GET_OPTIMIZED(cmp16u), (BinaryFunc)GET_OPTIMIZED(cmp16s), + (BinaryFunc)GET_OPTIMIZED(cmp32s), + (BinaryFunc)GET_OPTIMIZED(cmp32f), (BinaryFunc)cmp64f, + 0 + }; + return cmpTab[depth]; +} static double getMinVal(int depth) { @@ -2348,7 +2396,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) _dst.create(src1.size(), CV_8UC(cn)); Mat dst = _dst.getMat(); Size sz = getContinuousSize(src1, src2, dst, src1.channels()); - cmpTab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op); + getCmpFunc(src1.depth())(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op); return; } @@ -2380,7 
+2428,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) size_t esz = src1.elemSize(); size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz; - BinaryFunc func = cmpTab[depth1]; + BinaryFunc func = getCmpFunc(depth1); if( !haveScalar ) { @@ -2557,12 +2605,17 @@ static void inRangeReduce(const uchar* src, uchar* dst, size_t len, int cn) typedef void (*InRangeFunc)( const uchar* src1, size_t step1, const uchar* src2, size_t step2, const uchar* src3, size_t step3, uchar* dst, size_t step, Size sz ); -static InRangeFunc inRangeTab[] = +static InRangeFunc getInRangeFunc(int depth) { - (InRangeFunc)GET_OPTIMIZED(inRange8u), (InRangeFunc)GET_OPTIMIZED(inRange8s), (InRangeFunc)GET_OPTIMIZED(inRange16u), - (InRangeFunc)GET_OPTIMIZED(inRange16s), (InRangeFunc)GET_OPTIMIZED(inRange32s), (InRangeFunc)GET_OPTIMIZED(inRange32f), - (InRangeFunc)inRange64f, 0 -}; + static InRangeFunc inRangeTab[] = + { + (InRangeFunc)GET_OPTIMIZED(inRange8u), (InRangeFunc)GET_OPTIMIZED(inRange8s), (InRangeFunc)GET_OPTIMIZED(inRange16u), + (InRangeFunc)GET_OPTIMIZED(inRange16s), (InRangeFunc)GET_OPTIMIZED(inRange32s), (InRangeFunc)GET_OPTIMIZED(inRange32f), + (InRangeFunc)inRange64f, 0 + }; + + return inRangeTab[depth]; +} } @@ -2601,7 +2654,7 @@ void cv::inRange(InputArray _src, InputArray _lowerb, _dst.create(src.dims, src.size, CV_8U); Mat dst = _dst.getMat(); - InRangeFunc func = inRangeTab[depth]; + InRangeFunc func = getInRangeFunc(depth); const Mat* arrays_sc[] = { &src, &dst, 0 }; const Mat* arrays_nosc[] = { &src, &dst, &lb, &ub, 0 };
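The same refactor recurs throughout this patch (arithm, convert, matmul, stat): each file-scope function-pointer table becomes a local static behind a small accessor. A stripped-down sketch of the idiom with illustrative names, not taken from the patch::

    typedef int (*BinFunc)(int, int);

    static int add_i(int a, int b) { return a + b; }
    static int sub_i(int a, int b) { return a - b; }

    static BinFunc* getBinTab()
    {
        // The table is constructed on first call rather than at static
        // initialization time, so merely loading the library never touches
        // it and there is no cross-translation-unit init-order concern when
        // the entries themselves involve runtime dispatch.
        static BinFunc binTab[] = { add_i, sub_i, 0 };
        return binTab;
    }

    // usage mirrors getCmpFunc(depth) above: index the table, then call
    // getBinTab()[0](2, 3) == 5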
diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index d313f3362..3de9f83d1 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -194,17 +194,27 @@ static void merge64s(const int64** src, int64* dst, int len, int cn ) typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn); typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn); -static SplitFunc splitTab[] = +static SplitFunc getSplitFunc(int depth) { - (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u), - (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0 -}; + static SplitFunc splitTab[] = + { + (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u), + (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0 + }; -static MergeFunc mergeTab[] = + return splitTab[depth]; +} + +static MergeFunc getMergeFunc(int depth) { - (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u), - (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0 -}; + static MergeFunc mergeTab[] = + { + (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u), + (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0 + }; + + return mergeTab[depth]; +} } @@ -217,7 +227,7 @@ void cv::split(const Mat& src, Mat* mv) return; } - SplitFunc func = splitTab[depth]; + SplitFunc func = getSplitFunc(depth); CV_Assert( func != 0 ); int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1(); @@ -328,7 +338,7 @@ void cv::merge(const Mat* mv, size_t n, OutputArray _dst) NAryMatIterator it(arrays, ptrs, cn+1); int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0); - MergeFunc func = mergeTab[depth]; + MergeFunc func = getMergeFunc(depth); for( i = 0; i < it.nplanes; i++, ++it ) { @@ -429,12 +439,17 @@ static void mixChannels64s( const int64** src, const int* sdelta, typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta, uchar** dst, const int* ddelta, int len, int npairs ); -static MixChannelsFunc mixchTab[] = +static MixChannelsFunc getMixchFunc(int depth) { - (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u, - (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s, - (MixChannelsFunc)mixChannels64s, 0 -}; + static MixChannelsFunc mixchTab[] = + { + (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u, + (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s, + (MixChannelsFunc)mixChannels64s, 0 + }; + + return mixchTab[depth]; +} } @@ -489,7 +504,7 @@ void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, cons NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts)); int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1)); - MixChannelsFunc func = mixchTab[depth]; + MixChannelsFunc func = getMixchFunc(depth); for( i = 0; i < it.nplanes; i++, ++it ) { @@ -941,104 +956,109 @@ DEF_CVT_FUNC(32s64f, int, double); DEF_CVT_FUNC(32f64f, float, double); DEF_CPY_FUNC(64s, int64); -static BinaryFunc cvtScaleAbsTab[] = +static BinaryFunc getCvtScaleAbsFunc(int depth) { - (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u, - (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u, - (BinaryFunc)cvtScaleAbs64f8u, 0 -}; + static BinaryFunc cvtScaleAbsTab[] = + { + (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u, + (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u, + (BinaryFunc)cvtScaleAbs64f8u, 0 + }; -static BinaryFunc cvtScaleTab[][8] = -{ - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u), - (BinaryFunc)cvtScale64f8u, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s), - (BinaryFunc)cvtScale64f8s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u), - (BinaryFunc)cvtScale64f16u, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s), - (BinaryFunc)cvtScale64f16s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), 
(BinaryFunc)GET_OPTIMIZED(cvtScale16u32s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s), - (BinaryFunc)cvtScale64f32s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f), - (BinaryFunc)cvtScale64f32f, 0 - }, - { - (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f, - (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f, - (BinaryFunc)cvtScale64f, 0 - }, - { - 0, 0, 0, 0, 0, 0, 0, 0 - } -}; - -static BinaryFunc cvtTab[][8] = -{ - { - (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u), - (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u), - (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s), - (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s), - (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u, - (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u), - (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s), - (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s), - (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s), - (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s), - (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f), - (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s, - (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f), - (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f), - (BinaryFunc)(cvt64s), 0 - }, - { - 0, 0, 0, 0, 0, 0, 0, 0 - } -}; + return cvtScaleAbsTab[depth]; +} BinaryFunc getConvertFunc(int sdepth, int ddepth) { + static BinaryFunc cvtTab[][8] = + { + { + (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u), + (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u), + (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s), + (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s), + (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u, + (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u), + 
(BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s), + (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s), + (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s), + (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s), + (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f), + (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s, + (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f), + (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f), + (BinaryFunc)(cvt64s), 0 + }, + { + 0, 0, 0, 0, 0, 0, 0, 0 + } + }; + return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)]; } BinaryFunc getConvertScaleFunc(int sdepth, int ddepth) { + static BinaryFunc cvtScaleTab[][8] = + { + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u), + (BinaryFunc)cvtScale64f8u, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s), + (BinaryFunc)cvtScale64f8s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u), + (BinaryFunc)cvtScale64f16u, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s), + (BinaryFunc)cvtScale64f16s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s), + (BinaryFunc)cvtScale64f32s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f), + (BinaryFunc)cvtScale64f32f, 0 + }, + { + (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f, + (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f, + (BinaryFunc)cvtScale64f, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0, 0 + } + }; + return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)]; } @@ -1051,7 +1071,7 @@ void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, doubl double scale[] = {alpha, beta}; _dst.create( src.dims, 
src.size, CV_8UC(cn) ); Mat dst = _dst.getMat(); - BinaryFunc func = cvtScaleAbsTab[src.depth()]; + BinaryFunc func = getCvtScaleAbsFunc(src.depth()); CV_Assert( func != 0 ); if( src.dims <= 2 ) @@ -1371,4 +1391,4 @@ CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr, cv::normalize( src, dst, a, b, norm_type, dst.type(), mask ); } -/* End of file. */ \ No newline at end of file +/* End of file. */ diff --git a/modules/core/src/drawing.cpp b/modules/core/src/drawing.cpp index 9e3340897..144fa964f 100644 --- a/modules/core/src/drawing.cpp +++ b/modules/core/src/drawing.cpp @@ -886,11 +886,13 @@ void ellipse2Poly( Point center, Size axes, int angle, Point pt; pt.x = cvRound( cx + x * alpha - y * beta ); pt.y = cvRound( cy + x * beta + y * alpha ); - if( pt != prevPt ) + if( pt != prevPt ){ pts.push_back(pt); + prevPt = pt; + } } - if( pts.size() < 2 ) + if( pts.size() == 1 ) pts.push_back(pts[0]); } diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index b3c2b833b..c76705f1b 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -1458,6 +1458,10 @@ static void CCSIDFT_64f( const double* src, double* dst, int n, int nf, int* fac } +#ifdef HAVE_IPP +typedef IppStatus (CV_STDCALL* IppDFTGetSizeFunc)(int, int, IppHintAlgorithm, int*, int*, int*); +typedef IppStatus (CV_STDCALL* IppDFTInitFunc)(int, int, IppHintAlgorithm, void*, uchar*); +#endif void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) { @@ -1483,7 +1487,7 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) int factors[34]; bool inplace_transform = false; #ifdef HAVE_IPP - void *spec_r = 0, *spec_c = 0; + AutoBuffer<uchar> ippbuf; int ipp_norm_flag = !(flags & DFT_SCALE) ? 8 : inv ? 2 : 1; #endif @@ -1543,52 +1547,51 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) spec = 0; #ifdef HAVE_IPP - if( len*count >= 64 ) // use IPP DFT if available + if( +#if IPP_VERSION_MAJOR >= 7 + depth == CV_32F && // IPP 7.x and 8.0 have bug somewhere in double-precision DFT +#endif + len*count >= 64 ) // use IPP DFT if available { - int ipp_sz = 0; + int specsize=0, initsize=0, worksize=0; + IppDFTGetSizeFunc getSizeFunc = 0; + IppDFTInitFunc initFunc = 0; if( real_transform && stage == 0 ) { if( depth == CV_32F ) { - if( spec_r ) - IPPI_CALL( ippsDFTFree_R_32f( (IppsDFTSpec_R_32f*)spec_r )); - IPPI_CALL( ippsDFTInitAlloc_R_32f( - (IppsDFTSpec_R_32f**)&spec_r, len, ipp_norm_flag, ippAlgHintNone )); - IPPI_CALL( ippsDFTGetBufSize_R_32f( (IppsDFTSpec_R_32f*)spec_r, &ipp_sz )); + getSizeFunc = ippsDFTGetSize_R_32f; + initFunc = (IppDFTInitFunc)ippsDFTInit_R_32f; } else { - if( spec_r ) - IPPI_CALL( ippsDFTFree_R_64f( (IppsDFTSpec_R_64f*)spec_r )); - IPPI_CALL( ippsDFTInitAlloc_R_64f( - (IppsDFTSpec_R_64f**)&spec_r, len, ipp_norm_flag, ippAlgHintNone )); - IPPI_CALL( ippsDFTGetBufSize_R_64f( (IppsDFTSpec_R_64f*)spec_r, &ipp_sz )); + getSizeFunc = ippsDFTGetSize_R_64f; + initFunc = (IppDFTInitFunc)ippsDFTInit_R_64f; } - spec = spec_r; } else { if( depth == CV_32F ) { - if( spec_c ) - IPPI_CALL( ippsDFTFree_C_32fc( (IppsDFTSpec_C_32fc*)spec_c )); - IPPI_CALL( ippsDFTInitAlloc_C_32fc( - (IppsDFTSpec_C_32fc**)&spec_c, len, ipp_norm_flag, ippAlgHintNone )); - IPPI_CALL( ippsDFTGetBufSize_C_32fc( (IppsDFTSpec_C_32fc*)spec_c, &ipp_sz )); + getSizeFunc = ippsDFTGetSize_C_32fc; + initFunc = (IppDFTInitFunc)ippsDFTInit_C_32fc; } else { - if( spec_c ) - IPPI_CALL( 
ippsDFTInitAlloc_C_64fc( - (IppsDFTSpec_C_64fc**)&spec_c, len, ipp_norm_flag, ippAlgHintNone )); - IPPI_CALL( ippsDFTGetBufSize_C_64fc( (IppsDFTSpec_C_64fc*)spec_c, &ipp_sz )); + getSizeFunc = ippsDFTGetSize_C_64fc; + initFunc = (IppDFTInitFunc)ippsDFTInit_C_64fc; } - spec = spec_c; } - - sz += ipp_sz; + if( getSizeFunc(len, ipp_norm_flag, ippAlgHintNone, &specsize, &initsize, &worksize) >= 0 ) + { + ippbuf.allocate(specsize + initsize + 64); + spec = alignPtr(&ippbuf[0], 32); + uchar* initbuf = alignPtr((uchar*)spec + specsize, 32); + if( initFunc(len, ipp_norm_flag, ippAlgHintNone, spec, initbuf) < 0 ) + spec = 0; + sz += worksize; + } } else #endif @@ -1862,24 +1865,6 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) src = dst; } } - -#ifdef HAVE_IPP - if( spec_c ) - { - if( depth == CV_32F ) - ippsDFTFree_C_32fc( (IppsDFTSpec_C_32fc*)spec_c ); - else - ippsDFTFree_C_64fc( (IppsDFTSpec_C_64fc*)spec_c ); - } - - if( spec_r ) - { - if( depth == CV_32F ) - ippsDFTFree_R_32f( (IppsDFTSpec_R_32f*)spec_r ); - else - ippsDFTFree_R_64f( (IppsDFTSpec_R_64f*)spec_r ); - } -#endif } diff --git a/modules/core/src/glob.cpp b/modules/core/src/glob.cpp index 368f304ef..208b4e05c 100644 --- a/modules/core/src/glob.cpp +++ b/modules/core/src/glob.cpp @@ -56,16 +56,38 @@ namespace struct DIR { +#ifdef HAVE_WINRT + WIN32_FIND_DATAW data; +#else WIN32_FIND_DATA data; +#endif HANDLE handle; dirent ent; +#ifdef HAVE_WINRT + DIR() {}; + ~DIR() + { + if (ent.d_name) + delete[] ent.d_name; + } +#endif }; DIR* opendir(const char* path) { DIR* dir = new DIR; dir->ent.d_name = 0; - dir->handle = ::FindFirstFileA((cv::String(path) + "\\*").c_str(), &dir->data); +#ifdef HAVE_WINRT + cv::String full_path = cv::String(path) + "\\*"; + wchar_t wfull_path[MAX_PATH]; + size_t copied = mbstowcs(wfull_path, full_path.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + dir->handle = ::FindFirstFileExW(wfull_path, FindExInfoStandard, + &dir->data, FindExSearchNameMatch, NULL, 0); +#else + dir->handle = ::FindFirstFileExA((cv::String(path) + "\\*").c_str(), + FindExInfoStandard, &dir->data, FindExSearchNameMatch, NULL, 0); +#endif if(dir->handle == INVALID_HANDLE_VALUE) { /*closedir will do all cleanup*/ @@ -76,12 +98,26 @@ namespace dirent* readdir(DIR* dir) { +#ifdef HAVE_WINRT if (dir->ent.d_name != 0) { - if (::FindNextFile(dir->handle, &dir->data) != TRUE) + if (::FindNextFileW(dir->handle, &dir->data) != TRUE) + return 0; + } + size_t asize = wcstombs(NULL, dir->data.cFileName, 0); + CV_Assert((asize != 0) && (asize != (size_t)-1)); + char* aname = new char[asize+1]; + aname[asize] = 0; + wcstombs(aname, dir->data.cFileName, asize); + dir->ent.d_name = aname; +#else + if (dir->ent.d_name != 0) + { + if (::FindNextFileA(dir->handle, &dir->data) != TRUE) return 0; } dir->ent.d_name = dir->data.cFileName; +#endif return &dir->ent; } @@ -107,7 +143,18 @@ static bool isDir(const cv::String& path, DIR* dir) if (dir) attributes = dir->data.dwFileAttributes; else - attributes = ::GetFileAttributes(path.c_str()); + { + WIN32_FILE_ATTRIBUTE_DATA all_attrs; +#ifdef HAVE_WINRT + wchar_t wpath[MAX_PATH]; + size_t copied = mbstowcs(wpath, path.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &all_attrs); +#else + ::GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &all_attrs); +#endif + attributes = all_attrs.dwFileAttributes; + } return (attributes != INVALID_FILE_ATTRIBUTES) 
&& ((attributes & FILE_ATTRIBUTE_DIRECTORY) != 0); #else diff --git a/modules/core/src/lapack.cpp b/modules/core/src/lapack.cpp index f5fe53ae9..7ee84a6d4 100644 --- a/modules/core/src/lapack.cpp +++ b/modules/core/src/lapack.cpp @@ -1832,4 +1832,4 @@ cvSVBkSb( const CvArr* warr, const CvArr* uarr, cv::SVD::backSubst(w, u, v, rhs, dst); CV_Assert( dst.data == dst0.data ); -} \ No newline at end of file +} diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 05a0c5552..19443cb05 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -1725,19 +1725,29 @@ diagtransform_64f(const double* src, double* dst, const double* m, int len, int typedef void (*TransformFunc)( const uchar* src, uchar* dst, const uchar* m, int, int, int ); -static TransformFunc transformTab[] = +static TransformFunc getTransformFunc(int depth) { - (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u, - (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f, - (TransformFunc)transform_64f, 0 -}; + static TransformFunc transformTab[] = + { + (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u, + (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f, + (TransformFunc)transform_64f, 0 + }; -static TransformFunc diagTransformTab[] = + return transformTab[depth]; +} + +static TransformFunc getDiagTransformFunc(int depth) { - (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u, - (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f, - (TransformFunc)diagtransform_64f, 0 -}; + static TransformFunc diagTransformTab[] = + { + (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u, + (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f, + (TransformFunc)diagtransform_64f, 0 + }; + + return diagTransformTab[depth]; +} } @@ -1800,7 +1810,7 @@ void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx ) } } - TransformFunc func = isDiag ? diagTransformTab[depth] : transformTab[depth]; + TransformFunc func = isDiag ? 
getDiagTransformFunc(depth) : getTransformFunc(depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &dst, 0}; @@ -2766,19 +2776,24 @@ static double dotProd_64f(const double* src1, const double* src2, int len) typedef double (*DotProdFunc)(const uchar* src1, const uchar* src2, int len); -static DotProdFunc dotProdTab[] = +static DotProdFunc getDotProdFunc(int depth) { - (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s), - (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s, - (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f), - (DotProdFunc)dotProd_64f, 0 -}; + static DotProdFunc dotProdTab[] = + { + (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s), + (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s, + (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f), + (DotProdFunc)dotProd_64f, 0 + }; + + return dotProdTab[depth]; +} double Mat::dot(InputArray _mat) const { Mat mat = _mat.getMat(); int cn = channels(); - DotProdFunc func = dotProdTab[depth()]; + DotProdFunc func = getDotProdFunc(depth()); CV_Assert( mat.type() == type() && mat.size == size && func != 0 ); if( isContinuous() && mat.isContinuous() ) diff --git a/modules/core/src/out.cpp b/modules/core/src/out.cpp index 6817fca61..65190c50e 100644 --- a/modules/core/src/out.cpp +++ b/modules/core/src/out.cpp @@ -304,4 +304,3 @@ Formatted::Formatted(const Mat& _m, const Formatter* _fmt, const int* _params) } } - diff --git a/modules/core/src/parallel.cpp b/modules/core/src/parallel.cpp index 0a9ed0987..27d7ecc03 100644 --- a/modules/core/src/parallel.cpp +++ b/modules/core/src/parallel.cpp @@ -144,9 +144,9 @@ namespace { cv::Range r; r.start = (int)(wholeRange.start + - ((size_t)sr.start*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); + ((uint64)sr.start*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); r.end = sr.end >= nstripes ? wholeRange.end : (int)(wholeRange.start + - ((size_t)sr.end*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); + ((uint64)sr.end*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); (*body)(r); } cv::Range stripeRange() const { return cv::Range(0, nstripes); } @@ -453,7 +453,11 @@ int cv::getNumberOfCPUs(void) { #if defined WIN32 || defined _WIN32 SYSTEM_INFO sysinfo; +#if defined(_M_ARM) || defined(_M_X64) || defined(HAVE_WINRT) + GetNativeSystemInfo( &sysinfo ); +#else GetSystemInfo( &sysinfo ); +#endif return (int)sysinfo.dwNumberOfProcessors; #elif defined ANDROID
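The size_t-to-uint64 change in the stripe arithmetic above matters on 32-bit builds, where size_t is only 32 bits wide and the intermediate product can wrap; a worked example with illustrative numbers::

    // Stripe boundaries in the parallel-for body, with:
    //   wholeRange = [0, 5000000), nstripes = 1000, sr.start = 900
    //
    // 32-bit size_t:  900 * 5000000 + 500 = 4500000500
    //                 4500000500 mod 2^32 = 205033204 -> start = 205033 (jumps backwards)
    // uint64:         4500000500 / 1000   = 4500000   -> start = 4500000 (correct)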
diff --git a/modules/core/src/persistence.cpp b/modules/core/src/persistence.cpp index a16b63e13..bf6a64c97 100644 --- a/modules/core/src/persistence.cpp +++ b/modules/core/src/persistence.cpp @@ -59,7 +59,6 @@ #endif #if USE_ZLIB -# undef HAVE_UNISTD_H //to avoid redefinition # ifndef _LFS64_LARGEFILE # define _LFS64_LARGEFILE 0 # endif diff --git a/modules/core/src/precomp.cpp b/modules/core/src/precomp.cpp deleted file mode 100644 index e540cc5e8..000000000 --- a/modules/core/src/precomp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. 
*/ diff --git a/modules/core/src/precomp.hpp b/modules/core/src/precomp.hpp index 84f7f5e2b..c53224e0a 100644 --- a/modules/core/src/precomp.hpp +++ b/modules/core/src/precomp.hpp @@ -43,9 +43,7 @@ #ifndef __OPENCV_PRECOMP_H__ #define __OPENCV_PRECOMP_H__ -#ifdef HAVE_CVCONFIG_H #include "cvconfig.h" -#endif #include "opencv2/core/core.hpp" #include "opencv2/core/core_c.h" diff --git a/modules/core/src/rand.cpp b/modules/core/src/rand.cpp index 2cdbe3916..54bb753a1 100644 --- a/modules/core/src/rand.cpp +++ b/modules/core/src/rand.cpp @@ -726,33 +726,54 @@ void RNG::fill( InputOutputArray _mat, int disttype, } #ifdef WIN32 + + +#ifdef HAVE_WINRT +// using C++11 thread attribute for local thread data +__declspec( thread ) RNG* rng = NULL; + + void deleteThreadRNGData() + { + if (rng) + delete rng; +} + +RNG& theRNG() +{ + if (!rng) + { + rng = new RNG; + } + return *rng; +} +#else #ifdef WINCE # define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) #endif static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES; -void deleteThreadRNGData() -{ - if( tlsRNGKey != TLS_OUT_OF_INDEXES ) - delete (RNG*)TlsGetValue( tlsRNGKey ); + void deleteThreadRNGData() + { + if( tlsRNGKey != TLS_OUT_OF_INDEXES ) + delete (RNG*)TlsGetValue( tlsRNGKey ); } RNG& theRNG() { if( tlsRNGKey == TLS_OUT_OF_INDEXES ) { - tlsRNGKey = TlsAlloc(); - CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES); + tlsRNGKey = TlsAlloc(); + CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES); } RNG* rng = (RNG*)TlsGetValue( tlsRNGKey ); if( !rng ) { - rng = new RNG; - TlsSetValue( tlsRNGKey, rng ); + rng = new RNG; + TlsSetValue( tlsRNGKey, rng ); } return *rng; } - +#endif //HAVE_WINRT #else static pthread_key_t tlsRNGKey = 0; diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index e069e5298..447141c5d 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -199,14 +199,19 @@ static int sum64f( const double* src, const uchar* mask, double* dst, int len, i typedef int (*SumFunc)(const uchar*, const uchar* mask, uchar*, int, int); -static SumFunc sumTab[] = +static SumFunc getSumFunc(int depth) { - (SumFunc)GET_OPTIMIZED(sum8u), (SumFunc)sum8s, - (SumFunc)sum16u, (SumFunc)sum16s, - (SumFunc)sum32s, - (SumFunc)GET_OPTIMIZED(sum32f), (SumFunc)sum64f, - 0 -}; + static SumFunc sumTab[] = + { + (SumFunc)GET_OPTIMIZED(sum8u), (SumFunc)sum8s, + (SumFunc)sum16u, (SumFunc)sum16s, + (SumFunc)sum32s, + (SumFunc)GET_OPTIMIZED(sum32f), (SumFunc)sum64f, + 0 + }; + + return sumTab[depth]; +} template<typename T> static int countNonZero_(const T* src, int len ) @@ -271,14 +276,18 @@ static int countNonZero64f( const double* src, int len ) typedef int (*CountNonZeroFunc)(const uchar*, int); -static CountNonZeroFunc countNonZeroTab[] = +static CountNonZeroFunc getCountNonZeroTab(int depth) { - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32s), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32f), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero64f), 0 -}; + static CountNonZeroFunc countNonZeroTab[] = + { + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32s), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32f), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero64f), 0 + }; + return 
countNonZeroTab[depth]; +} template<typename T, typename ST, typename SQT> static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn ) @@ -427,11 +436,16 @@ static int sqsum64f( const double* src, const uchar* mask, double* sum, double* typedef int (*SumSqrFunc)(const uchar*, const uchar* mask, uchar*, uchar*, int, int); -static SumSqrFunc sumSqrTab[] = +static SumSqrFunc getSumSqrTab(int depth) { - (SumSqrFunc)GET_OPTIMIZED(sqsum8u), (SumSqrFunc)sqsum8s, (SumSqrFunc)sqsum16u, (SumSqrFunc)sqsum16s, - (SumSqrFunc)sqsum32s, (SumSqrFunc)GET_OPTIMIZED(sqsum32f), (SumSqrFunc)sqsum64f, 0 -}; + static SumSqrFunc sumSqrTab[] = + { + (SumSqrFunc)GET_OPTIMIZED(sqsum8u), (SumSqrFunc)sqsum8s, (SumSqrFunc)sqsum16u, (SumSqrFunc)sqsum16s, + (SumSqrFunc)sqsum32s, (SumSqrFunc)GET_OPTIMIZED(sqsum32f), (SumSqrFunc)sqsum64f, 0 + }; + + return sumSqrTab[depth]; +} } @@ -439,7 +453,46 @@ cv::Scalar cv::sum( InputArray _src ) { Mat src = _src.getMat(); int k, cn = src.channels(), depth = src.depth(); - SumFunc func = sumTab[depth]; + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + size_t total_size = src.total(); + int rows = src.size[0], cols = (int)(total_size/rows); + if( src.dims == 2 || (src.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) + { + IppiSize sz = { cols, rows }; + int type = src.type(); + typedef IppStatus (CV_STDCALL* ippiSumFunc)(const void*, int, IppiSize, double *, int); + ippiSumFunc ippFunc = + type == CV_8UC1 ? (ippiSumFunc)ippiSum_8u_C1R : + type == CV_8UC3 ? (ippiSumFunc)ippiSum_8u_C3R : + type == CV_8UC4 ? (ippiSumFunc)ippiSum_8u_C4R : + type == CV_16UC1 ? (ippiSumFunc)ippiSum_16u_C1R : + type == CV_16UC3 ? (ippiSumFunc)ippiSum_16u_C3R : + type == CV_16UC4 ? (ippiSumFunc)ippiSum_16u_C4R : + type == CV_16SC1 ? (ippiSumFunc)ippiSum_16s_C1R : + type == CV_16SC3 ? (ippiSumFunc)ippiSum_16s_C3R : + type == CV_16SC4 ? (ippiSumFunc)ippiSum_16s_C4R : + type == CV_32FC1 ? (ippiSumFunc)ippiSum_32f_C1R : + type == CV_32FC3 ? (ippiSumFunc)ippiSum_32f_C3R : + type == CV_32FC4 ? (ippiSumFunc)ippiSum_32f_C4R : + 0; + if( ippFunc ) + { + Ipp64f res[4]; + if( ippFunc(src.data, src.step[0], sz, res, ippAlgHintAccurate) >= 0 ) + { + Scalar sc; + for( int i = 0; i < cn; i++ ) + { + sc[i] = res[i]; + } + return sc; + } + } + } +#endif + + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); @@ -491,7 +544,7 @@ cv::Scalar cv::sum( InputArray _src ) int cv::countNonZero( InputArray _src ) { Mat src = _src.getMat(); - CountNonZeroFunc func = countNonZeroTab[src.depth()]; + CountNonZeroFunc func = getCountNonZeroTab(src.depth()); CV_Assert( src.channels() == 1 && func != 0 ); @@ -512,7 +565,82 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) CV_Assert( mask.empty() || mask.type() == CV_8U ); int k, cn = src.channels(), depth = src.depth(); - SumFunc func = sumTab[depth]; + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + size_t total_size = src.total(); + int rows = src.size[0], cols = (int)(total_size/rows); + if( src.dims == 2 || (src.isContinuous() && mask.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) + { + IppiSize sz = { cols, rows }; + int type = src.type(); + if( !mask.empty() ) + { + typedef IppStatus (CV_STDCALL* ippiMaskMeanFuncC1)(const void *, int, void *, int, IppiSize, Ipp64f *); + ippiMaskMeanFuncC1 ippFuncC1 = + type == CV_8UC1 ? (ippiMaskMeanFuncC1)ippiMean_8u_C1MR : + type == CV_16UC1 ? (ippiMaskMeanFuncC1)ippiMean_16u_C1MR : + type == CV_32FC1 ? 
(ippiMaskMeanFuncC1)ippiMean_32f_C1MR : + 0; + if( ippFuncC1 ) + { + Ipp64f res; + if( ippFuncC1(src.data, src.step[0], mask.data, mask.step[0], sz, &res) >= 0 ) + { + return Scalar(res); + } + } + typedef IppStatus (CV_STDCALL* ippiMaskMeanFuncC3)(const void *, int, void *, int, IppiSize, int, Ipp64f *); + ippiMaskMeanFuncC3 ippFuncC3 = + type == CV_8UC3 ? (ippiMaskMeanFuncC3)ippiMean_8u_C3CMR : + type == CV_16UC3 ? (ippiMaskMeanFuncC3)ippiMean_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskMeanFuncC3)ippiMean_32f_C3CMR : + 0; + if( ippFuncC3 ) + { + Ipp64f res1, res2, res3; + if( ippFuncC3(src.data, src.step[0], mask.data, mask.step[0], sz, 1, &res1) >= 0 && + ippFuncC3(src.data, src.step[0], mask.data, mask.step[0], sz, 2, &res2) >= 0 && + ippFuncC3(src.data, src.step[0], mask.data, mask.step[0], sz, 3, &res3) >= 0 ) + { + return Scalar(res1, res2, res3); + } + } + } + else + { + typedef IppStatus (CV_STDCALL* ippiMeanFunc)(const void*, int, IppiSize, double *, int); + ippiMeanFunc ippFunc = + type == CV_8UC1 ? (ippiMeanFunc)ippiMean_8u_C1R : + type == CV_8UC3 ? (ippiMeanFunc)ippiMean_8u_C3R : + type == CV_8UC4 ? (ippiMeanFunc)ippiMean_8u_C4R : + type == CV_16UC1 ? (ippiMeanFunc)ippiMean_16u_C1R : + type == CV_16UC3 ? (ippiMeanFunc)ippiMean_16u_C3R : + type == CV_16UC4 ? (ippiMeanFunc)ippiMean_16u_C4R : + type == CV_16SC1 ? (ippiMeanFunc)ippiMean_16s_C1R : + type == CV_16SC3 ? (ippiMeanFunc)ippiMean_16s_C3R : + type == CV_16SC4 ? (ippiMeanFunc)ippiMean_16s_C4R : + type == CV_32FC1 ? (ippiMeanFunc)ippiMean_32f_C1R : + type == CV_32FC3 ? (ippiMeanFunc)ippiMean_32f_C3R : + type == CV_32FC4 ? (ippiMeanFunc)ippiMean_32f_C4R : + 0; + if( ippFunc ) + { + Ipp64f res[4]; + if( ippFunc(src.data, src.step[0], sz, res, ippAlgHintAccurate) >= 0 ) + { + Scalar sc; + for( int i = 0; i < cn; i++ ) + { + sc[i] = res[i]; + } + return sc; + } + } + } + } +#endif + + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); @@ -571,7 +699,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input CV_Assert( mask.empty() || mask.type() == CV_8U ); int k, cn = src.channels(), depth = src.depth(); - SumSqrFunc func = sumSqrTab[depth]; + SumSqrFunc func = getSumSqrTab(depth); CV_Assert( func != 0 ); @@ -745,14 +873,19 @@ static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t); -static MinMaxIdxFunc minmaxTab[] = +static MinMaxIdxFunc getMinmaxTab(int depth) { - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8s), - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16s), - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32s), - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32f), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_64f), - 0 -}; + static MinMaxIdxFunc minmaxTab[] = + { + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32f), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_64f), + 0 + }; + + return minmaxTab[depth]; +} static void ofs2idx(const Mat& a, size_t ofs, int* idx) { @@ -785,7 +918,7 @@ void cv::minMaxIdx(InputArray _src, double* minVal, CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) || (cn >= 1 && mask.empty() && !minIdx && !maxIdx) ); - MinMaxIdxFunc func = 
minmaxTab[depth]; + MinMaxIdxFunc func = getMinmaxTab(depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &mask, 0}; @@ -1248,43 +1381,53 @@ CV_DEF_NORM_ALL(64f, double, double, double, double) typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int); typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int); -static NormFunc normTab[3][8] = +static NormFunc getNormFunc(int normType, int depth) { + static NormFunc normTab[3][8] = { - (NormFunc)GET_OPTIMIZED(normInf_8u), (NormFunc)GET_OPTIMIZED(normInf_8s), (NormFunc)GET_OPTIMIZED(normInf_16u), (NormFunc)GET_OPTIMIZED(normInf_16s), - (NormFunc)GET_OPTIMIZED(normInf_32s), (NormFunc)GET_OPTIMIZED(normInf_32f), (NormFunc)normInf_64f, 0 - }, - { - (NormFunc)GET_OPTIMIZED(normL1_8u), (NormFunc)GET_OPTIMIZED(normL1_8s), (NormFunc)GET_OPTIMIZED(normL1_16u), (NormFunc)GET_OPTIMIZED(normL1_16s), - (NormFunc)GET_OPTIMIZED(normL1_32s), (NormFunc)GET_OPTIMIZED(normL1_32f), (NormFunc)normL1_64f, 0 - }, - { - (NormFunc)GET_OPTIMIZED(normL2_8u), (NormFunc)GET_OPTIMIZED(normL2_8s), (NormFunc)GET_OPTIMIZED(normL2_16u), (NormFunc)GET_OPTIMIZED(normL2_16s), - (NormFunc)GET_OPTIMIZED(normL2_32s), (NormFunc)GET_OPTIMIZED(normL2_32f), (NormFunc)normL2_64f, 0 - } -}; + { + (NormFunc)GET_OPTIMIZED(normInf_8u), (NormFunc)GET_OPTIMIZED(normInf_8s), (NormFunc)GET_OPTIMIZED(normInf_16u), (NormFunc)GET_OPTIMIZED(normInf_16s), + (NormFunc)GET_OPTIMIZED(normInf_32s), (NormFunc)GET_OPTIMIZED(normInf_32f), (NormFunc)normInf_64f, 0 + }, + { + (NormFunc)GET_OPTIMIZED(normL1_8u), (NormFunc)GET_OPTIMIZED(normL1_8s), (NormFunc)GET_OPTIMIZED(normL1_16u), (NormFunc)GET_OPTIMIZED(normL1_16s), + (NormFunc)GET_OPTIMIZED(normL1_32s), (NormFunc)GET_OPTIMIZED(normL1_32f), (NormFunc)normL1_64f, 0 + }, + { + (NormFunc)GET_OPTIMIZED(normL2_8u), (NormFunc)GET_OPTIMIZED(normL2_8s), (NormFunc)GET_OPTIMIZED(normL2_16u), (NormFunc)GET_OPTIMIZED(normL2_16s), + (NormFunc)GET_OPTIMIZED(normL2_32s), (NormFunc)GET_OPTIMIZED(normL2_32f), (NormFunc)normL2_64f, 0 + } + }; -static NormDiffFunc normDiffTab[3][8] = + return normTab[normType][depth]; +} + +static NormDiffFunc getNormDiffFunc(int normType, int depth) { + static NormDiffFunc normDiffTab[3][8] = { - (NormDiffFunc)GET_OPTIMIZED(normDiffInf_8u), (NormDiffFunc)normDiffInf_8s, - (NormDiffFunc)normDiffInf_16u, (NormDiffFunc)normDiffInf_16s, - (NormDiffFunc)normDiffInf_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffInf_32f), - (NormDiffFunc)normDiffInf_64f, 0 - }, - { - (NormDiffFunc)GET_OPTIMIZED(normDiffL1_8u), (NormDiffFunc)normDiffL1_8s, - (NormDiffFunc)normDiffL1_16u, (NormDiffFunc)normDiffL1_16s, - (NormDiffFunc)normDiffL1_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL1_32f), - (NormDiffFunc)normDiffL1_64f, 0 - }, - { - (NormDiffFunc)GET_OPTIMIZED(normDiffL2_8u), (NormDiffFunc)normDiffL2_8s, - (NormDiffFunc)normDiffL2_16u, (NormDiffFunc)normDiffL2_16s, - (NormDiffFunc)normDiffL2_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL2_32f), - (NormDiffFunc)normDiffL2_64f, 0 - } -}; + { + (NormDiffFunc)GET_OPTIMIZED(normDiffInf_8u), (NormDiffFunc)normDiffInf_8s, + (NormDiffFunc)normDiffInf_16u, (NormDiffFunc)normDiffInf_16s, + (NormDiffFunc)normDiffInf_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffInf_32f), + (NormDiffFunc)normDiffInf_64f, 0 + }, + { + (NormDiffFunc)GET_OPTIMIZED(normDiffL1_8u), (NormDiffFunc)normDiffL1_8s, + (NormDiffFunc)normDiffL1_16u, (NormDiffFunc)normDiffL1_16s, + (NormDiffFunc)normDiffL1_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL1_32f), + (NormDiffFunc)normDiffL1_64f, 0 + }, + { + 
(NormDiffFunc)GET_OPTIMIZED(normDiffL2_8u), (NormDiffFunc)normDiffL2_8s, + (NormDiffFunc)normDiffL2_16u, (NormDiffFunc)normDiffL2_16s, + (NormDiffFunc)normDiffL2_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL2_32f), + (NormDiffFunc)normDiffL2_64f, 0 + } + }; + + return normDiffTab[normType][depth]; +} } @@ -1368,7 +1511,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask ) return result; } - NormFunc func = normTab[normType >> 1][depth]; + NormFunc func = getNormFunc(normType >> 1, depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &mask, 0}; @@ -1509,7 +1652,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m return result; } - NormDiffFunc func = normDiffTab[normType >> 1][depth]; + NormDiffFunc func = getNormDiffFunc(normType >> 1, depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src1, &src2, &mask, 0}; diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index a891e94cc..88993630e 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -42,11 +42,20 @@ #include "precomp.hpp" +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif +#endif + #if defined WIN32 || defined _WIN32 || defined WINCE #ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?) #define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx #endif #include +#if (_WIN32_WINNT >= 0x0602) + #include +#endif #undef small #undef min #undef max @@ -75,6 +84,30 @@ } #endif #endif + +#ifdef HAVE_WINRT +#include + +std::wstring GetTempPathWinRT() +{ + return std::wstring(Windows::Storage::ApplicationData::Current->TemporaryFolder->Path->Data()); +} + +std::wstring GetTempFileNameWinRT(std::wstring prefix) +{ + wchar_t guidStr[40]; + GUID g; + CoCreateGuid(&g); + wchar_t* mask = L"%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x"; + swprintf(&guidStr[0], sizeof(guidStr)/sizeof(wchar_t), mask, + g.Data1, g.Data2, g.Data3, UINT(g.Data4[0]), UINT(g.Data4[1]), + UINT(g.Data4[2]), UINT(g.Data4[3]), UINT(g.Data4[4]), + UINT(g.Data4[5]), UINT(g.Data4[6]), UINT(g.Data4[7])); + + return prefix + std::wstring(guidStr); +} + +#endif #else #include #include @@ -359,12 +392,39 @@ string format( const char* fmt, ... 
) string tempfile( const char* suffix ) { +#ifdef HAVE_WINRT + std::wstring temp_dir = L""; + const wchar_t* opencv_temp_dir = _wgetenv(L"OPENCV_TEMP_PATH"); + if (opencv_temp_dir) + temp_dir = std::wstring(opencv_temp_dir); +#else const char *temp_dir = getenv("OPENCV_TEMP_PATH"); +#endif string fname; #if defined WIN32 || defined _WIN32 - char temp_dir2[MAX_PATH + 1] = { 0 }; - char temp_file[MAX_PATH + 1] = { 0 }; +#ifdef HAVE_WINRT + RoInitialize(RO_INIT_MULTITHREADED); + std::wstring temp_dir2; + if (temp_dir.empty()) + temp_dir = GetTempPathWinRT(); + + std::wstring temp_file; + temp_file = GetTempFileNameWinRT(L"ocv"); + if (temp_file.empty()) + return std::string(); + + temp_file = temp_dir + std::wstring(L"\\") + temp_file; + DeleteFileW(temp_file.c_str()); + + char aname[MAX_PATH]; + size_t copied = wcstombs(aname, temp_file.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + fname = std::string(aname); + RoUninitialize(); +#else + char temp_dir2[MAX_PATH] = { 0 }; + char temp_file[MAX_PATH] = { 0 }; if (temp_dir == 0 || temp_dir[0] == 0) { @@ -377,6 +437,7 @@ string tempfile( const char* suffix ) DeleteFileA(temp_file); fname = temp_file; +#endif # else # ifdef ANDROID //char defaultTemplate[] = "/mnt/sdcard/__opencv_temp.XXXXXX"; @@ -469,40 +530,6 @@ redirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata) } -/*CV_IMPL int -cvGuiBoxReport( int code, const char *func_name, const char *err_msg, - const char *file, int line, void* ) -{ -#if (!defined WIN32 && !defined _WIN32) || defined WINCE - return cvStdErrReport( code, func_name, err_msg, file, line, 0 ); -#else - if( code != CV_StsBackTrace && code != CV_StsAutoTrace ) - { - size_t msg_len = strlen(err_msg ? err_msg : "") + 1024; - char* message = (char*)alloca(msg_len); - char title[100]; - - wsprintf( message, "%s (%s)\nin function %s, %s(%d)\n\n" - "Press \"Abort\" to terminate application.\n" - "Press \"Retry\" to debug (if the app is running under debugger).\n" - "Press \"Ignore\" to continue (this is not safe).\n", - cvErrorStr(code), err_msg ? err_msg : "no description", - func_name, file, line ); - - wsprintf( title, "OpenCV GUI Error Handler" ); - - int answer = MessageBox( NULL, message, title, MB_ICONERROR|MB_ABORTRETRYIGNORE|MB_SYSTEMMODAL ); - - if( answer == IDRETRY ) - { - CV_DBG_BREAK(); - } - return answer != IDIGNORE; - } - return 0; -#endif -}*/ - CV_IMPL int cvCheckHardwareSupport(int feature) { CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE ); @@ -779,7 +806,11 @@ cvGetModuleInfo( const char* name, const char **version, const char **plugin_lis *plugin_list = plugin_list_buf; } -#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE +#if defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE +#ifdef HAVE_WINRT + #pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +#endif + BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ); BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ) @@ -800,7 +831,15 @@ namespace cv struct Mutex::Impl { - Impl() { InitializeCriticalSection(&cs); refcount = 1; } + Impl() + { +#if (_WIN32_WINNT >= 0x0600) + ::InitializeCriticalSectionEx(&cs, 1000, 0); +#else + ::InitializeCriticalSection(&cs); +#endif + refcount = 1; + } ~Impl() { DeleteCriticalSection(&cs); } void lock() { EnterCriticalSection(&cs); } @@ -904,4 +943,4 @@ bool Mutex::trylock() { return impl->trylock(); } } -/* End of file. 
*/ \ No newline at end of file +/* End of file. */ diff --git a/modules/core/test/test_arithm.cpp b/modules/core/test/test_arithm.cpp index a3e61f22a..664fa0204 100644 --- a/modules/core/test/test_arithm.cpp +++ b/modules/core/test/test_arithm.cpp @@ -1123,7 +1123,7 @@ struct MeanOp : public BaseElemWiseOp } double getMaxErr(int) { - return 1e-6; + return 1e-5; } }; @@ -1563,4 +1563,4 @@ TEST(Core_round, CvRound) ASSERT_EQ(4, cvRound(3.5)); ASSERT_EQ(-2, cvRound(-2.5)); ASSERT_EQ(-4, cvRound(-3.5)); -} \ No newline at end of file +} diff --git a/modules/core/test/test_ds.cpp b/modules/core/test/test_ds.cpp index d79786054..cd76ca2fe 100644 --- a/modules/core/test/test_ds.cpp +++ b/modules/core/test/test_ds.cpp @@ -2118,5 +2118,3 @@ TEST(Core_DS_Seq, sort_invert) { Core_SeqSortInvTest test; test.safe_run(); } TEST(Core_DS_Set, basic_operations) { Core_SetTest test; test.safe_run(); } TEST(Core_DS_Graph, basic_operations) { Core_GraphTest test; test.safe_run(); } TEST(Core_DS_Graph, scan) { Core_GraphScanTest test; test.safe_run(); } - - diff --git a/modules/core/test/test_dxt.cpp b/modules/core/test/test_dxt.cpp index 16025fa8f..1c0c7b00b 100644 --- a/modules/core/test/test_dxt.cpp +++ b/modules/core/test/test_dxt.cpp @@ -866,5 +866,3 @@ protected: }; TEST(Core_DFT, complex_output) { Core_DFTComplexOutputTest test; test.safe_run(); } - - diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp index 3526e8376..09a45fd6a 100644 --- a/modules/core/test/test_io.cpp +++ b/modules/core/test/test_io.cpp @@ -390,7 +390,6 @@ protected: try { string fname = cv::tempfile(".xml"); - FileStorage fs(fname, FileStorage::WRITE); vector<int> mi, mi2, mi3, mi4; vector<Mat> mv, mv2, mv3, mv4; Mat m(10, 9, CV_32F); @@ -398,24 +397,59 @@ randu(m, 0, 1); mi3.push_back(5); mv3.push_back(m); + Point_<float> p1(1.1f, 2.2f), op1; + Point3i p2(3, 4, 5), op2; + Size s1(6, 7), os1; + Complex<float> c1(9, 10), oc1; + Rect r1(11, 12, 13, 14), or1; + Vec<int, 5> v1(15, 16, 17, 18, 19), ov1; + Scalar sc1(20.0, 21.1, 22.2, 23.3), osc1; + Range g1(7, 8), og1; + + FileStorage fs(fname, FileStorage::WRITE); fs << "mi" << mi; fs << "mv" << mv; fs << "mi3" << mi3; fs << "mv3" << mv3; fs << "empty" << empty; + fs << "p1" << p1; + fs << "p2" << p2; + fs << "s1" << s1; + fs << "c1" << c1; + fs << "r1" << r1; + fs << "v1" << v1; + fs << "sc1" << sc1; + fs << "g1" << g1; fs.release(); + fs.open(fname, FileStorage::READ); fs["mi"] >> mi2; fs["mv"] >> mv2; fs["mi3"] >> mi4; fs["mv3"] >> mv4; fs["empty"] >> empty; + fs["p1"] >> op1; + fs["p2"] >> op2; + fs["s1"] >> os1; + fs["c1"] >> oc1; + fs["r1"] >> or1; + fs["v1"] >> ov1; + fs["sc1"] >> osc1; + fs["g1"] >> og1; CV_Assert( mi2.empty() ); CV_Assert( mv2.empty() ); CV_Assert( norm(mi3, mi4, CV_C) == 0 ); CV_Assert( mv4.size() == 1 ); double n = norm(mv3[0], mv4[0], CV_C); CV_Assert( n == 0 ); + CV_Assert( op1 == p1 ); + CV_Assert( op2 == p2 ); + CV_Assert( os1 == s1 ); + CV_Assert( oc1 == c1 ); + CV_Assert( or1 == r1 ); + CV_Assert( ov1 == v1 ); + CV_Assert( osc1 == sc1 ); + CV_Assert( og1 == g1 ); } catch(...) {
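The expanded test above pins down FileStorage round-tripping for the small geometric types (Point_, Point3i, Size, Complex, Rect, Vec, Scalar, Range). With that support in place, user code can persist these types directly; a minimal sketch of the same round trip (the file name is arbitrary):

    #include "opencv2/core/core.hpp"

    int main()
    {
        cv::Point2f p(1.5f, 2.5f);
        cv::Rect r(10, 20, 30, 40);

        cv::FileStorage fs("types.xml", cv::FileStorage::WRITE);
        fs << "p" << p << "r" << r;   // each value is stored as a short sequence
        fs.release();

        cv::Point2f p2;
        cv::Rect r2;
        fs.open("types.xml", cv::FileStorage::READ);
        fs["p"] >> p2;
        fs["r"] >> r2;
        CV_Assert(p2 == p && r2 == r);
        return 0;
    }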
diff --git a/modules/core/test/test_main.cpp b/modules/core/test/test_main.cpp index 6b2499344..d5400e20f 100644 --- a/modules/core/test/test_main.cpp +++ b/modules/core/test/test_main.cpp @@ -1,3 +1,10 @@ +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif +#endif + + #include "test_precomp.hpp" CV_TEST_MAIN("cv") diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp index c3d88bda6..3847afce6 100644 --- a/modules/core/test/test_math.cpp +++ b/modules/core/test/test_math.cpp @@ -2457,7 +2457,7 @@ TEST(Core_Invert, small) { cv::Mat a = (cv::Mat_<double>(3,3) << 2.42104644730331, 1.81444796521479, -3.98072565304758, 0, 7.08389214348967e-3, 5.55326770986007e-3, 0,0, 7.44556154284261e-3); //cv::randu(a, -1, 1); - + cv::Mat b = a.t()*a; cv::Mat c, i = Mat_<double>::eye(3, 3); cv::invert(b, c, cv::DECOMP_LU); //std::cout << b*c << std::endl; @@ -2643,4 +2643,3 @@ TEST(CovariationMatrixVectorOfMatWithMean, accuracy) } /* End of file. */ - diff --git a/modules/core/test/test_precomp.cpp b/modules/core/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/core/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/features2d/CMakeLists.txt b/modules/features2d/CMakeLists.txt index 7d36a58ac..0b080cfb9 100644 --- a/modules/features2d/CMakeLists.txt +++ b/modules/features2d/CMakeLists.txt @@ -1,3 +1,2 @@ set(the_description "2D Features Framework") ocv_define_module(features2d opencv_imgproc opencv_flann OPTIONAL opencv_highgui) - diff --git a/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst b/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst index 63e56f678..3639bd600 100644 --- a/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst +++ b/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst @@ -9,7 +9,10 @@ represented as vectors in a multidimensional space. All objects that implement t descriptor extractors inherit the :ocv:class:`DescriptorExtractor` interface. +.. note:: + * An example explaining keypoint extraction can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp + * An example on descriptor evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_evaluation.cpp DescriptorExtractor ------------------- @@ -78,9 +81,10 @@ The current implementation supports the following types of a descriptor extracto * ``"SIFT"`` -- :ocv:class:`SIFT` * ``"SURF"`` -- :ocv:class:`SURF` - * ``"ORB"`` -- :ocv:class:`ORB` - * ``"BRISK"`` -- :ocv:class:`BRISK` * ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor` + * ``"BRISK"`` -- :ocv:class:`BRISK` + * ``"ORB"`` -- :ocv:class:`ORB` + * ``"FREAK"`` -- :ocv:class:`FREAK` A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` -- :ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above), @@ -137,4 +141,6 @@ Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* , ... }; +.. 
note:: + * A complete BRIEF extractor sample can be found at opencv_source_code/samples/cpp/brief_match_test.cpp diff --git a/modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst b/modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst index d7e5eb4c2..91c4110d2 100644 --- a/modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst +++ b/modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst @@ -9,6 +9,12 @@ that are represented as vectors in a multidimensional space. All objects that im descriptor matchers inherit the :ocv:class:`DescriptorMatcher` interface. +.. note:: + + * An example explaining keypoint matching can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp + * An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp + * An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp + DMatch ------ .. ocv:struct:: DMatch @@ -299,4 +305,3 @@ Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` o }; .. - diff --git a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst index 81c72d3a8..51efca5ce 100644 --- a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst +++ b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst @@ -8,6 +8,11 @@ between different algorithms solving the same problem. All objects that implemen inherit the :ocv:class:`FeatureDetector` interface. +.. note:: + + * An example explaining keypoint detection can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp + + KeyPoint -------- .. ocv:class:: KeyPoint @@ -220,7 +225,7 @@ StarFeatureDetector ------------------- .. ocv:class:: StarFeatureDetector : public FeatureDetector -The class implements the keypoint detector introduced by K. Konolige, synonym of ``StarDetector``. :: +The class implements the keypoint detector introduced by [Agrawal08]_, synonym of ``StarDetector``. :: class StarFeatureDetector : public FeatureDetector { @@ -234,6 +239,9 @@ The class implements the keypoint detector introduced by K. Konolige, synonym of ... }; +.. [Agrawal08] Agrawal, M., Konolige, K., & Blas, M. R. (2008). Censure: Center surround extremas for realtime feature detection and matching. In Computer Vision–ECCV 2008 (pp. 102-115). Springer Berlin Heidelberg. + + DenseFeatureDetector -------------------- .. ocv:class:: DenseFeatureDetector : public FeatureDetector diff --git a/modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst b/modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst index a306c6606..5a7f952bc 100644 --- a/modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst +++ b/modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst @@ -11,7 +11,11 @@ Every descriptor with the :ocv:class:`VectorDescriptorMatcher` ). There are descriptors such as the One-way descriptor and Ferns that have the ``GenericDescriptorMatcher`` interface implemented but do not support ``DescriptorExtractor``. +.. 
note:: + * An example explaining keypoint description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp + * An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp + * An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp GenericDescriptorMatcher ------------------------ @@ -270,5 +274,3 @@ Example: :: VectorDescriptorMatcher matcher( new SurfDescriptorExtractor, new BruteForceMatcher > ); - - diff --git a/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst b/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst index 2669ab9f2..e8ef4e0b3 100644 --- a/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst +++ b/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst @@ -76,4 +76,3 @@ Draws keypoints. :param color: Color of keypoints. :param flags: Flags setting drawing features. Possible ``flags`` bit values are defined by ``DrawMatchesFlags``. See details above in :ocv:func:`drawMatches` . - diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index 80a1de04a..a0272026d 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -3,6 +3,10 @@ Feature Detection and Description .. highlight:: cpp +.. note:: + + * An example explaining keypoint detection and description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp + FAST ---- Detects corners using the FAST algorithm @@ -51,6 +55,10 @@ Maximally stable extremal region extractor. :: The class encapsulates all the parameters of the MSER extraction algorithm (see http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://code.opencv.org/projects/opencv/wiki/MSER for useful comments and parameters description. +.. note:: + + * (Python) A complete example showing the use of the MSER detector can be found at opencv_source_code/samples/python2/mser.py + ORB --- @@ -158,6 +166,10 @@ Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, descr .. [AOV12] A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award Winner. +.. note:: + + * An example on how to use the FREAK descriptor can be found at opencv_source_code/samples/cpp/freak_demo.cpp + FREAK::FREAK ------------ The FREAK constructor diff --git a/modules/features2d/doc/object_categorization.rst b/modules/features2d/doc/object_categorization.rst index d7b34aff5..644634fd0 100644 --- a/modules/features2d/doc/object_categorization.rst +++ b/modules/features2d/doc/object_categorization.rst @@ -5,6 +5,12 @@ Object Categorization This section describes approaches based on local 2D features and used to categorize objects. +.. note:: + + * A complete Bag-Of-Words sample can be found at opencv_source_code/samples/cpp/bagofwords_classification.cpp + + * (Python) An example using the features2D framework to perform object categorization can be found at opencv_source_code/samples/python2/find_obj.py + BOWTrainer ---------- .. ocv:class:: BOWTrainer @@ -34,7 +40,7 @@ Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :: BOWTrainer::add ------------------- -Adds descriptors to a training set. +Adds descriptors to a training set. .. 
ocv:function:: void BOWTrainer::add( const Mat& descriptors ) @@ -60,7 +66,7 @@ Returns the count of all descriptors stored in the training set. BOWTrainer::cluster ----------------------- -Clusters train descriptors. +Clusters train descriptors. .. ocv:function:: Mat BOWTrainer::cluster() const @@ -110,7 +116,7 @@ Class to compute an image descriptor using the *bag of visual words*. Such a com #. Compute descriptors for a given image and its keypoints set. #. Find the nearest visual words from the vocabulary for each keypoint descriptor. #. Compute the bag-of-words image descriptor as is a normalized histogram of vocabulary words encountered in the image. The ``i``-th bin of the histogram is a frequency of ``i``-th word of the vocabulary in the given image. - + The class declaration is the following: :: class BOWImgDescriptorExtractor @@ -198,4 +204,3 @@ BOWImgDescriptorExtractor::descriptorType Returns an image descriptor type. .. ocv:function:: int BOWImgDescriptorExtractor::descriptorType() const - diff --git a/modules/features2d/include/opencv2/features2d/features2d.hpp b/modules/features2d/include/opencv2/features2d/features2d.hpp index c1967f813..d4649baab 100644 --- a/modules/features2d/include/opencv2/features2d/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d/features2d.hpp @@ -263,6 +263,8 @@ public: OutputArray descriptors, bool useProvidedKeypoints=false ) const = 0; + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector& keypoints, CV_OUT Mat& descriptors ) const; + // Create feature detector and descriptor extractor by name. CV_WRAP static Ptr create( const string& name ); }; diff --git a/modules/features2d/perf/perf_fast.cpp b/modules/features2d/perf/perf_fast.cpp index f6a47fe0f..abd3d8733 100644 --- a/modules/features2d/perf/perf_fast.cpp +++ b/modules/features2d/perf/perf_fast.cpp @@ -41,4 +41,3 @@ PERF_TEST_P(fast, detect, testing::Combine( SANITY_CHECK_KEYPOINTS(points); } - diff --git a/modules/features2d/perf/perf_precomp.cpp b/modules/features2d/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d4..000000000 --- a/modules/features2d/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/features2d/src/bagofwords.cpp b/modules/features2d/src/bagofwords.cpp index 9770064c9..83d9df7b3 100644 --- a/modules/features2d/src/bagofwords.cpp +++ b/modules/features2d/src/bagofwords.cpp @@ -149,7 +149,7 @@ void BOWImgDescriptorExtractor::compute( const Mat& image, vector& key int clusterCount = descriptorSize(); // = vocabulary.rows // Compute descriptors for the image. - Mat descriptors = _descriptors ? *_descriptors : Mat(); + Mat descriptors; dextractor->compute( image, keypoints, descriptors ); // Match keypoint descriptors to cluster center (to vocabulary) @@ -178,6 +178,11 @@ void BOWImgDescriptorExtractor::compute( const Mat& image, vector& key // Normalize image descriptor. 
imgDescriptor /= descriptors.rows; + + // Add the descriptors of image keypoints + if (_descriptors) { + *_descriptors = descriptors.clone(); + } } int BOWImgDescriptorExtractor::descriptorSize() const diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index d1fa0c9c8..622f77229 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -525,7 +525,11 @@ BRISK::operator()( InputArray _image, InputArray _mask, vector& keypoi bool doOrientation=true; if (useProvidedKeypoints) doOrientation = false; - computeDescriptorsAndOrOrientation(_image, _mask, keypoints, _descriptors, true, doOrientation, + + // If the user specified cv::noArray(), this will yield false. Otherwise it will return true. + bool doDescriptors = _descriptors.needed(); + + computeDescriptorsAndOrOrientation(_image, _mask, keypoints, _descriptors, doDescriptors, doOrientation, useProvidedKeypoints); } diff --git a/modules/features2d/src/descriptors.cpp b/modules/features2d/src/descriptors.cpp index 06efe9791..c9e87c2ba 100644 --- a/modules/features2d/src/descriptors.cpp +++ b/modules/features2d/src/descriptors.cpp @@ -106,6 +106,12 @@ Ptr DescriptorExtractor::create(const string& descriptorExt return Algorithm::create("Feature2D." + descriptorExtractorType); } + +CV_WRAP void Feature2D::compute( const Mat& image, CV_OUT CV_IN_OUT std::vector& keypoints, CV_OUT Mat& descriptors ) const +{ + DescriptorExtractor::compute(image, keypoints, descriptors); +} + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// /****************************************************************************************\ diff --git a/modules/features2d/src/evaluation.cpp b/modules/features2d/src/evaluation.cpp index 1724b0176..44151c03c 100644 --- a/modules/features2d/src/evaluation.cpp +++ b/modules/features2d/src/evaluation.cpp @@ -258,7 +258,7 @@ struct IntersectAreaCounter { CV_Assert( miny < maxy ); CV_Assert( dr > FLT_EPSILON ); - + int temp_bua = bua, temp_bna = bna; for( int i = range.begin(); i != range.end(); i++ ) { diff --git a/modules/features2d/src/fast_score.cpp b/modules/features2d/src/fast_score.cpp index 423b1f950..de697b7c9 100644 --- a/modules/features2d/src/fast_score.cpp +++ b/modules/features2d/src/fast_score.cpp @@ -357,4 +357,3 @@ int cornerScore<8>(const uchar* ptr, const int pixel[], int threshold) } } // namespace cv - diff --git a/modules/features2d/src/precomp.cpp b/modules/features2d/src/precomp.cpp deleted file mode 100644 index 3e0ec42de..000000000 --- a/modules/features2d/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/features2d/src/precomp.hpp b/modules/features2d/src/precomp.hpp index cbc1eb690..72f618600 100644 --- a/modules/features2d/src/precomp.hpp +++ b/modules/features2d/src/precomp.hpp @@ -43,9 +43,7 @@ #ifndef __OPENCV_PRECOMP_H__ #define __OPENCV_PRECOMP_H__ -#ifdef HAVE_CVCONFIG_H #include "cvconfig.h" -#endif #include "opencv2/features2d/features2d.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/modules/features2d/test/test_brisk.cpp b/modules/features2d/test/test_brisk.cpp index 49d149834..a254992b4 100644 --- a/modules/features2d/test/test_brisk.cpp +++ b/modules/features2d/test/test_brisk.cpp @@ -92,4 +92,3 @@ void CV_BRISKTest::run( int ) } TEST(Features2d_BRISK, regression) { CV_BRISKTest test; test.safe_run(); } - diff --git a/modules/features2d/test/test_fast.cpp b/modules/features2d/test/test_fast.cpp index 4161be1ab..40cc7f02a 100644 --- a/modules/features2d/test/test_fast.cpp +++ b/modules/features2d/test/test_fast.cpp @@ -134,4 +134,3 @@ void CV_FastTest::run( int ) } TEST(Features2d_FAST, regression) { CV_FastTest test; test.safe_run(); } - diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index b5d01a737..009ded5db 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -166,5 +166,3 @@ TEST(Features2d_Detector_Keypoints_Dense, validation) CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.Dense")); test.safe_run(); } - - diff --git a/modules/features2d/test/test_mser.cpp b/modules/features2d/test/test_mser.cpp index b30400a50..0170bad91 100644 --- a/modules/features2d/test/test_mser.cpp +++ b/modules/features2d/test/test_mser.cpp @@ -204,4 +204,3 @@ void CV_MserTest::run(int) } TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); } - diff --git a/modules/features2d/test/test_precomp.cpp b/modules/features2d/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/features2d/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/flann/CMakeLists.txt b/modules/flann/CMakeLists.txt index 645e8ce85..a6326c40a 100644 --- a/modules/flann/CMakeLists.txt +++ b/modules/flann/CMakeLists.txt @@ -1,3 +1,2 @@ 
set(the_description "Clustering and Search in Multi-Dimensional Spaces") ocv_define_module(flann opencv_core) - diff --git a/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst b/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst index ee44b301e..07b0b2a46 100644 --- a/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst +++ b/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst @@ -68,11 +68,11 @@ The method constructs a fast search structure from a set of features using the s * **branching** The branching factor to use for the hierarchical k-means tree - * **iterations** The maximum number of iterations to use in the k-means clustering stage when building the k-means tree. A value of -1 used here means that the k-means clustering should be iterated until convergence + * **iterations** The maximum number of iterations to use in the k-means clustering stage when building the k-means tree. A value of -1 used here means that the k-means clustering should be iterated until convergence - * **centers_init** The algorithm to use for selecting the initial centers when performing a k-means clustering step. The possible values are ``CENTERS_RANDOM`` (picks the initial cluster centers randomly), ``CENTERS_GONZALES`` (picks the initial centers using Gonzales' algorithm) and ``CENTERS_KMEANSPP`` (picks the initial centers using the algorithm suggested in arthur_kmeanspp_2007 ) + * **centers_init** The algorithm to use for selecting the initial centers when performing a k-means clustering step. The possible values are ``CENTERS_RANDOM`` (picks the initial cluster centers randomly), ``CENTERS_GONZALES`` (picks the initial centers using Gonzales' algorithm) and ``CENTERS_KMEANSPP`` (picks the initial centers using the algorithm suggested in arthur_kmeanspp_2007 ) - * **cb_index** This parameter (cluster boundary index) influences the way exploration is performed in the hierarchical kmeans tree. When ``cb_index`` is zero the next kmeans domain to be explored is chosen to be the one with the closest center. A value greater then zero also takes into account the size of the domain. + * **cb_index** This parameter (cluster boundary index) influences the way exploration is performed in the hierarchical kmeans tree. When ``cb_index`` is zero the next kmeans domain to be explored is chosen to be the one with the closest center. A value greater then zero also takes into account the size of the domain. * **CompositeIndexParams** When using a parameters object of this type the index created combines the randomized kd-trees and the hierarchical k-means tree. :: @@ -122,16 +122,16 @@ The method constructs a fast search structure from a set of features using the s .. - * **target_precision** Is a number between 0 and 1 specifying the percentage of the approximate nearest-neighbor searches that return the exact nearest-neighbor. Using a higher value for this parameter gives more accurate results, but the search takes longer. The optimum value usually depends on the application. + * **target_precision** Is a number between 0 and 1 specifying the percentage of the approximate nearest-neighbor searches that return the exact nearest-neighbor. Using a higher value for this parameter gives more accurate results, but the search takes longer. The optimum value usually depends on the application. - * **build_weight** Specifies the importance of the index build time raported to the nearest-neighbor search time. 
In some applications it's acceptable for the index build step to take a long time if the subsequent searches in the index can be performed very fast. In other applications it's required that the index be build as fast as possible even if that leads to slightly longer search times. + * **build_weight** Specifies the importance of the index build time relative to the nearest-neighbor search time. In some applications it's acceptable for the index build step to take a long time if the subsequent searches in the index can be performed very fast. In other applications it's required that the index be built as fast as possible even if that leads to slightly longer search times. * **memory_weight** Is used to specify the tradeoff between time (index build time and search time) and memory used by the index. A value less than 1 gives more importance to the time spent and a value greater than 1 gives more importance to the memory usage. - * **sample_fraction** Is a number between 0 and 1 indicating what fraction of the dataset to use in the automatic parameter configuration algorithm. Running the algorithm on the full dataset gives the most accurate results, but for very large datasets can take longer than desired. In such case using just a fraction of the data helps speeding up this algorithm while still giving good approximations of the optimum parameters. + * **sample_fraction** Is a number between 0 and 1 indicating what fraction of the dataset to use in the automatic parameter configuration algorithm. Running the algorithm on the full dataset gives the most accurate results, but for very large datasets can take longer than desired. In such a case, using just a fraction of the data helps speed up this algorithm while still giving good approximations of the optimum parameters. * **SavedIndexParams** This object type is used for loading a previously saved index from the disk. ::
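To make the parameters above concrete, here is a hedged sketch of building an auto-tuned index, saving it, and reloading it with SavedIndexParams (the data, dimensions, and file name are made up for illustration):

    #include "opencv2/core/core.hpp"
    #include "opencv2/flann/flann.hpp"

    int main()
    {
        // 1000 random 32-dimensional feature vectors, one per row
        cv::Mat features(1000, 32, CV_32F);
        cv::randu(features, cv::Scalar::all(0), cv::Scalar::all(1));

        // Let FLANN pick the index type: 90% precision, favor build speed,
        // autotune on 10% of the data
        cv::flann::Index index(features,
            cv::flann::AutotunedIndexParams(0.9f, 0.01f, 0, 0.1f));
        index.save("index.bin");

        // Reload later without rebuilding
        cv::flann::Index saved(features, cv::flann::SavedIndexParams("index.bin"));

        cv::Mat query(1, 32, CV_32F), indices, dists;
        cv::randu(query, cv::Scalar::all(0), cv::Scalar::all(1));
        saved.knnSearch(query, indices, dists, 4, cv::flann::SearchParams(32));
        return 0;
    }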
diff --git a/modules/flann/include/opencv2/flann/dist.h b/modules/flann/include/opencv2/flann/dist.h index 7380d0c5d..80ae2dc91 100644 --- a/modules/flann/include/opencv2/flann/dist.h +++ b/modules/flann/include/opencv2/flann/dist.h @@ -43,8 +43,12 @@ typedef unsigned __int64 uint64_t; #include "defines.h" +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include <Intrin.h> +#endif + #ifdef __ARM_NEON__ -#include "arm_neon.h" +# include <arm_neon.h> #endif namespace cvflann diff --git a/modules/flann/include/opencv2/flann/random.h b/modules/flann/include/opencv2/flann/random.h index 2a67352da..a3cf5ec53 100644 --- a/modules/flann/include/opencv2/flann/random.h +++ b/modules/flann/include/opencv2/flann/random.h @@ -131,5 +131,3 @@ public: } #endif //OPENCV_FLANN_RANDOM_H - - diff --git a/modules/flann/include/opencv2/flann/result_set.h b/modules/flann/include/opencv2/flann/result_set.h index 7bb709b76..3adad4659 100644 --- a/modules/flann/include/opencv2/flann/result_set.h +++ b/modules/flann/include/opencv2/flann/result_set.h @@ -540,4 +540,3 @@ private: } #endif //OPENCV_FLANN_RESULTSET_H - diff --git a/modules/flann/src/flann.cpp b/modules/flann/src/flann.cpp index fa1fdaf41..67fff88a4 100644 --- a/modules/flann/src/flann.cpp +++ b/modules/flann/src/flann.cpp @@ -54,4 +54,4 @@ namespace cvflann } void dummyfunc() {} -} \ No newline at end of file +} diff --git a/modules/flann/src/precomp.cpp b/modules/flann/src/precomp.cpp deleted file mode 100644 index c149df18f..000000000 --- a/modules/flann/src/precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "precomp.hpp" diff --git a/modules/flann/src/precomp.hpp b/modules/flann/src/precomp.hpp index fb5ee3f98..988c01f2a 100644 --- a/modules/flann/src/precomp.hpp +++ b/modules/flann/src/precomp.hpp @@ -5,9 +5,7 @@ #include #include -#ifdef HAVE_CVCONFIG_H # include "cvconfig.h" -#endif #include "opencv2/core/core.hpp" #include "opencv2/core/internal.hpp" @@ -24,4 +22,3 @@ #include "opencv2/flann/flann_base.hpp" #endif - diff --git a/modules/flann/test/test_precomp.cpp b/modules/flann/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/flann/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/gpu/CMakeLists.txt b/modules/gpu/CMakeLists.txt index 44b507268..8650b3f09 100644 --- a/modules/gpu/CMakeLists.txt +++ b/modules/gpu/CMakeLists.txt @@ -43,7 +43,7 @@ if(HAVE_CUDA) ocv_cuda_compile(cuda_objs ${lib_cuda} ${ncv_cuda}) set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY}) - + if(HAVE_CUFFT) set(cuda_link_libs ${cuda_link_libs} ${CUDA_cufft_LIBRARY}) endif() diff --git a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst index 587c253d2..aafbf7458 100644 --- a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst @@ -44,7 +44,11 @@ The class also performs pre- and post-filtering steps: Sobel pre-filtering (if ` This means that the input left image is low textured. +.. 
note:: + * A basic stereo matching example can be found at opencv_source_code/samples/gpu/stereo_match.cpp + * A stereo matching example using several GPUs can be found at opencv_source_code/samples/gpu/stereo_multi.cpp + * A stereo matching example using several GPUs and the driver API can be found at opencv_source_code/samples/gpu/driver_api_stereo_multi.cpp gpu::StereoBM_GPU::StereoBM_GPU ----------------------------------- diff --git a/modules/gpu/doc/data_structures.rst b/modules/gpu/doc/data_structures.rst index 569972320..0c75a5ccc 100644 --- a/modules/gpu/doc/data_structures.rst +++ b/modules/gpu/doc/data_structures.rst @@ -386,4 +386,3 @@ Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is { CV_EXPORTS static cudaStream_t getStream(const Stream& stream); }; - diff --git a/modules/gpu/doc/image_filtering.rst b/modules/gpu/doc/image_filtering.rst index 348a42510..824fbfa4f 100644 --- a/modules/gpu/doc/image_filtering.rst +++ b/modules/gpu/doc/image_filtering.rst @@ -5,7 +5,9 @@ Image Filtering Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images. +.. note:: + * An example containing all basic morphology operators like erode and dilate can be found at opencv_source_code/samples/gpu/morphology.cpp gpu::BaseRowFilter_GPU ---------------------- diff --git a/modules/gpu/doc/image_processing.rst b/modules/gpu/doc/image_processing.rst index 0b3254033..abf4fdaba 100644 --- a/modules/gpu/doc/image_processing.rst +++ b/modules/gpu/doc/image_processing.rst @@ -966,7 +966,9 @@ Composites two images using alpha opacity values contained in each image. :param stream: Stream for the asynchronous version. +.. note:: + * An example demonstrating the use of alphaComp can be found at opencv_source_code/samples/gpu/alpha_comp.cpp gpu::Canny ------------------- @@ -1028,7 +1030,9 @@ Finds lines in a binary image using the classical Hough transform. .. seealso:: :ocv:func:`HoughLines` +.. note:: + * An example using the Hough lines detector can be found at opencv_source_code/samples/gpu/houghlines.cpp gpu::HoughLinesDownload ----------------------- diff --git a/modules/gpu/doc/introduction.rst b/modules/gpu/doc/introduction.rst index ef34c369b..a1237629c 100644 --- a/modules/gpu/doc/introduction.rst +++ b/modules/gpu/doc/introduction.rst @@ -60,4 +60,3 @@ With this algorithm, a dual GPU gave a 180 % performance increase comparing to the single Fermi GPU. For a source code example, see http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/. - diff --git a/modules/gpu/doc/object_detection.rst b/modules/gpu/doc/object_detection.rst index 133660236..c8e2dbf0a 100644 --- a/modules/gpu/doc/object_detection.rst +++ b/modules/gpu/doc/object_detection.rst @@ -62,7 +62,12 @@ The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detec Interfaces of all methods are kept similar to the ``CPU HOG`` descriptor and detector analogues as much as possible. +.. 
note:: + * An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/cpp/peopledetect.cpp + * A GPU example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/gpu/hog.cpp + + * (Python) An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/python2/peopledetect.py gpu::HOGDescriptor::HOGDescriptor ------------------------------------- @@ -230,7 +235,10 @@ Cascade classifier class used for object detection. Supports HAAR and LBP cascad Size getClassifierSize() const; }; +.. note:: + * A cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier.cpp + * An Nvidia API specific cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp gpu::CascadeClassifier_GPU::CascadeClassifier_GPU ----------------------------------------------------- diff --git a/modules/gpu/doc/video.rst b/modules/gpu/doc/video.rst index 284bb17fa..ea15a062a 100644 --- a/modules/gpu/doc/video.rst +++ b/modules/gpu/doc/video.rst @@ -3,7 +3,10 @@ Video Analysis .. highlight:: cpp +.. note:: + * A general optical flow example can be found at opencv_source_code/samples/gpu/optical_flow.cpp + * A general optical flow example using the Nvidia API can be found at opencv_source_code/samples/gpu/opticalflow_nvidia_api.cpp gpu::BroxOpticalFlow -------------------- @@ -44,7 +47,9 @@ Class computing the optical flow for two images using Brox et al Optical Flow al GpuMat buf; }; +.. note:: + * An example illustrating the Brox et al. optical flow algorithm can be found at opencv_source_code/samples/gpu/brox_optical_flow.cpp gpu::GoodFeaturesToTrackDetector_GPU ------------------------------------ @@ -213,7 +218,9 @@ The class can calculate an optical flow for a sparse feature set or dense optica .. seealso:: :ocv:func:`calcOpticalFlowPyrLK` +.. note:: + * An example of the Lucas-Kanade optical flow algorithm can be found at opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp gpu::PyrLKOpticalFlow::sparse ----------------------------- @@ -418,7 +425,9 @@ The class discriminates between foreground and background pixels by building and .. seealso:: :ocv:class:`BackgroundSubtractorMOG` +.. note:: + * An example on Gaussian mixture based background/foreground segmentation can be found at opencv_source_code/samples/gpu/bgfg_segm.cpp gpu::MOG_GPU::MOG_GPU --------------------- @@ -697,7 +706,9 @@ The class uses H264 video codec. .. note:: Currently only Windows platform is supported. +.. note:: + * An example on how to use the videoWriter class can be found at opencv_source_code/samples/gpu/video_writer.cpp gpu::VideoWriter_GPU::VideoWriter_GPU ------------------------------------- @@ -910,7 +921,9 @@ Class for reading video from files. .. note:: Currently only Windows and Linux platforms are supported. +.. 
note:: + * An example on how to use the videoReader class can be found at opencv_source_code/samples/gpu/video_reader.cpp gpu::VideoReader_GPU::Codec --------------------------- diff --git a/modules/gpu/include/opencv2/gpu/device/block.hpp b/modules/gpu/include/opencv2/gpu/device/block.hpp index 86ce205bc..6cc00aed4 100644 --- a/modules/gpu/include/opencv2/gpu/device/block.hpp +++ b/modules/gpu/include/opencv2/gpu/device/block.hpp @@ -201,5 +201,3 @@ namespace cv { namespace gpu { namespace device }}} #endif /* __OPENCV_GPU_DEVICE_BLOCK_HPP__ */ - - diff --git a/modules/gpu/include/opencv2/gpu/device/color.hpp b/modules/gpu/include/opencv2/gpu/device/color.hpp index c087d179b..5af64bf61 100644 --- a/modules/gpu/include/opencv2/gpu/device/color.hpp +++ b/modules/gpu/include/opencv2/gpu/device/color.hpp @@ -107,25 +107,25 @@ namespace cv { namespace gpu { namespace device #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS diff --git a/modules/gpu/misc/mark_nvidia.py b/modules/gpu/misc/mark_nvidia.py index 08743fb13..80dc7f9ce 100755 --- a/modules/gpu/misc/mark_nvidia.py +++ b/modules/gpu/misc/mark_nvidia.py @@ -257,4 +257,3 @@ if __name__ == "__main__": outputFile = open(sys.argv[2], 'w') outputFile.writelines(lines) outputFile.close() - diff --git a/modules/gpu/perf/perf_core.cpp b/modules/gpu/perf/perf_core.cpp index 3042beadc..e38196b99 100644 --- a/modules/gpu/perf/perf_core.cpp +++ b/modules/gpu/perf/perf_core.cpp @@ -1337,7 +1337,7 @@ PERF_TEST_P(Sz_Type_Flags, Core_GEMM, TEST_CYCLE() cv::gpu::gemm(d_src1, d_src2, 1.0, 
d_src3, 1.0, dst, flags); - GPU_SANITY_CHECK(dst, 1e-6); + GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE); } else { diff --git a/modules/gpu/perf/perf_imgproc.cpp b/modules/gpu/perf/perf_imgproc.cpp index ee4601277..d942bed61 100644 --- a/modules/gpu/perf/perf_imgproc.cpp +++ b/modules/gpu/perf/perf_imgproc.cpp @@ -562,7 +562,17 @@ PERF_TEST_P(Sz, ImgProc_CalcHist, } else { - FAIL_NO_CPU(); + cv::Mat dst; + + const int hbins = 256; + const float hranges[] = {0.0f, 256.0f}; + const int histSize[] = {hbins}; + const float* ranges[] = {hranges}; + const int channels[] = {0}; + + TEST_CYCLE() cv::calcHist(&src, 1, channels, cv::Mat(), dst, 1, histSize, ranges); + + CPU_SANITY_CHECK(dst); } } @@ -874,7 +884,7 @@ PERF_TEST_P(Sz_KernelSz_Ccorr, ImgProc_Convolve, TEST_CYCLE() cv::gpu::convolve(d_image, d_templ, dst, ccorr, d_buf); - GPU_SANITY_CHECK(dst); + GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE); } else { diff --git a/modules/gpu/perf/perf_video.cpp b/modules/gpu/perf/perf_video.cpp index 672d657b2..9894f7f7b 100644 --- a/modules/gpu/perf/perf_video.cpp +++ b/modules/gpu/perf/perf_video.cpp @@ -49,6 +49,7 @@ using namespace perf; #if defined(HAVE_XINE) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(WIN32) /* assume that we have ffmpeg */ @@ -426,8 +427,8 @@ PERF_TEST_P(ImagePair, Video_OpticalFlowDual_TVL1, TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v); - GPU_SANITY_CHECK(u, 1e-2); - GPU_SANITY_CHECK(v, 1e-2); + GPU_SANITY_CHECK(u, 1e-1); + GPU_SANITY_CHECK(v, 1e-1); } else { diff --git a/modules/gpu/perf4au/CMakeLists.txt b/modules/gpu/perf4au/CMakeLists.txt index 745220382..376e7b270 100644 --- a/modules/gpu/perf4au/CMakeLists.txt +++ b/modules/gpu/perf4au/CMakeLists.txt @@ -25,4 +25,3 @@ if(WIN32) set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG") endif() endif() - diff --git a/modules/gpu/perf4au/main.cpp b/modules/gpu/perf4au/main.cpp index f6a65ad63..8c68385e1 100644 --- a/modules/gpu/perf4au/main.cpp +++ b/modules/gpu/perf4au/main.cpp @@ -42,9 +42,7 @@ #include -#ifdef HAVE_CVCONFIG_H #include "cvconfig.h" -#endif #include "opencv2/ts/ts.hpp" #include "opencv2/ts/gpu_perf.hpp" @@ -55,16 +53,12 @@ #include "opencv2/video/video.hpp" #include "opencv2/legacy/legacy.hpp" -int main(int argc, char* argv[]) -{ - perf::printCudaInfo(); +static const char * impls[] = { + "cuda", + "plain" +}; - perf::Regression::Init("gpu_perf4au"); - perf::TestBase::Init(argc, argv); - testing::InitGoogleTest(&argc, argv); - - return RUN_ALL_TESTS(); -} +CV_PERF_TEST_MAIN_WITH_IMPLS(gpu_perf4au, impls, perf::printCudaInfo()) ////////////////////////////////////////////////////////// // HoughLinesP diff --git a/modules/gpu/src/calib3d.cpp b/modules/gpu/src/calib3d.cpp index b84f09d0a..ee0004bcf 100644 --- a/modules/gpu/src/calib3d.cpp +++ b/modules/gpu/src/calib3d.cpp @@ -291,5 +291,3 @@ void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& cam } #endif - - diff --git a/modules/gpu/src/cuda/hist.cu b/modules/gpu/src/cuda/hist.cu index 8b8a1e8c6..d0ec257c4 100644 --- a/modules/gpu/src/cuda/hist.cu +++ b/modules/gpu/src/cuda/hist.cu @@ -109,6 +109,86 @@ namespace hist ///////////////////////////////////////////////////////////////////////// +namespace hist +{ + __device__ __forceinline__ void histEvenInc(int* shist, uint data, int binSize, int lowerLevel, int upperLevel) + { + if (data >= lowerLevel && data <= 
diff --git a/modules/gpu/src/cuda/hist.cu b/modules/gpu/src/cuda/hist.cu
index 8b8a1e8c6..d0ec257c4 100644
--- a/modules/gpu/src/cuda/hist.cu
+++ b/modules/gpu/src/cuda/hist.cu
@@ -109,6 +109,86 @@ namespace hist

 /////////////////////////////////////////////////////////////////////////

+namespace hist
+{
+    __device__ __forceinline__ void histEvenInc(int* shist, uint data, int binSize, int lowerLevel, int upperLevel)
+    {
+        if (data >= lowerLevel && data <= upperLevel)
+        {
+            const uint ind = (data - lowerLevel) / binSize;
+            Emulation::smem::atomicAdd(shist + ind, 1);
+        }
+    }
+
+    __global__ void histEven8u(const uchar* src, const size_t step, const int rows, const int cols,
+                               int* hist, const int binCount, const int binSize, const int lowerLevel, const int upperLevel)
+    {
+        extern __shared__ int shist[];
+
+        const int y = blockIdx.x * blockDim.y + threadIdx.y;
+        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+        if (tid < binCount)
+            shist[tid] = 0;
+
+        __syncthreads();
+
+        if (y < rows)
+        {
+            const uchar* rowPtr = src + y * step;
+            const uint* rowPtr4 = (uint*) rowPtr;
+
+            const int cols_4 = cols / 4;
+            for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
+            {
+                const uint data = rowPtr4[x];
+
+                histEvenInc(shist, (data >>  0) & 0xFFU, binSize, lowerLevel, upperLevel);
+                histEvenInc(shist, (data >>  8) & 0xFFU, binSize, lowerLevel, upperLevel);
+                histEvenInc(shist, (data >> 16) & 0xFFU, binSize, lowerLevel, upperLevel);
+                histEvenInc(shist, (data >> 24) & 0xFFU, binSize, lowerLevel, upperLevel);
+            }
+
+            if (cols % 4 != 0 && threadIdx.x == 0)
+            {
+                for (int x = cols_4 * 4; x < cols; ++x)
+                {
+                    const uchar data = rowPtr[x];
+                    histEvenInc(shist, data, binSize, lowerLevel, upperLevel);
+                }
+            }
+        }
+
+        __syncthreads();
+
+        if (tid < binCount)
+        {
+            const int histVal = shist[tid];
+
+            if (histVal > 0)
+                ::atomicAdd(hist + tid, histVal);
+        }
+    }
+
+    void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(src.rows, block.y));
+
+        const int binSize = divUp(upperLevel - lowerLevel, binCount);
+
+        const size_t smem_size = binCount * sizeof(int);
+
+        histEven8u<<<grid, block, smem_size, stream>>>(src.data, src.step, src.rows, src.cols, hist, binCount, binSize, lowerLevel, upperLevel);
+        cudaSafeCall( cudaGetLastError() );
+
+        if (stream == 0)
+            cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}
+
+/////////////////////////////////////////////////////////////////////////
+
 namespace hist
 {
     __constant__ int c_lut[256];
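histEven8u above privatizes the histogram in shared memory (one copy per block in shist), reads each row four pixels at a time through uint loads, and flushes only the non-zero bins to global memory with a single atomicAdd per bin. The binning rule is the part worth pinning down; a host-side reference under the same rule (histEvenRef is a hypothetical helper, with an explicit bounds guard where the kernel relies on the caller passing consistent levels):

#include <vector>

static std::vector<int> histEvenRef(const unsigned char* data, int n,
                                    int binCount, int lowerLevel, int upperLevel)
{
    // Same bin width the kernel computes: binSize = divUp(upper - lower, binCount)
    const int binSize = (upperLevel - lowerLevel + binCount - 1) / binCount;
    std::vector<int> hist(binCount, 0);

    for (int i = 0; i < n; ++i)
    {
        const int v = data[i];
        if (v >= lowerLevel && v <= upperLevel)   // values outside are ignored
        {
            const int bin = (v - lowerLevel) / binSize;
            if (bin < binCount)                   // v == upperLevel can land one past the end
                ++hist[bin];
        }
    }
    return hist;
}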
diff --git a/modules/gpu/src/cuda/imgproc.cu b/modules/gpu/src/cuda/imgproc.cu
index b23e0a665..067dfaf64 100644
--- a/modules/gpu/src/cuda/imgproc.cu
+++ b/modules/gpu/src/cuda/imgproc.cu
@@ -619,6 +619,7 @@ namespace cv { namespace gpu { namespace device
     //////////////////////////////////////////////////////////////////////////
     // mulSpectrums

+#ifdef HAVE_CUFFT
     __global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
     {
         const int x = blockIdx.x * blockDim.x + threadIdx.x;
@@ -642,11 +643,13 @@ namespace cv { namespace gpu { namespace device
         if (stream == 0)
             cudaSafeCall( cudaDeviceSynchronize() );
     }
+#endif

     //////////////////////////////////////////////////////////////////////////
     // mulSpectrums_CONJ

+#ifdef HAVE_CUFFT
     __global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
     {
         const int x = blockIdx.x * blockDim.x + threadIdx.x;
@@ -670,11 +673,13 @@ namespace cv { namespace gpu { namespace device
         if (stream == 0)
             cudaSafeCall( cudaDeviceSynchronize() );
     }
+#endif

     //////////////////////////////////////////////////////////////////////////
     // mulAndScaleSpectrums

+#ifdef HAVE_CUFFT
     __global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
     {
         const int x = blockIdx.x * blockDim.x + threadIdx.x;
@@ -699,11 +704,13 @@ namespace cv { namespace gpu { namespace device
         if (stream == 0)
             cudaSafeCall( cudaDeviceSynchronize() );
     }
+#endif

     //////////////////////////////////////////////////////////////////////////
     // mulAndScaleSpectrums_CONJ

+#ifdef HAVE_CUFFT
     __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
     {
         const int x = blockIdx.x * blockDim.x + threadIdx.x;
@@ -728,6 +735,7 @@ namespace cv { namespace gpu { namespace device
         if (stream == 0)
             cudaSafeCall( cudaDeviceSynchronize() );
     }
+#endif

     //////////////////////////////////////////////////////////////////////////
     // buildWarpMaps
diff --git a/modules/gpu/src/cuda/resize.cu b/modules/gpu/src/cuda/resize.cu
index e72029767..dc9f462d1 100644
--- a/modules/gpu/src/cuda/resize.cu
+++ b/modules/gpu/src/cuda/resize.cu
@@ -42,261 +42,441 @@

 #if !defined CUDA_DISABLER

-#include "internal_shared.hpp"
+#include
+#include "opencv2/gpu/device/common.hpp"
 #include "opencv2/gpu/device/border_interpolate.hpp"
 #include "opencv2/gpu/device/vec_traits.hpp"
 #include "opencv2/gpu/device/vec_math.hpp"
 #include "opencv2/gpu/device/saturate_cast.hpp"
 #include "opencv2/gpu/device/filters.hpp"
-#include
-#include

 namespace cv { namespace gpu { namespace device
 {
-    namespace imgproc
+    // kernels
+
+    template <typename T> __global__ void resize_nearest(const PtrStep<T> src, PtrStepSz<T> dst, const float fy, const float fx)
     {
-        template <typename Ptr2D, typename T> __global__ void resize(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
+        const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
+        const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
+
+        if (dst_x < dst.cols && dst_y < dst.rows)
         {
-            const int x = blockDim.x * blockIdx.x + threadIdx.x;
-            const int y = blockDim.y * blockIdx.y + threadIdx.y;
+            const float src_x = dst_x * fx;
+            const float src_y = dst_y * fy;

-            if (x < dst.cols && y < dst.rows)
-            {
-                const float xcoo = x * fx;
-                const float ycoo = y * fy;
-
-                dst(y, x) = saturate_cast<T>(src(ycoo, xcoo));
-            }
+            dst(dst_y, dst_x) = src(__float2int_rz(src_y), __float2int_rz(src_x));
         }
+    }

-        template <typename Ptr2D, typename T> __global__ void resize_area(const Ptr2D src, float fx, float fy, PtrStepSz<T> dst)
+    template <typename T> __global__ void resize_linear(const PtrStepSz<T> src, PtrStepSz<T> dst, const float fy, const float fx)
+    {
+        typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
+
+        const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
+        const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
+
+        if (dst_x < dst.cols && dst_y < dst.rows)
         {
-            const int x = blockDim.x * blockIdx.x + threadIdx.x;
-            const int y = blockDim.y * blockIdx.y + threadIdx.y;
+            const float src_x = dst_x * fx;
+            const float src_y = dst_y * fy;

-            if (x < dst.cols && y < dst.rows)
-            {
-                dst(y, x) = saturate_cast<T>(src(y, x));
-            }
+            work_type out = VecTraits<work_type>::all(0);
+
+            const int x1 = __float2int_rd(src_x);
+            const int y1 = __float2int_rd(src_y);
+            const int x2 = x1 + 1;
+            const int y2 = y1 + 1;
+            const int x2_read = ::min(x2, src.cols - 1);
+            const int y2_read = ::min(y2, src.rows - 1);
+
+            T src_reg = src(y1, x1);
+            out = out + src_reg * ((x2 - src_x) * (y2 - src_y));
+
+            src_reg = src(y1, x2_read);
+            out = out + src_reg * ((src_x - x1) * (y2 - src_y));
+
+            src_reg = src(y2_read, x1);
+            out = out + src_reg * ((x2 - src_x) * (src_y - y1));
+
+            src_reg = src(y2_read, x2_read);
+            out = out + src_reg * ((src_x - x1) * (src_y - y1));
+
+            dst(dst_y, dst_x) = saturate_cast<T>(out);
         }
+    }

-        template
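resize_linear above is plain bilinear filtering: four taps, each weighted by the opposite fractional distances, with the right/bottom reads clamped at the image border (x2_read/y2_read). A scalar host-side sketch of the same arithmetic, single-channel float for clarity (bilinearRef is a hypothetical helper, not part of the patch):

#include <algorithm>
#include <cmath>

static float bilinearRef(const float* src, int rows, int cols, float src_y, float src_x)
{
    const int x1 = (int) std::floor(src_x);
    const int y1 = (int) std::floor(src_y);
    const int x2 = x1 + 1;
    const int y2 = y1 + 1;
    const int x2r = std::min(x2, cols - 1);   // clamp the reads at the border,
    const int y2r = std::min(y2, rows - 1);   // but keep the weights unclamped

    // The four weights sum to 1: (x2 - src_x) + (src_x - x1) == 1, same in y.
    return src[y1  * cols + x1 ] * (x2 - src_x) * (y2 - src_y)
         + src[y1  * cols + x2r] * (src_x - x1) * (y2 - src_y)
         + src[y2r * cols + x1 ] * (x2 - src_x) * (src_y - y1)
         + src[y2r * cols + x2r] * (src_x - x1) * (src_y - y1);
}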